summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMike Pagano <mpagano@gentoo.org>2014-06-19 19:21:56 -0400
committerMike Pagano <mpagano@gentoo.org>2014-06-19 19:21:56 -0400
commit1ed332103c66657d4606ae29c42ba41adb713a3d (patch)
treea3534778dbbe80954e9eacc596da4f0ecdde4c9d
parentAdding generic patches for all versions (diff)
downloadlinux-patches-3.2.tar.gz
linux-patches-3.2.tar.bz2
linux-patches-3.2.zip
Adding patches for Linux 3.2.X3.2-393.2
-rw-r--r--0000_README291
-rw-r--r--1000_linux-3.2.1.patch1787
-rw-r--r--1001_linux-3.2.2.patch6552
-rw-r--r--1002_linux-3.2.3.patch3760
-rw-r--r--1003_linux-3.2.4.patch40
-rw-r--r--1004_linux-3.2.5.patch205
-rw-r--r--1005_linux-3.2.6.patch2789
-rw-r--r--1006_linux-3.2.7.patch994
-rw-r--r--1007_linux-3.2.8.patch666
-rw-r--r--1008_linux-3.2.9.patch3675
-rw-r--r--1009_linux-3.2.10.patch4275
-rw-r--r--1010_linux-3.2.11.patch37
-rw-r--r--1011_linux-3.2.12.patch1548
-rw-r--r--1012_linux-3.2.13.patch445
-rw-r--r--1013_linux-3.2.14.patch6098
-rw-r--r--1014_linux-3.2.15.patch2169
-rw-r--r--1015_linux-3.2.16.patch2005
-rw-r--r--1016_linux-3.2.17.patch5695
-rw-r--r--1017_linux-3.2.18.patch1791
-rw-r--r--1018_linux-3.2.19.patch4857
-rw-r--r--1019_linux-3.2.20.patch2829
-rw-r--r--1020_linux-3.2.21.patch2499
-rw-r--r--1021_linux-3.2.22.patch1245
-rw-r--r--1022_linux-3.2.23.patch1862
-rw-r--r--1023_linux-3.2.24.patch4684
-rw-r--r--1024_linux-3.2.25.patch4503
-rw-r--r--1025_linux-3.2.26.patch238
-rw-r--r--1026_linux-3.2.27.patch3188
-rw-r--r--1027_linux-3.2.28.patch1114
-rw-r--r--1028_linux-3.2.29.patch4279
-rw-r--r--1029_linux-3.2.30.patch5552
-rw-r--r--1030_linux-3.2.31.patch3327
-rw-r--r--1031_linux-3.2.32.patch6206
-rw-r--r--1032_linux-3.2.33.patch3450
-rw-r--r--1033_linux-3.2.34.patch3678
-rw-r--r--1034_linux-3.2.35.patch3014
-rw-r--r--1035_linux-3.2.36.patch6434
-rw-r--r--1036_linux-3.2.37.patch1689
-rw-r--r--1037_linux-3.2.38.patch4587
-rw-r--r--1038_linux-3.2.39.patch2660
-rw-r--r--1039_linux-3.2.40.patch6295
-rw-r--r--1040_linux-3.2.41.patch3865
-rw-r--r--1041_linux-3.2.42.patch3602
-rw-r--r--1042_linux-3.2.43.patch2442
-rw-r--r--1043_linux-3.2.44.patch2808
-rw-r--r--1044_linux-3.2.45.patch3809
-rw-r--r--1045_linux-3.2.46.patch3142
-rw-r--r--1046_linux-3.2.47.patch3314
-rw-r--r--1047_linux-3.2.48.patch952
-rw-r--r--1048_linux-3.2.49.patch2970
-rw-r--r--1049_linux-3.2.50.patch2495
-rw-r--r--1050_linux-3.2.51.patch3886
-rw-r--r--1051_linux-3.2.52.patch5221
-rw-r--r--1052_linux-3.2.53.patch3357
-rw-r--r--1053_linux-3.2.54.patch6825
-rw-r--r--1054_linux-3.2.55.patch2495
-rw-r--r--1055_linux-3.2.56.patch6904
-rw-r--r--1056_linux-3.2.57.patch905
-rw-r--r--1057_linux-3.2.58.patch3567
-rw-r--r--1058_linux-3.2.59.patch1213
-rw-r--r--1059_linux-3.2.60.patch2964
-rw-r--r--1500_selinux-add-SOCK_DIAG_BY_FAMILY-to-the-list-of-netli.patch56
-rw-r--r--1505_XATTR_USER_PREFIX.patch55
-rw-r--r--1512_af_key-initialize-satype-in-key_notify_policy_flush.patch29
-rw-r--r--2300_per-pci-device-msi-irq-listing.patch211
-rw-r--r--2400_kcopy-patch-for-infiniband-driver.patch725
-rw-r--r--2600_Input-ALPS-synaptics-touchpad.patch1267
-rw-r--r--2700_drm-i915-resurrect-panel-lid-handling.patch117
-rw-r--r--2705_ThinkPad-30-brightness-control-fix.patch81
-rw-r--r--4200_fbcondecor-0.9.6.patch2358
-rw-r--r--4500_nouveau-video-output-control-Kconfig.patch9
-rw-r--r--4567_distro-Gentoo-Kconfig.patch12
72 files changed, 194656 insertions, 12 deletions
diff --git a/0000_README b/0000_README
index 90189932..327132e3 100644
--- a/0000_README
+++ b/0000_README
@@ -1,6 +1,6 @@
README
--------------------------------------------------------------------------
-This patchset is to be the series of patches for gentoo-sources.
+This patchset is to be the 2.6 series of gentoo-sources.
It is designed for cross-compatibility, fixes and stability, with performance
and additional features/driver support being a second.
@@ -34,15 +34,292 @@ FEATURES
4100-4200 storage
4200-4300 graphics
4300-4400 filesystem
-4400-4500 security enhancement
-4500-4600 other
-
-EXPERIMENTAL
-5000-5100 experimental patches (BFQ, ...)
+4400-4500 security enhancement
+4500-4600 other
Individual Patch Descriptions:
--------------------------------------------------------------------------
+Patch: 1000_linux-3.2.1.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.1
+
+Patch: 1001_linux-3.2.2.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.2
+
+Patch: 1002_linux-3.2.3.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.3
+
+Patch: 1003_linux-3.2.4.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.4
+
+Patch: 1004_linux-3.2.5.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.5
+
+Patch: 1005_linux-3.2.6.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.6
+
+Patch: 1006_linux-3.2.7.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.7
+
+Patch: 1007_linux-3.2.8.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.8
+
+Patch: 1008_linux-3.2.9.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.9
+
+Patch: 1009_linux-3.2.10.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.10
+
+Patch: 1010_linux-3.2.11.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.11
+
+Patch: 1011_linux-3.2.12.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.12
+
+Patch: 1012_linux-3.2.13.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.13
+
+Patch: 1013_linux-3.2.14.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.14
+
+Patch: 1014_linux-3.2.15.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.15
+
+Patch: 1015_linux-3.2.16.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.16
+
+Patch: 1016_linux-3.2.17.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.17
+
+Patch: 1017_linux-3.2.18.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.18
+
+Patch: 1018_linux-3.2.19.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.19
+
+Patch: 1019_linux-3.2.20.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.20
+
+Patch: 1020_linux-3.2.21.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.21
+
+Patch: 1021_linux-3.2.22.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.22
+
+Patch: 1022_linux-3.2.23.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.23
+
+Patch: 1023_linux-3.2.24.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.24
+
+Patch: 1024_linux-3.2.25.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.25
+
+Patch: 1025_linux-3.2.26.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.26
+
+Patch: 1026_linux-3.2.27.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.27
+
+Patch: 1027_linux-3.2.28.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.28
+
+Patch: 1028_linux-3.2.29.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.29
+
+Patch: 1029_linux-3.2.30.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.30
+
+Patch: 1030_linux-3.2.31.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.31
+
+Patch: 1031_linux-3.2.32.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.32
+
+Patch: 1032_linux-3.2.33.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.33
+
+Patch: 1033_linux-3.2.34.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.34
+
+Patch: 1034_linux-3.2.35.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.35
+
+Patch: 1035_linux-3.2.36.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.36
+
+Patch: 1036_linux-3.2.37.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.37
+
+Patch: 1037_linux-3.2.38.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.38
+
+Patch: 1038_linux-3.2.39.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.39
+
+Patch: 1039_linux-3.2.40.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.40
+
+Patch: 1040_linux-3.2.41.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.41
+
+Patch: 1041_linux-3.2.42.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.42
+
+Patch: 1042_linux-3.2.43.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.43
+
+Patch: 1043_linux-3.2.44.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.44
+
+Patch: 1044_linux-3.2.45.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.45
+
+Patch: 1045_linux-3.2.46.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.46
+
+Patch: 1046_linux-3.2.47.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.47
+
+Patch: 1047_linux-3.2.48.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.48
+
+Patch: 1048_linux-3.2.49.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.49
+
+Patch: 1049_linux-3.2.50.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.50
+
+Patch: 1050_linux-3.2.51.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.51
+
+Patch: 1051_linux-3.2.52.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.52
+
+Patch: 1052_linux-3.2.53.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.53
+
+Patch: 1053_linux-3.2.54.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.54
+
+Patch: 1054_linux-3.2.55.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.55
+
+Patch: 1055_linux-3.2.56.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.56
+
+Patch: 1056_linux-3.2.57.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.57
+
+Patch: 1057_linux-3.2.58.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.58
+
+Patch: 1058_linux-3.2.59.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.59
+
+Patch: 1059_linux-3.2.60.patch
+From: http://www.kernel.org
+Desc: Linux 3.2.60
+
+Patch: 1500_selinux-add-SOCK_DIAG_BY_FAMILY-to-the-list-of-netli.patch
+From: https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=6a96e15096da6e7491107321cfa660c7c2aa119d
+Desc: selinux: add SOCK_DIAG_BY_FAMILY to the list of netlink message types
+
+Patch: 1505_XATTR_USER_PREFIX.patch
+From: https://bugs.gentoo.org/show_bug.cgi?id=470644
+Desc: Support for namespace user.pax.* on tmpfs.
+
+Patch: 1512_af_key-initialize-satype-in-key_notify_policy_flush.patch
+From: https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=85dfb745ee40232876663ae206cba35f24ab2a40
+Desc: af_key: initialize satype in key_notify_policy_flush()
+
+Patch: 2300_per-pci-device-msi-irq-listing.patch
+From: http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commitdiff;h=da8d1c8ba4dcb16d60be54b233deca9a7cac98dc
+Desc: Add a per-pci-device subdirectory in sysfs
+
+Patch: 2400_kcopy-patch-for-infiniband-driver.patch
+From: Alexey Shvetsov <alexxy@gentoo.org>
+Desc: Zero copy for infiniband psm userspace driver
+
+Patch: 2600_Input-ALPS-synaptics-touchpad.patch
+From: http://bugs.gentoo.org/show_bug.cgi?id=318567
+Desc: ALPS Touchpad Patchset
+
+Patch: 2700_drm-i915-resurrect-panel-lid-handling.patch
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Desc: drm/i915: resurrect panel lid handling
+
+Patch: 2705_ThinkPad-30-brightness-control-fix.patch
+From: Seth Forshee <seth.forshee@canonical.com>
+Desc: ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads
+
+Patch: 4200_fbcondecor-0.9.6.patch
+From: http://dev.gentoo.org/~spock
+Desc: Bootsplash successor by Michal Januszewski ported by Alexxy
+
+Patch: 4500_nouveau-video-output-control-Kconfig.patch
+From: Tom Wijsman <TomWij@gentoo.org
+Desc: Make DRM_NOUVEU select VIDEO_OUTPUT_CONTROL, fixes bug #475748.
+
Patch: 4567_distro-Gentoo-Kconfig.patch
-From: Tom Wijsman <TomWij@gentoo.org>
+From: Tom Wijsman <TomWij@gentoo.org
Desc: Add Gentoo Linux support config settings and defaults.
diff --git a/1000_linux-3.2.1.patch b/1000_linux-3.2.1.patch
new file mode 100644
index 00000000..556a3afa
--- /dev/null
+++ b/1000_linux-3.2.1.patch
@@ -0,0 +1,1787 @@
+diff --git a/Documentation/HOWTO b/Documentation/HOWTO
+index 81bc1a9..f7ade3b 100644
+--- a/Documentation/HOWTO
++++ b/Documentation/HOWTO
+@@ -275,8 +275,8 @@ versions.
+ If no 2.6.x.y kernel is available, then the highest numbered 2.6.x
+ kernel is the current stable kernel.
+
+-2.6.x.y are maintained by the "stable" team <stable@kernel.org>, and are
+-released as needs dictate. The normal release period is approximately
++2.6.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
++are released as needs dictate. The normal release period is approximately
+ two weeks, but it can be longer if there are no pressing problems. A
+ security-related problem, instead, can cause a release to happen almost
+ instantly.
+diff --git a/Documentation/development-process/5.Posting b/Documentation/development-process/5.Posting
+index 903a254..8a48c9b 100644
+--- a/Documentation/development-process/5.Posting
++++ b/Documentation/development-process/5.Posting
+@@ -271,10 +271,10 @@ copies should go to:
+ the linux-kernel list.
+
+ - If you are fixing a bug, think about whether the fix should go into the
+- next stable update. If so, stable@kernel.org should get a copy of the
+- patch. Also add a "Cc: stable@kernel.org" to the tags within the patch
+- itself; that will cause the stable team to get a notification when your
+- fix goes into the mainline.
++ next stable update. If so, stable@vger.kernel.org should get a copy of
++ the patch. Also add a "Cc: stable@vger.kernel.org" to the tags within
++ the patch itself; that will cause the stable team to get a notification
++ when your fix goes into the mainline.
+
+ When selecting recipients for a patch, it is good to have an idea of who
+ you think will eventually accept the patch and get it merged. While it
+diff --git a/Documentation/usb/usbmon.txt b/Documentation/usb/usbmon.txt
+index a4efa04..5335fa8 100644
+--- a/Documentation/usb/usbmon.txt
++++ b/Documentation/usb/usbmon.txt
+@@ -47,10 +47,11 @@ This allows to filter away annoying devices that talk continuously.
+
+ 2. Find which bus connects to the desired device
+
+-Run "cat /proc/bus/usb/devices", and find the T-line which corresponds to
+-the device. Usually you do it by looking for the vendor string. If you have
+-many similar devices, unplug one and compare two /proc/bus/usb/devices outputs.
+-The T-line will have a bus number. Example:
++Run "cat /sys/kernel/debug/usb/devices", and find the T-line which corresponds
++to the device. Usually you do it by looking for the vendor string. If you have
++many similar devices, unplug one and compare the two
++/sys/kernel/debug/usb/devices outputs. The T-line will have a bus number.
++Example:
+
+ T: Bus=03 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=12 MxCh= 0
+ D: Ver= 1.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1
+@@ -58,7 +59,10 @@ P: Vendor=0557 ProdID=2004 Rev= 1.00
+ S: Manufacturer=ATEN
+ S: Product=UC100KM V2.00
+
+-Bus=03 means it's bus 3.
++"Bus=03" means it's bus 3. Alternatively, you can look at the output from
++"lsusb" and get the bus number from the appropriate line. Example:
++
++Bus 003 Device 002: ID 0557:2004 ATEN UC100KM V2.00
+
+ 3. Start 'cat'
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 62f1cd3..f986e7d 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -6258,7 +6258,7 @@ F: arch/alpha/kernel/srm_env.c
+
+ STABLE BRANCH
+ M: Greg Kroah-Hartman <greg@kroah.com>
+-L: stable@kernel.org
++L: stable@vger.kernel.org
+ S: Maintained
+
+ STAGING SUBSYSTEM
+diff --git a/Makefile b/Makefile
+index adddd11..c5edffa 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
+index fe6f7c2..bc3c745 100644
+--- a/arch/powerpc/include/asm/time.h
++++ b/arch/powerpc/include/asm/time.h
+@@ -219,5 +219,7 @@ DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
+ extern void secondary_cpu_time_init(void);
+ extern void iSeries_time_init_early(void);
+
++extern void decrementer_check_overflow(void);
++
+ #endif /* __KERNEL__ */
+ #endif /* __POWERPC_TIME_H */
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 5c3c469..745c1e7 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -164,16 +164,13 @@ notrace void arch_local_irq_restore(unsigned long en)
+ */
+ local_paca->hard_enabled = en;
+
+-#ifndef CONFIG_BOOKE
+- /* On server, re-trigger the decrementer if it went negative since
+- * some processors only trigger on edge transitions of the sign bit.
+- *
+- * BookE has a level sensitive decrementer (latches in TSR) so we
+- * don't need that
++ /*
++ * Trigger the decrementer if we have a pending event. Some processors
++ * only trigger on edge transitions of the sign bit. We might also
++ * have disabled interrupts long enough that the decrementer wrapped
++ * to positive.
+ */
+- if ((int)mfspr(SPRN_DEC) < 0)
+- mtspr(SPRN_DEC, 1);
+-#endif /* CONFIG_BOOKE */
++ decrementer_check_overflow();
+
+ /*
+ * Force the delivery of pending soft-disabled interrupts on PS3.
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index 522bb1d..5db163c 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -889,6 +889,15 @@ static void __init clocksource_init(void)
+ clock->name, clock->mult, clock->shift);
+ }
+
++void decrementer_check_overflow(void)
++{
++ u64 now = get_tb_or_rtc();
++ struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
++
++ if (now >= decrementer->next_tb)
++ set_dec(1);
++}
++
+ static int decrementer_set_next_event(unsigned long evt,
+ struct clock_event_device *dev)
+ {
+diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
+index f106662..c9311cf 100644
+--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
++++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
+@@ -109,7 +109,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long
+ if (opcode > MAX_HCALL_OPCODE)
+ return;
+
+- h = &get_cpu_var(hcall_stats)[opcode / 4];
++ h = &__get_cpu_var(hcall_stats)[opcode / 4];
+ h->tb_start = mftb();
+ h->purr_start = mfspr(SPRN_PURR);
+ }
+@@ -126,8 +126,6 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long
+ h->num_calls++;
+ h->tb_total += mftb() - h->tb_start;
+ h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
+-
+- put_cpu_var(hcall_stats);
+ }
+
+ static int __init hcall_inst_init(void)
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 27a4950..dc36ea6 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -554,6 +554,7 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
+ goto out;
+
+ (*depth)++;
++ preempt_disable();
+ trace_hcall_entry(opcode, args);
+ (*depth)--;
+
+@@ -576,6 +577,7 @@ void __trace_hcall_exit(long opcode, unsigned long retval,
+
+ (*depth)++;
+ trace_hcall_exit(opcode, retval, retbuf);
++ preempt_enable();
+ (*depth)--;
+
+ out:
+diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
+index 06ed6b4..3719c94 100644
+--- a/drivers/base/firmware_class.c
++++ b/drivers/base/firmware_class.c
+@@ -226,13 +226,13 @@ static ssize_t firmware_loading_store(struct device *dev,
+ int loading = simple_strtol(buf, NULL, 10);
+ int i;
+
++ mutex_lock(&fw_lock);
++
++ if (!fw_priv->fw)
++ goto out;
++
+ switch (loading) {
+ case 1:
+- mutex_lock(&fw_lock);
+- if (!fw_priv->fw) {
+- mutex_unlock(&fw_lock);
+- break;
+- }
+ firmware_free_data(fw_priv->fw);
+ memset(fw_priv->fw, 0, sizeof(struct firmware));
+ /* If the pages are not owned by 'struct firmware' */
+@@ -243,7 +243,6 @@ static ssize_t firmware_loading_store(struct device *dev,
+ fw_priv->page_array_size = 0;
+ fw_priv->nr_pages = 0;
+ set_bit(FW_STATUS_LOADING, &fw_priv->status);
+- mutex_unlock(&fw_lock);
+ break;
+ case 0:
+ if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
+@@ -274,7 +273,8 @@ static ssize_t firmware_loading_store(struct device *dev,
+ fw_load_abort(fw_priv);
+ break;
+ }
+-
++out:
++ mutex_unlock(&fw_lock);
+ return count;
+ }
+
+diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
+index 30a3085..fda56bd 100644
+--- a/drivers/bcma/bcma_private.h
++++ b/drivers/bcma/bcma_private.h
+@@ -18,6 +18,9 @@ void bcma_bus_unregister(struct bcma_bus *bus);
+ int __init bcma_bus_early_register(struct bcma_bus *bus,
+ struct bcma_device *core_cc,
+ struct bcma_device *core_mips);
++#ifdef CONFIG_PM
++int bcma_bus_resume(struct bcma_bus *bus);
++#endif
+
+ /* scan.c */
+ int bcma_bus_scan(struct bcma_bus *bus);
+diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
+index 1b51d8b..990f5a8 100644
+--- a/drivers/bcma/host_pci.c
++++ b/drivers/bcma/host_pci.c
+@@ -224,6 +224,41 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
+ pci_set_drvdata(dev, NULL);
+ }
+
++#ifdef CONFIG_PM
++static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state)
++{
++ /* Host specific */
++ pci_save_state(dev);
++ pci_disable_device(dev);
++ pci_set_power_state(dev, pci_choose_state(dev, state));
++
++ return 0;
++}
++
++static int bcma_host_pci_resume(struct pci_dev *dev)
++{
++ struct bcma_bus *bus = pci_get_drvdata(dev);
++ int err;
++
++ /* Host specific */
++ pci_set_power_state(dev, 0);
++ err = pci_enable_device(dev);
++ if (err)
++ return err;
++ pci_restore_state(dev);
++
++ /* Bus specific */
++ err = bcma_bus_resume(bus);
++ if (err)
++ return err;
++
++ return 0;
++}
++#else /* CONFIG_PM */
++# define bcma_host_pci_suspend NULL
++# define bcma_host_pci_resume NULL
++#endif /* CONFIG_PM */
++
+ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
+@@ -239,6 +274,8 @@ static struct pci_driver bcma_pci_bridge_driver = {
+ .id_table = bcma_pci_bridge_tbl,
+ .probe = bcma_host_pci_probe,
+ .remove = bcma_host_pci_remove,
++ .suspend = bcma_host_pci_suspend,
++ .resume = bcma_host_pci_resume,
+ };
+
+ int __init bcma_host_pci_init(void)
+diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
+index 70c84b9..10f92b3 100644
+--- a/drivers/bcma/main.c
++++ b/drivers/bcma/main.c
+@@ -240,6 +240,22 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
+ return 0;
+ }
+
++#ifdef CONFIG_PM
++int bcma_bus_resume(struct bcma_bus *bus)
++{
++ struct bcma_device *core;
++
++ /* Init CC core */
++ core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
++ if (core) {
++ bus->drv_cc.setup_done = false;
++ bcma_core_chipcommon_init(&bus->drv_cc);
++ }
++
++ return 0;
++}
++#endif
++
+ int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
+ {
+ drv->drv.name = drv->name;
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 0c048dd..d2d0a2a 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -62,6 +62,14 @@ struct hv_device_info {
+ struct hv_dev_port_info outbound;
+ };
+
++static int vmbus_exists(void)
++{
++ if (hv_acpi_dev == NULL)
++ return -ENODEV;
++
++ return 0;
++}
++
+
+ static void get_channel_info(struct hv_device *device,
+ struct hv_device_info *info)
+@@ -590,6 +598,10 @@ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, c
+
+ pr_info("registering driver %s\n", hv_driver->name);
+
++ ret = vmbus_exists();
++ if (ret < 0)
++ return ret;
++
+ hv_driver->driver.name = hv_driver->name;
+ hv_driver->driver.owner = owner;
+ hv_driver->driver.mod_name = mod_name;
+@@ -614,8 +626,8 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver)
+ {
+ pr_info("unregistering driver %s\n", hv_driver->name);
+
+- driver_unregister(&hv_driver->driver);
+-
++ if (!vmbus_exists())
++ driver_unregister(&hv_driver->driver);
+ }
+ EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
+
+@@ -776,6 +788,7 @@ static int __init hv_acpi_init(void)
+
+ cleanup:
+ acpi_bus_unregister_driver(&vmbus_acpi_driver);
++ hv_acpi_dev = NULL;
+ return ret;
+ }
+
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index 254f164..e3db8ef 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -241,11 +241,24 @@ static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
+ return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
+ }
+
++static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
++{
++ struct ib_uobject *uobj;
++
++ uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
++ return uobj ? uobj->object : NULL;
++}
++
+ static void put_qp_read(struct ib_qp *qp)
+ {
+ put_uobj_read(qp->uobject);
+ }
+
++static void put_qp_write(struct ib_qp *qp)
++{
++ put_uobj_write(qp->uobject);
++}
++
+ static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
+ {
+ return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
+@@ -2375,7 +2388,7 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
+ if (copy_from_user(&cmd, buf, sizeof cmd))
+ return -EFAULT;
+
+- qp = idr_read_qp(cmd.qp_handle, file->ucontext);
++ qp = idr_write_qp(cmd.qp_handle, file->ucontext);
+ if (!qp)
+ return -EINVAL;
+
+@@ -2404,7 +2417,7 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
+ kfree(mcast);
+
+ out_put:
+- put_qp_read(qp);
++ put_qp_write(qp);
+
+ return ret ? ret : in_len;
+ }
+@@ -2422,7 +2435,7 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
+ if (copy_from_user(&cmd, buf, sizeof cmd))
+ return -EFAULT;
+
+- qp = idr_read_qp(cmd.qp_handle, file->ucontext);
++ qp = idr_write_qp(cmd.qp_handle, file->ucontext);
+ if (!qp)
+ return -EINVAL;
+
+@@ -2441,7 +2454,7 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
+ }
+
+ out_put:
+- put_qp_read(qp);
++ put_qp_write(qp);
+
+ return ret ? ret : in_len;
+ }
+diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
+index 781a802..4f18e2d 100644
+--- a/drivers/infiniband/hw/qib/qib_iba6120.c
++++ b/drivers/infiniband/hw/qib/qib_iba6120.c
+@@ -2076,9 +2076,11 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd)
+ static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+ u32 updegr, u32 egrhd, u32 npkts)
+ {
+- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ if (updegr)
+ qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
++ mmiowb();
++ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
++ mmiowb();
+ }
+
+ static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
+diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
+index 439d3c5..7ec4048 100644
+--- a/drivers/infiniband/hw/qib/qib_iba7220.c
++++ b/drivers/infiniband/hw/qib/qib_iba7220.c
+@@ -2725,9 +2725,11 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
+ static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+ u32 updegr, u32 egrhd, u32 npkts)
+ {
+- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ if (updegr)
+ qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
++ mmiowb();
++ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
++ mmiowb();
+ }
+
+ static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
+diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
+index 1d58959..5a070e8 100644
+--- a/drivers/infiniband/hw/qib/qib_iba7322.c
++++ b/drivers/infiniband/hw/qib/qib_iba7322.c
+@@ -4082,10 +4082,12 @@ static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
+ */
+ if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
+ adjust_rcv_timeout(rcd, npkts);
+- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ if (updegr)
+ qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
++ mmiowb();
++ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
++ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
++ mmiowb();
+ }
+
+ static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 7f87568..e58aa2b 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1822,7 +1822,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ "but new slave device does not support netpoll.\n",
+ bond_dev->name);
+ res = -EBUSY;
+- goto err_close;
++ goto err_detach;
+ }
+ }
+ #endif
+@@ -1831,7 +1831,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+
+ res = bond_create_slave_symlinks(bond_dev, slave_dev);
+ if (res)
+- goto err_close;
++ goto err_detach;
+
+ res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
+ new_slave);
+@@ -1852,6 +1852,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ err_dest_symlinks:
+ bond_destroy_slave_symlinks(bond_dev, slave_dev);
+
++err_detach:
++ write_lock_bh(&bond->lock);
++ bond_detach_slave(bond, new_slave);
++ write_unlock_bh(&bond->lock);
++
+ err_close:
+ dev_close(slave_dev);
+
+diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
+index e95f0e6..dd2625a 100644
+--- a/drivers/net/usb/asix.c
++++ b/drivers/net/usb/asix.c
+@@ -376,7 +376,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+
+ skb_pull(skb, (size + 1) & 0xfffe);
+
+- if (skb->len == 0)
++ if (skb->len < sizeof(header))
+ break;
+
+ head = (u8 *) skb->data;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
+index 69d5f85..8b9ff28 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
++++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
+@@ -809,7 +809,7 @@ struct iwl_qosparam_cmd {
+ #define IWLAGN_STATION_COUNT 16
+
+ #define IWL_INVALID_STATION 255
+-#define IWL_MAX_TID_COUNT 9
++#define IWL_MAX_TID_COUNT 8
+
+ #define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
+ #define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+index 2b6756e..5c29281 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+@@ -219,9 +219,7 @@ struct iwl_trans_pcie {
+
+ /* INT ICT Table */
+ __le32 *ict_tbl;
+- void *ict_tbl_vir;
+ dma_addr_t ict_tbl_dma;
+- dma_addr_t aligned_ict_tbl_dma;
+ int ict_index;
+ u32 inta;
+ bool use_ict;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+index 374c68c..1920237 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+@@ -1136,7 +1136,11 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
+ * ICT functions
+ *
+ ******************************************************************************/
+-#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
++
++/* a device (PCI-E) page is 4096 bytes long */
++#define ICT_SHIFT 12
++#define ICT_SIZE (1 << ICT_SHIFT)
++#define ICT_COUNT (ICT_SIZE / sizeof(u32))
+
+ /* Free dram table */
+ void iwl_free_isr_ict(struct iwl_trans *trans)
+@@ -1144,21 +1148,19 @@ void iwl_free_isr_ict(struct iwl_trans *trans)
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+- if (trans_pcie->ict_tbl_vir) {
+- dma_free_coherent(bus(trans)->dev,
+- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+- trans_pcie->ict_tbl_vir,
++ if (trans_pcie->ict_tbl) {
++ dma_free_coherent(bus(trans)->dev, ICT_SIZE,
++ trans_pcie->ict_tbl,
+ trans_pcie->ict_tbl_dma);
+- trans_pcie->ict_tbl_vir = NULL;
+- memset(&trans_pcie->ict_tbl_dma, 0,
+- sizeof(trans_pcie->ict_tbl_dma));
+- memset(&trans_pcie->aligned_ict_tbl_dma, 0,
+- sizeof(trans_pcie->aligned_ict_tbl_dma));
++ trans_pcie->ict_tbl = NULL;
++ trans_pcie->ict_tbl_dma = 0;
+ }
+ }
+
+
+-/* allocate dram shared table it is a PAGE_SIZE aligned
++/*
++ * allocate dram shared table, it is an aligned memory
++ * block of ICT_SIZE.
+ * also reset all data related to ICT table interrupt.
+ */
+ int iwl_alloc_isr_ict(struct iwl_trans *trans)
+@@ -1166,36 +1168,26 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+- /* allocate shrared data table */
+- trans_pcie->ict_tbl_vir =
+- dma_alloc_coherent(bus(trans)->dev,
+- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+- &trans_pcie->ict_tbl_dma, GFP_KERNEL);
+- if (!trans_pcie->ict_tbl_vir)
++ trans_pcie->ict_tbl =
++ dma_alloc_coherent(bus(trans)->dev, ICT_SIZE,
++ &trans_pcie->ict_tbl_dma,
++ GFP_KERNEL);
++ if (!trans_pcie->ict_tbl)
+ return -ENOMEM;
+
+- /* align table to PAGE_SIZE boundary */
+- trans_pcie->aligned_ict_tbl_dma =
+- ALIGN(trans_pcie->ict_tbl_dma, PAGE_SIZE);
+-
+- IWL_DEBUG_ISR(trans, "ict dma addr %Lx dma aligned %Lx diff %d\n",
+- (unsigned long long)trans_pcie->ict_tbl_dma,
+- (unsigned long long)trans_pcie->aligned_ict_tbl_dma,
+- (int)(trans_pcie->aligned_ict_tbl_dma -
+- trans_pcie->ict_tbl_dma));
++ /* just an API sanity check ... it is guaranteed to be aligned */
++ if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
++ iwl_free_isr_ict(trans);
++ return -EINVAL;
++ }
+
+- trans_pcie->ict_tbl = trans_pcie->ict_tbl_vir +
+- (trans_pcie->aligned_ict_tbl_dma -
+- trans_pcie->ict_tbl_dma);
++ IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
++ (unsigned long long)trans_pcie->ict_tbl_dma);
+
+- IWL_DEBUG_ISR(trans, "ict vir addr %p vir aligned %p diff %d\n",
+- trans_pcie->ict_tbl, trans_pcie->ict_tbl_vir,
+- (int)(trans_pcie->aligned_ict_tbl_dma -
+- trans_pcie->ict_tbl_dma));
++ IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);
+
+ /* reset table and index to all 0 */
+- memset(trans_pcie->ict_tbl_vir, 0,
+- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
++ memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
+ trans_pcie->ict_index = 0;
+
+ /* add periodic RX interrupt */
+@@ -1213,23 +1205,20 @@ int iwl_reset_ict(struct iwl_trans *trans)
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
+
+- if (!trans_pcie->ict_tbl_vir)
++ if (!trans_pcie->ict_tbl)
+ return 0;
+
+ spin_lock_irqsave(&trans->shrd->lock, flags);
+ iwl_disable_interrupts(trans);
+
+- memset(&trans_pcie->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
++ memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
+
+- val = trans_pcie->aligned_ict_tbl_dma >> PAGE_SHIFT;
++ val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
+
+ val |= CSR_DRAM_INT_TBL_ENABLE;
+ val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
+
+- IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%X "
+- "aligned dma address %Lx\n",
+- val,
+- (unsigned long long)trans_pcie->aligned_ict_tbl_dma);
++ IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
+
+ iwl_write32(bus(trans), CSR_DRAM_INT_TBL_REG, val);
+ trans_pcie->use_ict = true;
+diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
+index a7f1ab2..db64ef1 100644
+--- a/drivers/net/wireless/libertas/cfg.c
++++ b/drivers/net/wireless/libertas/cfg.c
+@@ -728,9 +728,11 @@ static void lbs_scan_worker(struct work_struct *work)
+ le16_to_cpu(scan_cmd->hdr.size),
+ lbs_ret_scan, 0);
+
+- if (priv->scan_channel >= priv->scan_req->n_channels)
++ if (priv->scan_channel >= priv->scan_req->n_channels) {
+ /* Mark scan done */
++ cancel_delayed_work(&priv->scan_work);
+ lbs_scan_done(priv);
++ }
+
+ /* Restart network */
+ if (carrier)
+@@ -759,12 +761,12 @@ static void _internal_start_scan(struct lbs_private *priv, bool internal,
+ request->n_ssids, request->n_channels, request->ie_len);
+
+ priv->scan_channel = 0;
+- queue_delayed_work(priv->work_thread, &priv->scan_work,
+- msecs_to_jiffies(50));
+-
+ priv->scan_req = request;
+ priv->internal_scan = internal;
+
++ queue_delayed_work(priv->work_thread, &priv->scan_work,
++ msecs_to_jiffies(50));
++
+ lbs_deb_leave(LBS_DEB_CFG80211);
+ }
+
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index 3778763..3265b34 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -976,6 +976,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x13b1, 0x0031) },
+ { USB_DEVICE(0x1737, 0x0070) },
+ { USB_DEVICE(0x1737, 0x0071) },
++ { USB_DEVICE(0x1737, 0x0077) },
+ /* Logitec */
+ { USB_DEVICE(0x0789, 0x0162) },
+ { USB_DEVICE(0x0789, 0x0163) },
+@@ -1171,7 +1172,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x1740, 0x0605) },
+ { USB_DEVICE(0x1740, 0x0615) },
+ /* Linksys */
+- { USB_DEVICE(0x1737, 0x0077) },
+ { USB_DEVICE(0x1737, 0x0078) },
+ /* Logitec */
+ { USB_DEVICE(0x0789, 0x0168) },
+diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
+index 6813379..a7b327d 100644
+--- a/drivers/net/wireless/wl12xx/boot.c
++++ b/drivers/net/wireless/wl12xx/boot.c
+@@ -347,6 +347,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
+ nvs_ptr += 3;
+
+ for (i = 0; i < burst_len; i++) {
++ if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len)
++ goto out_badnvs;
++
+ val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
+ | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
+
+@@ -358,6 +361,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
+ nvs_ptr += 4;
+ dest_addr += 4;
+ }
++
++ if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
++ goto out_badnvs;
+ }
+
+ /*
+@@ -369,6 +375,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
+ */
+ nvs_ptr = (u8 *)wl->nvs +
+ ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
++
++ if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
++ goto out_badnvs;
++
+ nvs_len -= nvs_ptr - (u8 *)wl->nvs;
+
+ /* Now we must set the partition correctly */
+@@ -384,6 +394,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
+
+ kfree(nvs_aligned);
+ return 0;
++
++out_badnvs:
++ wl1271_error("nvs data is malformed");
++ return -EILSEQ;
+ }
+
+ static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
+diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
+index a52299e..54a0d66 100644
+--- a/drivers/net/wireless/wl12xx/cmd.c
++++ b/drivers/net/wireless/wl12xx/cmd.c
+@@ -120,6 +120,11 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
+ if (!wl->nvs)
+ return -ENODEV;
+
++ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
++ wl1271_warning("FEM index from INI out of bounds");
++ return -EINVAL;
++ }
++
+ gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
+ if (!gen_parms)
+ return -ENOMEM;
+@@ -143,6 +148,12 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
+ gp->tx_bip_fem_manufacturer =
+ gen_parms->general_params.tx_bip_fem_manufacturer;
+
++ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
++ wl1271_warning("FEM index from FW out of bounds");
++ ret = -EINVAL;
++ goto out;
++ }
++
+ wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
+ answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
+
+@@ -162,6 +173,11 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
+ if (!wl->nvs)
+ return -ENODEV;
+
++ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
++ wl1271_warning("FEM index from ini out of bounds");
++ return -EINVAL;
++ }
++
+ gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
+ if (!gen_parms)
+ return -ENOMEM;
+@@ -186,6 +202,12 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
+ gp->tx_bip_fem_manufacturer =
+ gen_parms->general_params.tx_bip_fem_manufacturer;
+
++ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
++ wl1271_warning("FEM index from FW out of bounds");
++ ret = -EINVAL;
++ goto out;
++ }
++
+ wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
+ answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
+
+diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
+index 4ae8eff..abfb120 100644
+--- a/drivers/net/wireless/wl12xx/testmode.c
++++ b/drivers/net/wireless/wl12xx/testmode.c
+@@ -36,6 +36,7 @@ enum wl1271_tm_commands {
+ WL1271_TM_CMD_TEST,
+ WL1271_TM_CMD_INTERROGATE,
+ WL1271_TM_CMD_CONFIGURE,
++ WL1271_TM_CMD_NVS_PUSH, /* Not in use. Keep to not break ABI */
+ WL1271_TM_CMD_SET_PLT_MODE,
+ WL1271_TM_CMD_RECOVER,
+
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 4c823f3..90c8e3a 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -212,8 +212,9 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+ {
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ unsigned int mode;
++ unsigned long flags;
+
+- spin_lock(&port->lock);
++ spin_lock_irqsave(&port->lock, flags);
+
+ /* Disable interrupts */
+ UART_PUT_IDR(port, atmel_port->tx_done_mask);
+@@ -244,7 +245,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+ /* Enable interrupts */
+ UART_PUT_IER(port, atmel_port->tx_done_mask);
+
+- spin_unlock(&port->lock);
++ spin_unlock_irqrestore(&port->lock, flags);
+
+ }
+
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index a8078d0..e61d9c4 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -554,10 +554,18 @@ static void acm_port_down(struct acm *acm)
+
+ static void acm_tty_hangup(struct tty_struct *tty)
+ {
+- struct acm *acm = tty->driver_data;
+- tty_port_hangup(&acm->port);
++ struct acm *acm;
++
+ mutex_lock(&open_mutex);
++ acm = tty->driver_data;
++
++ if (!acm)
++ goto out;
++
++ tty_port_hangup(&acm->port);
+ acm_port_down(acm);
++
++out:
+ mutex_unlock(&open_mutex);
+ }
+
+@@ -1183,6 +1191,8 @@ made_compressed_probe:
+ i = device_create_file(&intf->dev, &dev_attr_wCountryCodes);
+ if (i < 0) {
+ kfree(acm->country_codes);
++ acm->country_codes = NULL;
++ acm->country_code_size = 0;
+ goto skip_countries;
+ }
+
+@@ -1191,6 +1201,8 @@ made_compressed_probe:
+ if (i < 0) {
+ device_remove_file(&intf->dev, &dev_attr_wCountryCodes);
+ kfree(acm->country_codes);
++ acm->country_codes = NULL;
++ acm->country_code_size = 0;
+ goto skip_countries;
+ }
+ }
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index e3beaf2..7abf060 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -249,7 +249,8 @@ static struct async *alloc_async(unsigned int numisoframes)
+ static void free_async(struct async *as)
+ {
+ put_pid(as->pid);
+- put_cred(as->cred);
++ if (as->cred)
++ put_cred(as->cred);
+ kfree(as->urb->transfer_buffer);
+ kfree(as->urb->setup_packet);
+ usb_free_urb(as->urb);
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 13222d3..179e364 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1412,11 +1412,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ ret = -EAGAIN;
+ else
+ urb->transfer_flags |= URB_DMA_MAP_SG;
+- if (n != urb->num_sgs) {
+- urb->num_sgs = n;
++ urb->num_mapped_sgs = n;
++ if (n != urb->num_sgs)
+ urb->transfer_flags |=
+ URB_DMA_SG_COMBINED;
+- }
+ } else if (urb->sg) {
+ struct scatterlist *sg = urb->sg;
+ urb->transfer_dma = dma_map_page(
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index ecf12e1..4c65eb6 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -117,9 +117,12 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { USB_DEVICE(0x06a3, 0x0006), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
+- /* Guillemot Webcam Hercules Dualpix Exchange*/
++ /* Guillemot Webcam Hercules Dualpix Exchange (2nd ID) */
+ { USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* Guillemot Webcam Hercules Dualpix Exchange*/
++ { USB_DEVICE(0x06f8, 0x3005), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ /* M-Systems Flash Disk Pioneers */
+ { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
+
+diff --git a/drivers/usb/host/ehci-pxa168.c b/drivers/usb/host/ehci-pxa168.c
+index ac0c16e..8d0e7a2 100644
+--- a/drivers/usb/host/ehci-pxa168.c
++++ b/drivers/usb/host/ehci-pxa168.c
+@@ -299,7 +299,7 @@ static int __devinit ehci_pxa168_drv_probe(struct platform_device *pdev)
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = hcd->regs + 0x100;
+ ehci->regs = hcd->regs + 0x100 +
+- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
++ HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+ hcd->has_tt = 1;
+ ehci->sbrn = 0x20;
+diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
+index 4e4066c..fef1db3 100644
+--- a/drivers/usb/host/ehci-q.c
++++ b/drivers/usb/host/ehci-q.c
+@@ -647,7 +647,7 @@ qh_urb_transaction (
+ /*
+ * data transfer stage: buffer setup
+ */
+- i = urb->num_sgs;
++ i = urb->num_mapped_sgs;
+ if (len > 0 && i > 0) {
+ sg = urb->sg;
+ buf = sg_dma_address(sg);
+diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
+index f6ca80e..d2c6f5a 100644
+--- a/drivers/usb/host/uhci-q.c
++++ b/drivers/usb/host/uhci-q.c
+@@ -943,7 +943,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
+ if (usb_pipein(urb->pipe))
+ status |= TD_CTRL_SPD;
+
+- i = urb->num_sgs;
++ i = urb->num_mapped_sgs;
+ if (len > 0 && i > 0) {
+ sg = urb->sg;
+ data = sg_dma_address(sg);
+diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
+index a403b53..76083ae 100644
+--- a/drivers/usb/host/whci/qset.c
++++ b/drivers/usb/host/whci/qset.c
+@@ -443,7 +443,7 @@ static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *u
+
+ remaining = urb->transfer_buffer_length;
+
+- for_each_sg(urb->sg, sg, urb->num_sgs, i) {
++ for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
+ dma_addr_t dma_addr;
+ size_t dma_remaining;
+ dma_addr_t sp, ep;
+@@ -561,7 +561,7 @@ static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
+
+ remaining = urb->transfer_buffer_length;
+
+- for_each_sg(urb->sg, sg, urb->num_sgs, i) {
++ for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
+ size_t len;
+ size_t sg_remaining;
+ void *orig;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 9f1d4b1..d28c586 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2561,7 +2561,7 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
+ struct scatterlist *sg;
+
+ sg = NULL;
+- num_sgs = urb->num_sgs;
++ num_sgs = urb->num_mapped_sgs;
+ temp = urb->transfer_buffer_length;
+
+ xhci_dbg(xhci, "count sg list trbs: \n");
+@@ -2745,7 +2745,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ return -EINVAL;
+
+ num_trbs = count_sg_trbs_needed(xhci, urb);
+- num_sgs = urb->num_sgs;
++ num_sgs = urb->num_mapped_sgs;
+ total_packet_count = roundup(urb->transfer_buffer_length,
+ usb_endpoint_maxp(&urb->ep->desc));
+
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index a1afb7c..b33f059 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1620,6 +1620,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
+ /* FIXME: can we allocate more resources for the HC? */
+ break;
+ case COMP_BW_ERR:
++ case COMP_2ND_BW_ERR:
+ dev_warn(&udev->dev, "Not enough bandwidth "
+ "for new device state.\n");
+ ret = -ENOSPC;
+@@ -2796,8 +2797,7 @@ static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
+ if (ret < 0)
+ return ret;
+
+- max_streams = USB_SS_MAX_STREAMS(
+- eps[i]->ss_ep_comp.bmAttributes);
++ max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
+ if (max_streams < (*num_streams - 1)) {
+ xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
+ eps[i]->desc.bEndpointAddress,
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 3c8fbd2..09eda3a 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1033,7 +1033,6 @@ struct xhci_transfer_event {
+ /* Invalid Stream ID Error */
+ #define COMP_STRID_ERR 34
+ /* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
+-/* FIXME - check for this */
+ #define COMP_2ND_BW_ERR 35
+ /* Split Transaction Error */
+ #define COMP_SPLIT_ERR 36
+diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
+index fe1d443..8f725f6 100644
+--- a/drivers/usb/misc/isight_firmware.c
++++ b/drivers/usb/misc/isight_firmware.c
+@@ -55,8 +55,9 @@ static int isight_firmware_load(struct usb_interface *intf,
+
+ ptr = firmware->data;
+
++ buf[0] = 0x01;
+ if (usb_control_msg
+- (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\1", 1,
++ (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1,
+ 300) != 1) {
+ printk(KERN_ERR
+ "Failed to initialise isight firmware loader\n");
+@@ -100,8 +101,9 @@ static int isight_firmware_load(struct usb_interface *intf,
+ }
+ }
+
++ buf[0] = 0x00;
+ if (usb_control_msg
+- (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\0", 1,
++ (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1,
+ 300) != 1) {
+ printk(KERN_ERR "isight firmware loading completion failed\n");
+ ret = -ENODEV;
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index b63ab15..920f04e 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -2012,8 +2012,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+ if (status < 0)
+ goto fail3;
+
+- pm_runtime_put(musb->controller);
+-
+ status = musb_init_debugfs(musb);
+ if (status < 0)
+ goto fail4;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index fd67cc5..a1a324b 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -92,6 +92,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
+ { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
+ { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
++ { USB_DEVICE(0x10C4, 0x81A9) }, /* Multiplex RC Interface */
+ { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
+ { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
+ { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
+diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
+index 60f38d5..0a8c1e6 100644
+--- a/drivers/usb/serial/omninet.c
++++ b/drivers/usb/serial/omninet.c
+@@ -315,7 +315,7 @@ static int omninet_write_room(struct tty_struct *tty)
+ int room = 0; /* Default: no room */
+
+ /* FIXME: no consistent locking for write_urb_busy */
+- if (wport->write_urb_busy)
++ if (!wport->write_urb_busy)
+ room = wport->bulk_out_size - OMNINET_HEADERLEN;
+
+ dbg("%s - returns %d", __func__, room);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 6dd6453..c96b6b6 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -476,6 +476,10 @@ static void option_instat_callback(struct urb *urb);
+ #define VIETTEL_VENDOR_ID 0x2262
+ #define VIETTEL_PRODUCT_VT1000 0x0002
+
++/* ZD Incorporated */
++#define ZD_VENDOR_ID 0x0685
++#define ZD_PRODUCT_7000 0x7000
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+@@ -1178,6 +1182,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
+ { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
+index c325e69..9e069ef 100644
+--- a/drivers/usb/storage/usb.c
++++ b/drivers/usb/storage/usb.c
+@@ -1073,6 +1073,7 @@ static struct usb_driver usb_storage_driver = {
+ .id_table = usb_storage_usb_ids,
+ .supports_autosuspend = 1,
+ .soft_unbind = 1,
++ .no_dynamic_id = 1,
+ };
+
+ static int __init usb_stor_init(void)
+diff --git a/drivers/video/offb.c b/drivers/video/offb.c
+index cb163a5..3251a02 100644
+--- a/drivers/video/offb.c
++++ b/drivers/video/offb.c
+@@ -100,36 +100,32 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int transp, struct fb_info *info)
+ {
+ struct offb_par *par = (struct offb_par *) info->par;
+- int i, depth;
+- u32 *pal = info->pseudo_palette;
+-
+- depth = info->var.bits_per_pixel;
+- if (depth == 16)
+- depth = (info->var.green.length == 5) ? 15 : 16;
+-
+- if (regno > 255 ||
+- (depth == 16 && regno > 63) ||
+- (depth == 15 && regno > 31))
+- return 1;
+-
+- if (regno < 16) {
+- switch (depth) {
+- case 15:
+- pal[regno] = (regno << 10) | (regno << 5) | regno;
+- break;
+- case 16:
+- pal[regno] = (regno << 11) | (regno << 5) | regno;
+- break;
+- case 24:
+- pal[regno] = (regno << 16) | (regno << 8) | regno;
+- break;
+- case 32:
+- i = (regno << 8) | regno;
+- pal[regno] = (i << 16) | i;
+- break;
++
++ if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
++ u32 *pal = info->pseudo_palette;
++ u32 cr = red >> (16 - info->var.red.length);
++ u32 cg = green >> (16 - info->var.green.length);
++ u32 cb = blue >> (16 - info->var.blue.length);
++ u32 value;
++
++ if (regno >= 16)
++ return -EINVAL;
++
++ value = (cr << info->var.red.offset) |
++ (cg << info->var.green.offset) |
++ (cb << info->var.blue.offset);
++ if (info->var.transp.length > 0) {
++ u32 mask = (1 << info->var.transp.length) - 1;
++ mask <<= info->var.transp.offset;
++ value |= mask;
+ }
++ pal[regno] = value;
++ return 0;
+ }
+
++ if (regno > 255)
++ return -EINVAL;
++
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+@@ -381,7 +377,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
+ int pitch, unsigned long address,
+ int foreign_endian, struct device_node *dp)
+ {
+- unsigned long res_size = pitch * height * (depth + 7) / 8;
++ unsigned long res_size = pitch * height;
+ struct offb_par *par = &default_par;
+ unsigned long res_start = address;
+ struct fb_fix_screeninfo *fix;
+diff --git a/fs/Kconfig b/fs/Kconfig
+index 5f4c45d..6ad58a5 100644
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -218,6 +218,8 @@ source "fs/exofs/Kconfig"
+
+ endif # MISC_FILESYSTEMS
+
++source "fs/exofs/Kconfig.ore"
++
+ menuconfig NETWORK_FILESYSTEMS
+ bool "Network File Systems"
+ default y
+diff --git a/fs/exofs/Kconfig b/fs/exofs/Kconfig
+index da42f32..86194b2 100644
+--- a/fs/exofs/Kconfig
++++ b/fs/exofs/Kconfig
+@@ -1,14 +1,3 @@
+-# Note ORE needs to "select ASYNC_XOR". So Not to force multiple selects
+-# for every ORE user we do it like this. Any user should add itself here
+-# at the "depends on EXOFS_FS || ..." with an ||. The dependencies are
+-# selected here, and we default to "ON". So in effect it is like been
+-# selected by any of the users.
+-config ORE
+- tristate
+- depends on EXOFS_FS || PNFS_OBJLAYOUT
+- select ASYNC_XOR
+- default SCSI_OSD_ULD
+-
+ config EXOFS_FS
+ tristate "exofs: OSD based file system support"
+ depends on SCSI_OSD_ULD
+diff --git a/fs/exofs/Kconfig.ore b/fs/exofs/Kconfig.ore
+new file mode 100644
+index 0000000..1ca7fb7
+--- /dev/null
++++ b/fs/exofs/Kconfig.ore
+@@ -0,0 +1,12 @@
++# ORE - Objects Raid Engine (libore.ko)
++#
++# Note ORE needs to "select ASYNC_XOR". So Not to force multiple selects
++# for every ORE user we do it like this. Any user should add itself here
++# at the "depends on EXOFS_FS || ..." with an ||. The dependencies are
++# selected here, and we default to "ON". So in effect it is like been
++# selected by any of the users.
++config ORE
++ tristate
++ depends on EXOFS_FS || PNFS_OBJLAYOUT
++ select ASYNC_XOR
++ default SCSI_OSD_ULD
+diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
+index d271ad8..49cf230 100644
+--- a/fs/exofs/ore.c
++++ b/fs/exofs/ore.c
+@@ -266,7 +266,7 @@ int ore_get_rw_state(struct ore_layout *layout, struct ore_components *oc,
+
+ /* first/last seg is split */
+ num_raid_units += layout->group_width;
+- sgs_per_dev = div_u64(num_raid_units, data_devs);
++ sgs_per_dev = div_u64(num_raid_units, data_devs) + 2;
+ } else {
+ /* For Writes add parity pages array. */
+ max_par_pages = num_raid_units * pages_in_unit *
+@@ -445,10 +445,10 @@ int ore_check_io(struct ore_io_state *ios, ore_on_dev_error on_dev_error)
+ u64 residual = ios->reading ?
+ or->in.residual : or->out.residual;
+ u64 offset = (ios->offset + ios->length) - residual;
+- struct ore_dev *od = ios->oc->ods[
+- per_dev->dev - ios->oc->first_dev];
++ unsigned dev = per_dev->dev - ios->oc->first_dev;
++ struct ore_dev *od = ios->oc->ods[dev];
+
+- on_dev_error(ios, od, per_dev->dev, osi.osd_err_pri,
++ on_dev_error(ios, od, dev, osi.osd_err_pri,
+ offset, residual);
+ }
+ if (osi.osd_err_pri >= acumulated_osd_err) {
+diff --git a/fs/exofs/ore_raid.c b/fs/exofs/ore_raid.c
+index 29c47e5..d222c77 100644
+--- a/fs/exofs/ore_raid.c
++++ b/fs/exofs/ore_raid.c
+@@ -328,8 +328,8 @@ static int _alloc_read_4_write(struct ore_io_state *ios)
+ /* @si contains info of the to-be-inserted page. Update of @si should be
+ * maintained by caller. Specificaly si->dev, si->obj_offset, ...
+ */
+-static int _add_to_read_4_write(struct ore_io_state *ios,
+- struct ore_striping_info *si, struct page *page)
++static int _add_to_r4w(struct ore_io_state *ios, struct ore_striping_info *si,
++ struct page *page, unsigned pg_len)
+ {
+ struct request_queue *q;
+ struct ore_per_dev_state *per_dev;
+@@ -366,17 +366,60 @@ static int _add_to_read_4_write(struct ore_io_state *ios,
+ _ore_add_sg_seg(per_dev, gap, true);
+ }
+ q = osd_request_queue(ore_comp_dev(read_ios->oc, per_dev->dev));
+- added_len = bio_add_pc_page(q, per_dev->bio, page, PAGE_SIZE, 0);
+- if (unlikely(added_len != PAGE_SIZE)) {
++ added_len = bio_add_pc_page(q, per_dev->bio, page, pg_len,
++ si->obj_offset % PAGE_SIZE);
++ if (unlikely(added_len != pg_len)) {
+ ORE_DBGMSG("Failed to bio_add_pc_page bi_vcnt=%d\n",
+ per_dev->bio->bi_vcnt);
+ return -ENOMEM;
+ }
+
+- per_dev->length += PAGE_SIZE;
++ per_dev->length += pg_len;
+ return 0;
+ }
+
++/* read the beginning of an unaligned first page */
++static int _add_to_r4w_first_page(struct ore_io_state *ios, struct page *page)
++{
++ struct ore_striping_info si;
++ unsigned pg_len;
++
++ ore_calc_stripe_info(ios->layout, ios->offset, 0, &si);
++
++ pg_len = si.obj_offset % PAGE_SIZE;
++ si.obj_offset -= pg_len;
++
++ ORE_DBGMSG("offset=0x%llx len=0x%x index=0x%lx dev=%x\n",
++ _LLU(si.obj_offset), pg_len, page->index, si.dev);
++
++ return _add_to_r4w(ios, &si, page, pg_len);
++}
++
++/* read the end of an incomplete last page */
++static int _add_to_r4w_last_page(struct ore_io_state *ios, u64 *offset)
++{
++ struct ore_striping_info si;
++ struct page *page;
++ unsigned pg_len, p, c;
++
++ ore_calc_stripe_info(ios->layout, *offset, 0, &si);
++
++ p = si.unit_off / PAGE_SIZE;
++ c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
++ ios->layout->mirrors_p1, si.par_dev, si.dev);
++ page = ios->sp2d->_1p_stripes[p].pages[c];
++
++ pg_len = PAGE_SIZE - (si.unit_off % PAGE_SIZE);
++ *offset += pg_len;
++
++ ORE_DBGMSG("p=%d, c=%d next-offset=0x%llx len=0x%x dev=%x par_dev=%d\n",
++ p, c, _LLU(*offset), pg_len, si.dev, si.par_dev);
++
++ BUG_ON(!page);
++
++ return _add_to_r4w(ios, &si, page, pg_len);
++}
++
+ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
+ {
+ struct bio_vec *bv;
+@@ -444,9 +487,13 @@ static int _read_4_write(struct ore_io_state *ios)
+ struct page **pp = &_1ps->pages[c];
+ bool uptodate;
+
+- if (*pp)
++ if (*pp) {
++ if (ios->offset % PAGE_SIZE)
++ /* Read the remainder of the page */
++ _add_to_r4w_first_page(ios, *pp);
+ /* to-be-written pages start here */
+ goto read_last_stripe;
++ }
+
+ *pp = ios->r4w->get_page(ios->private, offset,
+ &uptodate);
+@@ -454,7 +501,7 @@ static int _read_4_write(struct ore_io_state *ios)
+ return -ENOMEM;
+
+ if (!uptodate)
+- _add_to_read_4_write(ios, &read_si, *pp);
++ _add_to_r4w(ios, &read_si, *pp, PAGE_SIZE);
+
+ /* Mark read-pages to be cache_released */
+ _1ps->page_is_read[c] = true;
+@@ -465,8 +512,11 @@ static int _read_4_write(struct ore_io_state *ios)
+ }
+
+ read_last_stripe:
+- offset = ios->offset + (ios->length + PAGE_SIZE - 1) /
+- PAGE_SIZE * PAGE_SIZE;
++ offset = ios->offset + ios->length;
++ if (offset % PAGE_SIZE)
++ _add_to_r4w_last_page(ios, &offset);
++ /* offset will be aligned to next page */
++
+ last_stripe_end = div_u64(offset + bytes_in_stripe - 1, bytes_in_stripe)
+ * bytes_in_stripe;
+ if (offset == last_stripe_end) /* Optimize for the aligned case */
+@@ -503,7 +553,7 @@ read_last_stripe:
+ /* Mark read-pages to be cache_released */
+ _1ps->page_is_read[c] = true;
+ if (!uptodate)
+- _add_to_read_4_write(ios, &read_si, page);
++ _add_to_r4w(ios, &read_si, page, PAGE_SIZE);
+ }
+
+ offset += PAGE_SIZE;
+@@ -551,7 +601,11 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
+ unsigned cur_len)
+ {
+ if (ios->reading) {
+- BUG_ON(per_dev->cur_sg >= ios->sgs_per_dev);
++ if (per_dev->cur_sg >= ios->sgs_per_dev) {
++ ORE_DBGMSG("cur_sg(%d) >= sgs_per_dev(%d)\n" ,
++ per_dev->cur_sg, ios->sgs_per_dev);
++ return -ENOMEM;
++ }
+ _ore_add_sg_seg(per_dev, cur_len, true);
+ } else {
+ struct __stripe_pages_2d *sp2d = ios->sp2d;
+@@ -612,8 +666,6 @@ int _ore_post_alloc_raid_stuff(struct ore_io_state *ios)
+ return -ENOMEM;
+ }
+
+- BUG_ON(ios->offset % PAGE_SIZE);
+-
+ /* Round io down to last full strip */
+ first_stripe = div_u64(ios->offset, stripe_size);
+ last_stripe = div_u64(ios->offset + ios->length, stripe_size);
+diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
+index 85fe655..5b3f907 100644
+--- a/fs/ext3/inode.c
++++ b/fs/ext3/inode.c
+@@ -1617,7 +1617,13 @@ static int ext3_ordered_writepage(struct page *page,
+ int err;
+
+ J_ASSERT(PageLocked(page));
+- WARN_ON_ONCE(IS_RDONLY(inode));
++ /*
++ * We don't want to warn for emergency remount. The condition is
++ * ordered to avoid dereferencing inode->i_sb in non-error case to
++ * avoid slow-downs.
++ */
++ WARN_ON_ONCE(IS_RDONLY(inode) &&
++ !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
+
+ /*
+ * We give up here if we're reentered, because it might be for a
+@@ -1692,7 +1698,13 @@ static int ext3_writeback_writepage(struct page *page,
+ int err;
+
+ J_ASSERT(PageLocked(page));
+- WARN_ON_ONCE(IS_RDONLY(inode));
++ /*
++ * We don't want to warn for emergency remount. The condition is
++ * ordered to avoid dereferencing inode->i_sb in non-error case to
++ * avoid slow-downs.
++ */
++ WARN_ON_ONCE(IS_RDONLY(inode) &&
++ !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
+
+ if (ext3_journal_current_handle())
+ goto out_fail;
+@@ -1735,7 +1747,13 @@ static int ext3_journalled_writepage(struct page *page,
+ int err;
+
+ J_ASSERT(PageLocked(page));
+- WARN_ON_ONCE(IS_RDONLY(inode));
++ /*
++ * We don't want to warn for emergency remount. The condition is
++ * ordered to avoid dereferencing inode->i_sb in non-error case to
++ * avoid slow-downs.
++ */
++ WARN_ON_ONCE(IS_RDONLY(inode) &&
++ !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
+
+ if (ext3_journal_current_handle())
+ goto no_write;
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index 14363b9..5e3527b 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -453,16 +453,20 @@ int remove_save_link(struct inode *inode, int truncate)
+ static void reiserfs_kill_sb(struct super_block *s)
+ {
+ if (REISERFS_SB(s)) {
+- if (REISERFS_SB(s)->xattr_root) {
+- d_invalidate(REISERFS_SB(s)->xattr_root);
+- dput(REISERFS_SB(s)->xattr_root);
+- REISERFS_SB(s)->xattr_root = NULL;
+- }
+- if (REISERFS_SB(s)->priv_root) {
+- d_invalidate(REISERFS_SB(s)->priv_root);
+- dput(REISERFS_SB(s)->priv_root);
+- REISERFS_SB(s)->priv_root = NULL;
+- }
++ /*
++ * Force any pending inode evictions to occur now. Any
++ * inodes to be removed that have extended attributes
++ * associated with them need to clean them up before
++ * we can release the extended attribute root dentries.
++ * shrink_dcache_for_umount will BUG if we don't release
++ * those before it's called so ->put_super is too late.
++ */
++ shrink_dcache_sb(s);
++
++ dput(REISERFS_SB(s)->xattr_root);
++ REISERFS_SB(s)->xattr_root = NULL;
++ dput(REISERFS_SB(s)->priv_root);
++ REISERFS_SB(s)->priv_root = NULL;
+ }
+
+ kill_block_super(s);
+@@ -1164,7 +1168,8 @@ static void handle_quota_files(struct super_block *s, char **qf_names,
+ kfree(REISERFS_SB(s)->s_qf_names[i]);
+ REISERFS_SB(s)->s_qf_names[i] = qf_names[i];
+ }
+- REISERFS_SB(s)->s_jquota_fmt = *qfmt;
++ if (*qfmt)
++ REISERFS_SB(s)->s_jquota_fmt = *qfmt;
+ }
+ #endif
+
+diff --git a/fs/udf/file.c b/fs/udf/file.c
+index d8ffa7c..dca0c38 100644
+--- a/fs/udf/file.c
++++ b/fs/udf/file.c
+@@ -125,7 +125,6 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ err = udf_expand_file_adinicb(inode);
+ if (err) {
+ udf_debug("udf_expand_adinicb: err=%d\n", err);
+- up_write(&iinfo->i_data_sem);
+ return err;
+ }
+ } else {
+@@ -133,9 +132,10 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ iinfo->i_lenAlloc = pos + count;
+ else
+ iinfo->i_lenAlloc = inode->i_size;
++ up_write(&iinfo->i_data_sem);
+ }
+- }
+- up_write(&iinfo->i_data_sem);
++ } else
++ up_write(&iinfo->i_data_sem);
+
+ retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
+ if (retval > 0)
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 4fd1d80..e2787d0 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -151,6 +151,12 @@ const struct address_space_operations udf_aops = {
+ .bmap = udf_bmap,
+ };
+
++/*
++ * Expand file stored in ICB to a normal one-block-file
++ *
++ * This function requires i_data_sem for writing and releases it.
++ * This function requires i_mutex held
++ */
+ int udf_expand_file_adinicb(struct inode *inode)
+ {
+ struct page *page;
+@@ -169,9 +175,15 @@ int udf_expand_file_adinicb(struct inode *inode)
+ iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
+ /* from now on we have normal address_space methods */
+ inode->i_data.a_ops = &udf_aops;
++ up_write(&iinfo->i_data_sem);
+ mark_inode_dirty(inode);
+ return 0;
+ }
++ /*
++ * Release i_data_sem so that we can lock a page - page lock ranks
++ * above i_data_sem. i_mutex still protects us against file changes.
++ */
++ up_write(&iinfo->i_data_sem);
+
+ page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
+ if (!page)
+@@ -187,6 +199,7 @@ int udf_expand_file_adinicb(struct inode *inode)
+ SetPageUptodate(page);
+ kunmap(page);
+ }
++ down_write(&iinfo->i_data_sem);
+ memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00,
+ iinfo->i_lenAlloc);
+ iinfo->i_lenAlloc = 0;
+@@ -196,17 +209,20 @@ int udf_expand_file_adinicb(struct inode *inode)
+ iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
+ /* from now on we have normal address_space methods */
+ inode->i_data.a_ops = &udf_aops;
++ up_write(&iinfo->i_data_sem);
+ err = inode->i_data.a_ops->writepage(page, &udf_wbc);
+ if (err) {
+ /* Restore everything back so that we don't lose data... */
+ lock_page(page);
+ kaddr = kmap(page);
++ down_write(&iinfo->i_data_sem);
+ memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr,
+ inode->i_size);
+ kunmap(page);
+ unlock_page(page);
+ iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
+ inode->i_data.a_ops = &udf_adinicb_aops;
++ up_write(&iinfo->i_data_sem);
+ }
+ page_cache_release(page);
+ mark_inode_dirty(inode);
+@@ -1111,10 +1127,9 @@ int udf_setsize(struct inode *inode, loff_t newsize)
+ if (bsize <
+ (udf_file_entry_alloc_offset(inode) + newsize)) {
+ err = udf_expand_file_adinicb(inode);
+- if (err) {
+- up_write(&iinfo->i_data_sem);
++ if (err)
+ return err;
+- }
++ down_write(&iinfo->i_data_sem);
+ } else
+ iinfo->i_lenAlloc = newsize;
+ }
+diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
+index 76e4266..ac702a6 100644
+--- a/fs/xfs/xfs_acl.c
++++ b/fs/xfs/xfs_acl.c
+@@ -39,7 +39,7 @@ xfs_acl_from_disk(struct xfs_acl *aclp)
+ struct posix_acl_entry *acl_e;
+ struct posix_acl *acl;
+ struct xfs_acl_entry *ace;
+- int count, i;
++ unsigned int count, i;
+
+ count = be32_to_cpu(aclp->acl_cnt);
+ if (count > XFS_ACL_MAX_ENTRIES)
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index d3d0c13..7503352 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -1221,6 +1221,7 @@ struct urb {
+ void *transfer_buffer; /* (in) associated data buffer */
+ dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */
+ struct scatterlist *sg; /* (in) scatter gather buffer list */
++ int num_mapped_sgs; /* (internal) mapped sg entries */
+ int num_sgs; /* (in) number of entries in the sg list */
+ u32 transfer_buffer_length; /* (in) data buffer length */
+ u32 actual_length; /* (return) actual transfer length */
+diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
+index d5da6c6..61b2905 100644
+--- a/include/linux/usb/ch9.h
++++ b/include/linux/usb/ch9.h
+@@ -605,8 +605,26 @@ struct usb_ss_ep_comp_descriptor {
+ } __attribute__ ((packed));
+
+ #define USB_DT_SS_EP_COMP_SIZE 6
++
+ /* Bits 4:0 of bmAttributes if this is a bulk endpoint */
+-#define USB_SS_MAX_STREAMS(p) (1 << ((p) & 0x1f))
++static inline int
++usb_ss_max_streams(const struct usb_ss_ep_comp_descriptor *comp)
++{
++ int max_streams;
++
++ if (!comp)
++ return 0;
++
++ max_streams = comp->bmAttributes & 0x1f;
++
++ if (!max_streams)
++ return 0;
++
++ max_streams = 1 << max_streams;
++
++ return max_streams;
++}
++
+ /* Bits 1:0 of bmAttributes if this is an isoc endpoint */
+ #define USB_SS_MULT(p) (1 + ((p) & 0x3))
+
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index a184470..cdc0354 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -1175,10 +1175,10 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
+
+ /*
+ * If the 'all' option was specified select all the subsystems,
+- * otherwise 'all, 'none' and a subsystem name options were not
+- * specified, let's default to 'all'
++ * otherwise if 'none', 'name=' and a subsystem name options
++ * were not specified, let's default to 'all'
+ */
+- if (all_ss || (!all_ss && !one_ss && !opts->none)) {
++ if (all_ss || (!one_ss && !opts->none && !opts->name)) {
+ for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ struct cgroup_subsys *ss = subsys[i];
+ if (ss == NULL)
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index b2ca095..c3cc64c 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -875,6 +875,8 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
+ * to be intended in a v3 query.
+ */
+ max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
++ if (!max_delay)
++ max_delay = 1; /* can't mod w/ 0 */
+ } else { /* v3 */
+ if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
+ return;
+diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
+index 6c164dc..bf54c48 100644
+--- a/tools/perf/util/trace-event-parse.c
++++ b/tools/perf/util/trace-event-parse.c
+@@ -1582,6 +1582,8 @@ process_symbols(struct event *event, struct print_arg *arg, char **tok)
+ field = malloc_or_die(sizeof(*field));
+
+ type = process_arg(event, field, &token);
++ while (type == EVENT_OP)
++ type = process_op(event, field, &token);
+ if (test_type_token(type, token, EVENT_DELIM, ","))
+ goto out_free;
+
diff --git a/1001_linux-3.2.2.patch b/1001_linux-3.2.2.patch
new file mode 100644
index 00000000..ec16cce2
--- /dev/null
+++ b/1001_linux-3.2.2.patch
@@ -0,0 +1,6552 @@
+diff --git a/Makefile b/Makefile
+index c5edffa..2f684da 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
+index bfb4d01..5207035 100644
+--- a/arch/ia64/kernel/acpi.c
++++ b/arch/ia64/kernel/acpi.c
+@@ -429,22 +429,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
+ static struct acpi_table_slit __initdata *slit_table;
+ cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
+
+-static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
++static int __init
++get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
+ {
+ int pxm;
+
+ pxm = pa->proximity_domain_lo;
+- if (ia64_platform_is("sn2"))
++ if (ia64_platform_is("sn2") || acpi_srat_revision >= 2)
+ pxm += pa->proximity_domain_hi[0] << 8;
+ return pxm;
+ }
+
+-static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
++static int __init
++get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
+ {
+ int pxm;
+
+ pxm = ma->proximity_domain;
+- if (!ia64_platform_is("sn2"))
++ if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1)
+ pxm &= 0xff;
+
+ return pxm;
+diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S
+index 577abba..83bb960 100644
+--- a/arch/score/kernel/entry.S
++++ b/arch/score/kernel/entry.S
+@@ -408,7 +408,7 @@ ENTRY(handle_sys)
+ sw r9, [r0, PT_EPC]
+
+ cmpi.c r27, __NR_syscalls # check syscall number
+- bgtu illegal_syscall
++ bgeu illegal_syscall
+
+ slli r8, r27, 2 # get syscall routine
+ la r11, sys_call_table
+diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
+index 8e41071..49ad773 100644
+--- a/arch/x86/include/asm/amd_nb.h
++++ b/arch/x86/include/asm/amd_nb.h
+@@ -1,6 +1,7 @@
+ #ifndef _ASM_X86_AMD_NB_H
+ #define _ASM_X86_AMD_NB_H
+
++#include <linux/ioport.h>
+ #include <linux/pci.h>
+
+ struct amd_nb_bus_dev_range {
+@@ -13,6 +14,7 @@ extern const struct pci_device_id amd_nb_misc_ids[];
+ extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
+
+ extern bool early_is_amd_nb(u32 value);
++extern struct resource *amd_get_mmconfig_range(struct resource *res);
+ extern int amd_cache_northbridges(void);
+ extern void amd_flush_garts(void);
+ extern int amd_numa_init(void);
+diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
+index 8e862aa..1b82f7e 100644
+--- a/arch/x86/include/asm/uv/uv_bau.h
++++ b/arch/x86/include/asm/uv/uv_bau.h
+@@ -65,7 +65,7 @@
+ * UV2: Bit 19 selects between
+ * (0): 10 microsecond timebase and
+ * (1): 80 microseconds
+- * we're using 655us, similar to UV1: 65 units of 10us
++ * we're using 560us, similar to UV1: 65 units of 10us
+ */
+ #define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
+ #define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
+@@ -167,6 +167,7 @@
+ #define FLUSH_RETRY_TIMEOUT 2
+ #define FLUSH_GIVEUP 3
+ #define FLUSH_COMPLETE 4
++#define FLUSH_RETRY_BUSYBUG 5
+
+ /*
+ * tuning the action when the numalink network is extremely delayed
+@@ -235,10 +236,10 @@ struct bau_msg_payload {
+
+
+ /*
+- * Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
++ * UV1 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
+ * see table 4.2.3.0.1 in broacast_assist spec.
+ */
+-struct bau_msg_header {
++struct uv1_bau_msg_header {
+ unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
+ /* bits 5:0 */
+ unsigned int base_dest_nasid:15; /* nasid of the first bit */
+@@ -318,19 +319,87 @@ struct bau_msg_header {
+ };
+
+ /*
++ * UV2 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
++ * see figure 9-2 of harp_sys.pdf
++ */
++struct uv2_bau_msg_header {
++ unsigned int base_dest_nasid:15; /* nasid of the first bit */
++ /* bits 14:0 */ /* in uvhub map */
++ unsigned int dest_subnodeid:5; /* must be 0x10, for the LB */
++ /* bits 19:15 */
++ unsigned int rsvd_1:1; /* must be zero */
++ /* bit 20 */
++ /* Address bits 59:21 */
++ /* bits 25:2 of address (44:21) are payload */
++ /* these next 24 bits become bytes 12-14 of msg */
++ /* bits 28:21 land in byte 12 */
++ unsigned int replied_to:1; /* sent as 0 by the source to
++ byte 12 */
++ /* bit 21 */
++ unsigned int msg_type:3; /* software type of the
++ message */
++ /* bits 24:22 */
++ unsigned int canceled:1; /* message canceled, resource
++ is to be freed*/
++ /* bit 25 */
++ unsigned int payload_1:3; /* not currently used */
++ /* bits 28:26 */
++
++ /* bits 36:29 land in byte 13 */
++ unsigned int payload_2a:3; /* not currently used */
++ unsigned int payload_2b:5; /* not currently used */
++ /* bits 36:29 */
++
++ /* bits 44:37 land in byte 14 */
++ unsigned int payload_3:8; /* not currently used */
++ /* bits 44:37 */
++
++ unsigned int rsvd_2:7; /* reserved */
++ /* bits 51:45 */
++ unsigned int swack_flag:1; /* software acknowledge flag */
++ /* bit 52 */
++ unsigned int rsvd_3a:3; /* must be zero */
++ unsigned int rsvd_3b:8; /* must be zero */
++ unsigned int rsvd_3c:8; /* must be zero */
++ unsigned int rsvd_3d:3; /* must be zero */
++ /* bits 74:53 */
++ unsigned int fairness:3; /* usually zero */
++ /* bits 77:75 */
++
++ unsigned int sequence:16; /* message sequence number */
++ /* bits 93:78 Suppl_A */
++ unsigned int chaining:1; /* next descriptor is part of
++ this activation*/
++ /* bit 94 */
++ unsigned int multilevel:1; /* multi-level multicast
++ format */
++ /* bit 95 */
++ unsigned int rsvd_4:24; /* ordered / source node /
++ source subnode / aging
++ must be zero */
++ /* bits 119:96 */
++ unsigned int command:8; /* message type */
++ /* bits 127:120 */
++};
++
++/*
+ * The activation descriptor:
+ * The format of the message to send, plus all accompanying control
+ * Should be 64 bytes
+ */
+ struct bau_desc {
+- struct pnmask distribution;
++ struct pnmask distribution;
+ /*
+ * message template, consisting of header and payload:
+ */
+- struct bau_msg_header header;
+- struct bau_msg_payload payload;
++ union bau_msg_header {
++ struct uv1_bau_msg_header uv1_hdr;
++ struct uv2_bau_msg_header uv2_hdr;
++ } header;
++
++ struct bau_msg_payload payload;
+ };
+-/*
++/* UV1:
+ * -payload-- ---------header------
+ * bytes 0-11 bits 41-56 bits 58-81
+ * A B (2) C (3)
+@@ -340,6 +409,16 @@ struct bau_desc {
+ * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
+ * ------------payload queue-----------
+ */
++/* UV2:
++ * -payload-- ---------header------
++ * bytes 0-11 bits 70-78 bits 21-44
++ * A B (2) C (3)
++ *
++ * A/B/C are moved to:
++ * A C B
++ * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
++ * ------------payload queue-----------
++ */
+
+ /*
+ * The payload queue on the destination side is an array of these.
+@@ -385,7 +464,6 @@ struct bau_pq_entry {
+ struct msg_desc {
+ struct bau_pq_entry *msg;
+ int msg_slot;
+- int swack_slot;
+ struct bau_pq_entry *queue_first;
+ struct bau_pq_entry *queue_last;
+ };
+@@ -439,6 +517,9 @@ struct ptc_stats {
+ unsigned long s_retry_messages; /* retry broadcasts */
+ unsigned long s_bau_reenabled; /* for bau enable/disable */
+ unsigned long s_bau_disabled; /* for bau enable/disable */
++ unsigned long s_uv2_wars; /* uv2 workaround, perm. busy */
++ unsigned long s_uv2_wars_hw; /* uv2 workaround, hiwater */
++ unsigned long s_uv2_war_waits; /* uv2 workaround, long waits */
+ /* destination statistics */
+ unsigned long d_alltlb; /* times all tlb's on this
+ cpu were flushed */
+@@ -511,9 +592,12 @@ struct bau_control {
+ short osnode;
+ short uvhub_cpu;
+ short uvhub;
++ short uvhub_version;
+ short cpus_in_socket;
+ short cpus_in_uvhub;
+ short partition_base_pnode;
++ short using_desc; /* an index, like uvhub_cpu */
++ unsigned int inuse_map;
+ unsigned short message_number;
+ unsigned short uvhub_quiesce;
+ short socket_acknowledge_count[DEST_Q_SIZE];
+@@ -531,6 +615,7 @@ struct bau_control {
+ int cong_response_us;
+ int cong_reps;
+ int cong_period;
++ unsigned long clocks_per_100_usec;
+ cycles_t period_time;
+ long period_requests;
+ struct hub_and_pnode *thp;
+@@ -591,6 +676,11 @@ static inline void write_mmr_sw_ack(unsigned long mr)
+ uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
+ }
+
++static inline void write_gmmr_sw_ack(int pnode, unsigned long mr)
++{
++ write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
++}
++
+ static inline unsigned long read_mmr_sw_ack(void)
+ {
+ return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 4c39baa..bae1efe 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -119,6 +119,37 @@ bool __init early_is_amd_nb(u32 device)
+ return false;
+ }
+
++struct resource *amd_get_mmconfig_range(struct resource *res)
++{
++ u32 address;
++ u64 base, msr;
++ unsigned segn_busn_bits;
++
++ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
++ return NULL;
++
++ /* assume all cpus from fam10h have mmconfig */
++ if (boot_cpu_data.x86 < 0x10)
++ return NULL;
++
++ address = MSR_FAM10H_MMIO_CONF_BASE;
++ rdmsrl(address, msr);
++
++ /* mmconfig is not enabled */
++ if (!(msr & FAM10H_MMIO_CONF_ENABLE))
++ return NULL;
++
++ base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
++
++ segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
++ FAM10H_MMIO_CONF_BUSRANGE_MASK;
++
++ res->flags = IORESOURCE_MEM;
++ res->start = base;
++ res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
++ return res;
++}
++
+ int amd_get_subcaches(int cpu)
+ {
+ struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
+diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
+index 9d59bba..79b05b8 100644
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -769,7 +769,12 @@ void __init uv_system_init(void)
+ for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
+ uv_possible_blades +=
+ hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
+- printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
++
++ /* uv_num_possible_blades() is really the hub count */
++ printk(KERN_INFO "UV: Found %d blades, %d hubs\n",
++ is_uv1_hub() ? uv_num_possible_blades() :
++ (uv_num_possible_blades() + 1) / 2,
++ uv_num_possible_blades());
+
+ bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
+ uv_blade_info = kzalloc(bytes, GFP_KERNEL);
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index 4b5ba85..845df68 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -75,9 +75,9 @@ static unsigned long mmap_rnd(void)
+ */
+ if (current->flags & PF_RANDOMIZE) {
+ if (mmap_is_ia32())
+- rnd = (long)get_random_int() % (1<<8);
++ rnd = get_random_int() % (1<<8);
+ else
+- rnd = (long)(get_random_int() % (1<<28));
++ rnd = get_random_int() % (1<<28);
+ }
+ return rnd << PAGE_SHIFT;
+ }
+diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
+index 81dbfde..7efd0c6 100644
+--- a/arch/x86/mm/srat.c
++++ b/arch/x86/mm/srat.c
+@@ -104,6 +104,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
+ if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
+ return;
+ pxm = pa->proximity_domain_lo;
++ if (acpi_srat_revision >= 2)
++ pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8;
+ node = setup_node(pxm);
+ if (node < 0) {
+ printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
+@@ -155,6 +157,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
+ start = ma->base_address;
+ end = start + ma->length;
+ pxm = ma->proximity_domain;
++ if (acpi_srat_revision <= 1)
++ pxm &= 0xff;
+ node = setup_node(pxm);
+ if (node < 0) {
+ printk(KERN_ERR "SRAT: Too many proximity domains.\n");
+diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
+index 6b8759f..d24d3da 100644
+--- a/arch/x86/pci/Makefile
++++ b/arch/x86/pci/Makefile
+@@ -18,8 +18,9 @@ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
+ obj-$(CONFIG_X86_MRST) += mrst.o
+
+ obj-y += common.o early.o
+-obj-y += amd_bus.o bus_numa.o
++obj-y += bus_numa.o
+
++obj-$(CONFIG_AMD_NB) += amd_bus.o
+ obj-$(CONFIG_PCI_CNB20LE_QUIRK) += broadcom_bus.o
+
+ ifeq ($(CONFIG_PCI_DEBUG),y)
+diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
+index 404f21a..f8348ab 100644
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -149,7 +149,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
+ struct acpi_resource_address64 addr;
+ acpi_status status;
+ unsigned long flags;
+- u64 start, end;
++ u64 start, orig_end, end;
+
+ status = resource_to_addr(acpi_res, &addr);
+ if (!ACPI_SUCCESS(status))
+@@ -165,7 +165,21 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
+ return AE_OK;
+
+ start = addr.minimum + addr.translation_offset;
+- end = addr.maximum + addr.translation_offset;
++ orig_end = end = addr.maximum + addr.translation_offset;
++
++ /* Exclude non-addressable range or non-addressable portion of range */
++ end = min(end, (u64)iomem_resource.end);
++ if (end <= start) {
++ dev_info(&info->bridge->dev,
++ "host bridge window [%#llx-%#llx] "
++ "(ignored, not CPU addressable)\n", start, orig_end);
++ return AE_OK;
++ } else if (orig_end != end) {
++ dev_info(&info->bridge->dev,
++ "host bridge window [%#llx-%#llx] "
++ "([%#llx-%#llx] ignored, not CPU addressable)\n",
++ start, orig_end, end + 1, orig_end);
++ }
+
+ res = &info->res[info->res_num];
+ res->name = info->name;
+diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
+index 026e493..385a940 100644
+--- a/arch/x86/pci/amd_bus.c
++++ b/arch/x86/pci/amd_bus.c
+@@ -30,34 +30,6 @@ static struct pci_hostbridge_probe pci_probes[] __initdata = {
+ { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 },
+ };
+
+-static u64 __initdata fam10h_mmconf_start;
+-static u64 __initdata fam10h_mmconf_end;
+-static void __init get_pci_mmcfg_amd_fam10h_range(void)
+-{
+- u32 address;
+- u64 base, msr;
+- unsigned segn_busn_bits;
+-
+- /* assume all cpus from fam10h have mmconf */
+- if (boot_cpu_data.x86 < 0x10)
+- return;
+-
+- address = MSR_FAM10H_MMIO_CONF_BASE;
+- rdmsrl(address, msr);
+-
+- /* mmconfig is not enable */
+- if (!(msr & FAM10H_MMIO_CONF_ENABLE))
+- return;
+-
+- base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
+-
+- segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
+- FAM10H_MMIO_CONF_BUSRANGE_MASK;
+-
+- fam10h_mmconf_start = base;
+- fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
+-}
+-
+ #define RANGE_NUM 16
+
+ /**
+@@ -85,6 +57,9 @@ static int __init early_fill_mp_bus_info(void)
+ u64 val;
+ u32 address;
+ bool found;
++ struct resource fam10h_mmconf_res, *fam10h_mmconf;
++ u64 fam10h_mmconf_start;
++ u64 fam10h_mmconf_end;
+
+ if (!early_pci_allowed())
+ return -1;
+@@ -211,12 +186,17 @@ static int __init early_fill_mp_bus_info(void)
+ subtract_range(range, RANGE_NUM, 0, end);
+
+ /* get mmconfig */
+- get_pci_mmcfg_amd_fam10h_range();
++ fam10h_mmconf = amd_get_mmconfig_range(&fam10h_mmconf_res);
+ /* need to take out mmconf range */
+- if (fam10h_mmconf_end) {
+- printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n", fam10h_mmconf_start, fam10h_mmconf_end);
++ if (fam10h_mmconf) {
++ printk(KERN_DEBUG "Fam 10h mmconf %pR\n", fam10h_mmconf);
++ fam10h_mmconf_start = fam10h_mmconf->start;
++ fam10h_mmconf_end = fam10h_mmconf->end;
+ subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
+ fam10h_mmconf_end + 1);
++ } else {
++ fam10h_mmconf_start = 0;
++ fam10h_mmconf_end = 0;
+ }
+
+ /* mmio resource */
+diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
+index 5b55219..9010ca7 100644
+--- a/arch/x86/platform/uv/tlb_uv.c
++++ b/arch/x86/platform/uv/tlb_uv.c
+@@ -157,13 +157,14 @@ static int __init uvhub_to_first_apicid(int uvhub)
+ * clear of the Timeout bit (as well) will free the resource. No reply will
+ * be sent (the hardware will only do one reply per message).
+ */
+-static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp)
++static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
++ int do_acknowledge)
+ {
+ unsigned long dw;
+ struct bau_pq_entry *msg;
+
+ msg = mdp->msg;
+- if (!msg->canceled) {
++ if (!msg->canceled && do_acknowledge) {
+ dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
+ write_mmr_sw_ack(dw);
+ }
+@@ -212,8 +213,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
+ if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
+ unsigned long mr;
+ /*
+- * is the resource timed out?
+- * make everyone ignore the cancelled message.
++ * Is the resource timed out?
++ * Make everyone ignore the cancelled message.
+ */
+ msg2->canceled = 1;
+ stat->d_canceled++;
+@@ -231,8 +232,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
+ * Do all the things a cpu should do for a TLB shootdown message.
+ * Other cpu's may come here at the same time for this message.
+ */
+-static void bau_process_message(struct msg_desc *mdp,
+- struct bau_control *bcp)
++static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
++ int do_acknowledge)
+ {
+ short socket_ack_count = 0;
+ short *sp;
+@@ -284,8 +285,9 @@ static void bau_process_message(struct msg_desc *mdp,
+ if (msg_ack_count == bcp->cpus_in_uvhub) {
+ /*
+ * All cpus in uvhub saw it; reply
++ * (unless we are in the UV2 workaround)
+ */
+- reply_to_message(mdp, bcp);
++ reply_to_message(mdp, bcp, do_acknowledge);
+ }
+ }
+
+@@ -491,27 +493,138 @@ static int uv1_wait_completion(struct bau_desc *bau_desc,
+ /*
+ * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
+ */
+-static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu)
++static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
+ {
+ unsigned long descriptor_status;
+ unsigned long descriptor_status2;
+
+ descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
+- descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL;
++ descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL;
+ descriptor_status = (descriptor_status << 1) | descriptor_status2;
+ return descriptor_status;
+ }
+
++/*
++ * Return whether the status of the descriptor that is normally used for this
++ * cpu (the one indexed by its hub-relative cpu number) is busy.
++ * The status of the original 32 descriptors is always reflected in the 64
++ * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
++ * The bit provided by the activation_status_2 register is irrelevant to
++ * the status if it is only being tested for busy or not busy.
++ */
++int normal_busy(struct bau_control *bcp)
++{
++ int cpu = bcp->uvhub_cpu;
++ int mmr_offset;
++ int right_shift;
++
++ mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
++ right_shift = cpu * UV_ACT_STATUS_SIZE;
++ return (((((read_lmmr(mmr_offset) >> right_shift) &
++ UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
++}
++
++/*
++ * Entered when a bau descriptor has gone into a permanent busy wait because
++ * of a hardware bug.
++ * Workaround the bug.
++ */
++int handle_uv2_busy(struct bau_control *bcp)
++{
++ int busy_one = bcp->using_desc;
++ int normal = bcp->uvhub_cpu;
++ int selected = -1;
++ int i;
++ unsigned long descriptor_status;
++ unsigned long status;
++ int mmr_offset;
++ struct bau_desc *bau_desc_old;
++ struct bau_desc *bau_desc_new;
++ struct bau_control *hmaster = bcp->uvhub_master;
++ struct ptc_stats *stat = bcp->statp;
++ cycles_t ttm;
++
++ stat->s_uv2_wars++;
++ spin_lock(&hmaster->uvhub_lock);
++ /* try for the original first */
++ if (busy_one != normal) {
++ if (!normal_busy(bcp))
++ selected = normal;
++ }
++ if (selected < 0) {
++ /* can't use the normal, select an alternate */
++ mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
++ descriptor_status = read_lmmr(mmr_offset);
++
++ /* scan available descriptors 32-63 */
++ for (i = 0; i < UV_CPUS_PER_AS; i++) {
++ if ((hmaster->inuse_map & (1 << i)) == 0) {
++ status = ((descriptor_status >>
++ (i * UV_ACT_STATUS_SIZE)) &
++ UV_ACT_STATUS_MASK) << 1;
++ if (status != UV2H_DESC_BUSY) {
++ selected = i + UV_CPUS_PER_AS;
++ break;
++ }
++ }
++ }
++ }
++
++ if (busy_one != normal)
++ /* mark the busy alternate as not in-use */
++ hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS));
++
++ if (selected >= 0) {
++ /* switch to the selected descriptor */
++ if (selected != normal) {
++ /* set the selected alternate as in-use */
++ hmaster->inuse_map |=
++ (1 << (selected - UV_CPUS_PER_AS));
++ if (selected > stat->s_uv2_wars_hw)
++ stat->s_uv2_wars_hw = selected;
++ }
++ bau_desc_old = bcp->descriptor_base;
++ bau_desc_old += (ITEMS_PER_DESC * busy_one);
++ bcp->using_desc = selected;
++ bau_desc_new = bcp->descriptor_base;
++ bau_desc_new += (ITEMS_PER_DESC * selected);
++ *bau_desc_new = *bau_desc_old;
++ } else {
++ /*
++ * All are busy. Wait for the normal one for this cpu to
++ * free up.
++ */
++ stat->s_uv2_war_waits++;
++ spin_unlock(&hmaster->uvhub_lock);
++ ttm = get_cycles();
++ do {
++ cpu_relax();
++ } while (normal_busy(bcp));
++ spin_lock(&hmaster->uvhub_lock);
++ /* switch to the original descriptor */
++ bcp->using_desc = normal;
++ bau_desc_old = bcp->descriptor_base;
++ bau_desc_old += (ITEMS_PER_DESC * bcp->using_desc);
++ bcp->using_desc = (ITEMS_PER_DESC * normal);
++ bau_desc_new = bcp->descriptor_base;
++ bau_desc_new += (ITEMS_PER_DESC * normal);
++ *bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
++ }
++ spin_unlock(&hmaster->uvhub_lock);
++ return FLUSH_RETRY_BUSYBUG;
++}
++
+ static int uv2_wait_completion(struct bau_desc *bau_desc,
+ unsigned long mmr_offset, int right_shift,
+ struct bau_control *bcp, long try)
+ {
+ unsigned long descriptor_stat;
+ cycles_t ttm;
+- int cpu = bcp->uvhub_cpu;
++ int desc = bcp->using_desc;
++ long busy_reps = 0;
+ struct ptc_stats *stat = bcp->statp;
+
+- descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
++ descriptor_stat = uv2_read_status(mmr_offset, right_shift, desc);
+
+ /* spin on the status MMR, waiting for it to go idle */
+ while (descriptor_stat != UV2H_DESC_IDLE) {
+@@ -542,12 +655,23 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
+ bcp->conseccompletes = 0;
+ return FLUSH_RETRY_TIMEOUT;
+ } else {
++ busy_reps++;
++ if (busy_reps > 1000000) {
++ /* not to hammer on the clock */
++ busy_reps = 0;
++ ttm = get_cycles();
++ if ((ttm - bcp->send_message) >
++ (bcp->clocks_per_100_usec)) {
++ return handle_uv2_busy(bcp);
++ }
++ }
+ /*
+ * descriptor_stat is still BUSY
+ */
+ cpu_relax();
+ }
+- descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
++ descriptor_stat = uv2_read_status(mmr_offset, right_shift,
++ desc);
+ }
+ bcp->conseccompletes++;
+ return FLUSH_COMPLETE;
+@@ -563,17 +687,17 @@ static int wait_completion(struct bau_desc *bau_desc,
+ {
+ int right_shift;
+ unsigned long mmr_offset;
+- int cpu = bcp->uvhub_cpu;
++ int desc = bcp->using_desc;
+
+- if (cpu < UV_CPUS_PER_AS) {
++ if (desc < UV_CPUS_PER_AS) {
+ mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
+- right_shift = cpu * UV_ACT_STATUS_SIZE;
++ right_shift = desc * UV_ACT_STATUS_SIZE;
+ } else {
+ mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
+- right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
++ right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
+ }
+
+- if (is_uv1_hub())
++ if (bcp->uvhub_version == 1)
+ return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
+ bcp, try);
+ else
+@@ -752,19 +876,22 @@ static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
+ * Returns 1 if it gives up entirely and the original cpu mask is to be
+ * returned to the kernel.
+ */
+-int uv_flush_send_and_wait(struct bau_desc *bau_desc,
+- struct cpumask *flush_mask, struct bau_control *bcp)
++int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
+ {
+ int seq_number = 0;
+ int completion_stat = 0;
++ int uv1 = 0;
+ long try = 0;
+ unsigned long index;
+ cycles_t time1;
+ cycles_t time2;
+ struct ptc_stats *stat = bcp->statp;
+ struct bau_control *hmaster = bcp->uvhub_master;
++ struct uv1_bau_msg_header *uv1_hdr = NULL;
++ struct uv2_bau_msg_header *uv2_hdr = NULL;
++ struct bau_desc *bau_desc;
+
+- if (is_uv1_hub())
++ if (bcp->uvhub_version == 1)
+ uv1_throttle(hmaster, stat);
+
+ while (hmaster->uvhub_quiesce)
+@@ -772,22 +899,39 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
+
+ time1 = get_cycles();
+ do {
+- if (try == 0) {
+- bau_desc->header.msg_type = MSG_REGULAR;
++ bau_desc = bcp->descriptor_base;
++ bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
++ if (bcp->uvhub_version == 1) {
++ uv1 = 1;
++ uv1_hdr = &bau_desc->header.uv1_hdr;
++ } else
++ uv2_hdr = &bau_desc->header.uv2_hdr;
++ if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) {
++ if (uv1)
++ uv1_hdr->msg_type = MSG_REGULAR;
++ else
++ uv2_hdr->msg_type = MSG_REGULAR;
+ seq_number = bcp->message_number++;
+ } else {
+- bau_desc->header.msg_type = MSG_RETRY;
++ if (uv1)
++ uv1_hdr->msg_type = MSG_RETRY;
++ else
++ uv2_hdr->msg_type = MSG_RETRY;
+ stat->s_retry_messages++;
+ }
+
+- bau_desc->header.sequence = seq_number;
+- index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
++ if (uv1)
++ uv1_hdr->sequence = seq_number;
++ else
++ uv2_hdr->sequence = seq_number;
++ index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc;
+ bcp->send_message = get_cycles();
+
+ write_mmr_activation(index);
+
+ try++;
+ completion_stat = wait_completion(bau_desc, bcp, try);
++ /* UV2: wait_completion() may change the bcp->using_desc */
+
+ handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
+
+@@ -798,6 +942,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
+ }
+ cpu_relax();
+ } while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
++ (completion_stat == FLUSH_RETRY_BUSYBUG) ||
+ (completion_stat == FLUSH_RETRY_TIMEOUT));
+
+ time2 = get_cycles();
+@@ -812,6 +957,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
+ record_send_stats(time1, time2, bcp, stat, completion_stat, try);
+
+ if (completion_stat == FLUSH_GIVEUP)
++ /* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
+ return 1;
+ return 0;
+ }
+@@ -967,7 +1113,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
+ stat->s_ntargself++;
+
+ bau_desc = bcp->descriptor_base;
+- bau_desc += ITEMS_PER_DESC * bcp->uvhub_cpu;
++ bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
+ bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
+ if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
+ return NULL;
+@@ -980,13 +1126,86 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
+ * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
+ * or 1 if it gave up and the original cpumask should be returned.
+ */
+- if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
++ if (!uv_flush_send_and_wait(flush_mask, bcp))
+ return NULL;
+ else
+ return cpumask;
+ }
+
+ /*
++ * Search the message queue for any 'other' message with the same software
++ * acknowledge resource bit vector.
++ */
++struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
++ struct bau_control *bcp, unsigned char swack_vec)
++{
++ struct bau_pq_entry *msg_next = msg + 1;
++
++ if (msg_next > bcp->queue_last)
++ msg_next = bcp->queue_first;
++ while ((msg_next->swack_vec != 0) && (msg_next != msg)) {
++ if (msg_next->swack_vec == swack_vec)
++ return msg_next;
++ msg_next++;
++ if (msg_next > bcp->queue_last)
++ msg_next = bcp->queue_first;
++ }
++ return NULL;
++}
++
++/*
++ * UV2 needs to work around a bug in which an arriving message has not
++ * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
++ * Such a message must be ignored.
++ */
++void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
++{
++ unsigned long mmr_image;
++ unsigned char swack_vec;
++ struct bau_pq_entry *msg = mdp->msg;
++ struct bau_pq_entry *other_msg;
++
++ mmr_image = read_mmr_sw_ack();
++ swack_vec = msg->swack_vec;
++
++ if ((swack_vec & mmr_image) == 0) {
++ /*
++ * This message was assigned a swack resource, but no
++ * reserved acknowlegment is pending.
++ * The bug has prevented this message from setting the MMR.
++ * And no other message has used the same sw_ack resource.
++ * Do the requested shootdown but do not reply to the msg.
++ * (the 0 means make no acknowledge)
++ */
++ bau_process_message(mdp, bcp, 0);
++ return;
++ }
++
++ /*
++ * Some message has set the MMR 'pending' bit; it might have been
++ * another message. Look for that message.
++ */
++ other_msg = find_another_by_swack(msg, bcp, msg->swack_vec);
++ if (other_msg) {
++ /* There is another. Do not ack the current one. */
++ bau_process_message(mdp, bcp, 0);
++ /*
++ * Let the natural processing of that message acknowledge
++ * it. Don't get the processing of sw_ack's out of order.
++ */
++ return;
++ }
++
++ /*
++ * There is no other message using this sw_ack, so it is safe to
++ * acknowledge it.
++ */
++ bau_process_message(mdp, bcp, 1);
++
++ return;
++}
++
++/*
+ * The BAU message interrupt comes here. (registered by set_intr_gate)
+ * See entry_64.S
+ *
+@@ -1022,9 +1241,11 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
+ count++;
+
+ msgdesc.msg_slot = msg - msgdesc.queue_first;
+- msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
+ msgdesc.msg = msg;
+- bau_process_message(&msgdesc, bcp);
++ if (bcp->uvhub_version == 2)
++ process_uv2_message(&msgdesc, bcp);
++ else
++ bau_process_message(&msgdesc, bcp, 1);
+
+ msg++;
+ if (msg > msgdesc.queue_last)
+@@ -1083,7 +1304,7 @@ static void __init enable_timeouts(void)
+ */
+ mmr_image |= (1L << SOFTACK_MSHIFT);
+ if (is_uv2_hub()) {
+- mmr_image |= (1L << UV2_LEG_SHFT);
++ mmr_image &= ~(1L << UV2_LEG_SHFT);
+ mmr_image |= (1L << UV2_EXT_SHFT);
+ }
+ write_mmr_misc_control(pnode, mmr_image);
+@@ -1142,7 +1363,7 @@ static int ptc_seq_show(struct seq_file *file, void *data)
+ seq_printf(file,
+ "all one mult none retry canc nocan reset rcan ");
+ seq_printf(file,
+- "disable enable\n");
++ "disable enable wars warshw warwaits\n");
+ }
+ if (cpu < num_possible_cpus() && cpu_online(cpu)) {
+ stat = &per_cpu(ptcstats, cpu);
+@@ -1173,8 +1394,10 @@ static int ptc_seq_show(struct seq_file *file, void *data)
+ stat->d_nomsg, stat->d_retries, stat->d_canceled,
+ stat->d_nocanceled, stat->d_resets,
+ stat->d_rcanceled);
+- seq_printf(file, "%ld %ld\n",
+- stat->s_bau_disabled, stat->s_bau_reenabled);
++ seq_printf(file, "%ld %ld %ld %ld %ld\n",
++ stat->s_bau_disabled, stat->s_bau_reenabled,
++ stat->s_uv2_wars, stat->s_uv2_wars_hw,
++ stat->s_uv2_war_waits);
+ }
+ return 0;
+ }
+@@ -1432,12 +1655,15 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
+ {
+ int i;
+ int cpu;
++ int uv1 = 0;
+ unsigned long gpa;
+ unsigned long m;
+ unsigned long n;
+ size_t dsize;
+ struct bau_desc *bau_desc;
+ struct bau_desc *bd2;
++ struct uv1_bau_msg_header *uv1_hdr;
++ struct uv2_bau_msg_header *uv2_hdr;
+ struct bau_control *bcp;
+
+ /*
+@@ -1451,6 +1677,8 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
+ gpa = uv_gpa(bau_desc);
+ n = uv_gpa_to_gnode(gpa);
+ m = uv_gpa_to_offset(gpa);
++ if (is_uv1_hub())
++ uv1 = 1;
+
+ /* the 14-bit pnode */
+ write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
+@@ -1461,21 +1689,33 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
+ */
+ for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
+ memset(bd2, 0, sizeof(struct bau_desc));
+- bd2->header.swack_flag = 1;
+- /*
+- * The base_dest_nasid set in the message header is the nasid
+- * of the first uvhub in the partition. The bit map will
+- * indicate destination pnode numbers relative to that base.
+- * They may not be consecutive if nasid striding is being used.
+- */
+- bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
+- bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
+- bd2->header.command = UV_NET_ENDPOINT_INTD;
+- bd2->header.int_both = 1;
+- /*
+- * all others need to be set to zero:
+- * fairness chaining multilevel count replied_to
+- */
++ if (uv1) {
++ uv1_hdr = &bd2->header.uv1_hdr;
++ uv1_hdr->swack_flag = 1;
++ /*
++ * The base_dest_nasid set in the message header
++ * is the nasid of the first uvhub in the partition.
++ * The bit map will indicate destination pnode numbers
++ * relative to that base. They may not be consecutive
++ * if nasid striding is being used.
++ */
++ uv1_hdr->base_dest_nasid =
++ UV_PNODE_TO_NASID(base_pnode);
++ uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
++ uv1_hdr->command = UV_NET_ENDPOINT_INTD;
++ uv1_hdr->int_both = 1;
++ /*
++ * all others need to be set to zero:
++ * fairness chaining multilevel count replied_to
++ */
++ } else {
++ uv2_hdr = &bd2->header.uv2_hdr;
++ uv2_hdr->swack_flag = 1;
++ uv2_hdr->base_dest_nasid =
++ UV_PNODE_TO_NASID(base_pnode);
++ uv2_hdr->dest_subnodeid = UV_LB_SUBNODEID;
++ uv2_hdr->command = UV_NET_ENDPOINT_INTD;
++ }
+ }
+ for_each_present_cpu(cpu) {
+ if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
+@@ -1531,6 +1771,7 @@ static void pq_init(int node, int pnode)
+ write_mmr_payload_first(pnode, pn_first);
+ write_mmr_payload_tail(pnode, first);
+ write_mmr_payload_last(pnode, last);
++ write_gmmr_sw_ack(pnode, 0xffffUL);
+
+ /* in effect, all msg_type's are set to MSG_NOOP */
+ memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
+@@ -1584,14 +1825,14 @@ static int calculate_destination_timeout(void)
+ ts_ns = base * mult1 * mult2;
+ ret = ts_ns / 1000;
+ } else {
+- /* 4 bits 0/1 for 10/80us, 3 bits of multiplier */
+- mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
++ /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
++ mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
+ mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
+ if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
+- mult1 = 80;
++ base = 80;
+ else
+- mult1 = 10;
+- base = mmr_image & UV2_ACK_MASK;
++ base = 10;
++ mult1 = mmr_image & UV2_ACK_MASK;
+ ret = mult1 * base;
+ }
+ return ret;
+@@ -1618,6 +1859,7 @@ static void __init init_per_cpu_tunables(void)
+ bcp->cong_response_us = congested_respns_us;
+ bcp->cong_reps = congested_reps;
+ bcp->cong_period = congested_period;
++ bcp->clocks_per_100_usec = usec_2_cycles(100);
+ }
+ }
+
+@@ -1728,8 +1970,17 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
+ bcp->cpus_in_socket = sdp->num_cpus;
+ bcp->socket_master = *smasterp;
+ bcp->uvhub = bdp->uvhub;
++ if (is_uv1_hub())
++ bcp->uvhub_version = 1;
++ else if (is_uv2_hub())
++ bcp->uvhub_version = 2;
++ else {
++ printk(KERN_EMERG "uvhub version not 1 or 2\n");
++ return 1;
++ }
+ bcp->uvhub_master = *hmasterp;
+ bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
++ bcp->using_desc = bcp->uvhub_cpu;
+ if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
+ printk(KERN_EMERG "%d cpus per uvhub invalid\n",
+ bcp->uvhub_cpu);
+@@ -1845,6 +2096,8 @@ static int __init uv_bau_init(void)
+ uv_base_pnode = uv_blade_to_pnode(uvhub);
+ }
+
++ enable_timeouts();
++
+ if (init_per_cpu(nuvhubs, uv_base_pnode)) {
+ nobau = 1;
+ return 0;
+@@ -1855,7 +2108,6 @@ static int __init uv_bau_init(void)
+ if (uv_blade_nr_possible_cpus(uvhub))
+ init_uvhub(uvhub, vector, uv_base_pnode);
+
+- enable_timeouts();
+ alloc_intr_gate(vector, uv_bau_message_intr1);
+
+ for_each_possible_blade(uvhub) {
+@@ -1867,7 +2119,8 @@ static int __init uv_bau_init(void)
+ val = 1L << 63;
+ write_gmmr_activation(pnode, val);
+ mmr = 1; /* should be 1 to broadcast to both sockets */
+- write_mmr_data_broadcast(pnode, mmr);
++ if (!is_uv1_hub())
++ write_mmr_data_broadcast(pnode, mmr);
+ }
+ }
+
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index fbdf0d8..688be8a 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -24,6 +24,7 @@
+ #include <linux/capability.h>
+ #include <linux/completion.h>
+ #include <linux/cdrom.h>
++#include <linux/ratelimit.h>
+ #include <linux/slab.h>
+ #include <linux/times.h>
+ #include <asm/uaccess.h>
+@@ -690,6 +691,57 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod
+ }
+ EXPORT_SYMBOL(scsi_cmd_ioctl);
+
++int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
++{
++ if (bd && bd == bd->bd_contains)
++ return 0;
++
++ /* Actually none of these is particularly useful on a partition,
++ * but they are safe.
++ */
++ switch (cmd) {
++ case SCSI_IOCTL_GET_IDLUN:
++ case SCSI_IOCTL_GET_BUS_NUMBER:
++ case SCSI_IOCTL_GET_PCI:
++ case SCSI_IOCTL_PROBE_HOST:
++ case SG_GET_VERSION_NUM:
++ case SG_SET_TIMEOUT:
++ case SG_GET_TIMEOUT:
++ case SG_GET_RESERVED_SIZE:
++ case SG_SET_RESERVED_SIZE:
++ case SG_EMULATED_HOST:
++ return 0;
++ case CDROM_GET_CAPABILITY:
++ /* Keep this until we remove the printk below. udev sends it
++ * and we do not want to spam dmesg about it. CD-ROMs do
++ * not have partitions, so we get here only for disks.
++ */
++ return -ENOTTY;
++ default:
++ break;
++ }
++
++ /* In particular, rule out all resets and host-specific ioctls. */
++ printk_ratelimited(KERN_WARNING
++ "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
++
++ return capable(CAP_SYS_RAWIO) ? 0 : -ENOTTY;
++}
++EXPORT_SYMBOL(scsi_verify_blk_ioctl);
++
++int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
++ unsigned int cmd, void __user *arg)
++{
++ int ret;
++
++ ret = scsi_verify_blk_ioctl(bd, cmd);
++ if (ret < 0)
++ return ret;
++
++ return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
++}
++EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
++
+ static int __init blk_scsi_ioctl_init(void)
+ {
+ blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
+diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
+index 8c7b997..42163d8 100644
+--- a/drivers/acpi/acpica/dsargs.c
++++ b/drivers/acpi/acpica/dsargs.c
+@@ -387,5 +387,29 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
+ status = acpi_ds_execute_arguments(node, node->parent,
+ extra_desc->extra.aml_length,
+ extra_desc->extra.aml_start);
++ if (ACPI_FAILURE(status)) {
++ return_ACPI_STATUS(status);
++ }
++
++ /* Validate the region address/length via the host OS */
++
++ status = acpi_os_validate_address(obj_desc->region.space_id,
++ obj_desc->region.address,
++ (acpi_size) obj_desc->region.length,
++ acpi_ut_get_node_name(node));
++
++ if (ACPI_FAILURE(status)) {
++ /*
++ * Invalid address/length. We will emit an error message and mark
++ * the region as invalid, so that it will cause an additional error if
++ * it is ever used. Then return AE_OK.
++ */
++ ACPI_EXCEPTION((AE_INFO, status,
++ "During address validation of OpRegion [%4.4s]",
++ node->name.ascii));
++ obj_desc->common.flags |= AOPOBJ_INVALID;
++ status = AE_OK;
++ }
++
+ return_ACPI_STATUS(status);
+ }
+diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
+index 3b5c318..e56f3be 100644
+--- a/drivers/acpi/numa.c
++++ b/drivers/acpi/numa.c
+@@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS]
+ static int node_to_pxm_map[MAX_NUMNODES]
+ = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
+
++unsigned char acpi_srat_revision __initdata;
++
+ int pxm_to_node(int pxm)
+ {
+ if (pxm < 0)
+@@ -255,9 +257,13 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
+
+ static int __init acpi_parse_srat(struct acpi_table_header *table)
+ {
++ struct acpi_table_srat *srat;
+ if (!table)
+ return -EINVAL;
+
++ srat = (struct acpi_table_srat *)table;
++ acpi_srat_revision = srat->header.revision;
++
+ /* Real work done in acpi_table_parse_srat below. */
+
+ return 0;
+diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
+index 3a0428e..c850de4 100644
+--- a/drivers/acpi/processor_core.c
++++ b/drivers/acpi/processor_core.c
+@@ -173,8 +173,30 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+ apic_id = map_mat_entry(handle, type, acpi_id);
+ if (apic_id == -1)
+ apic_id = map_madt_entry(type, acpi_id);
+- if (apic_id == -1)
+- return apic_id;
++ if (apic_id == -1) {
++ /*
++ * On UP processor, there is no _MAT or MADT table.
++ * So above apic_id is always set to -1.
++ *
++ * BIOS may define multiple CPU handles even for UP processor.
++ * For example,
++ *
++ * Scope (_PR)
++ * {
++ * Processor (CPU0, 0x00, 0x00000410, 0x06) {}
++ * Processor (CPU1, 0x01, 0x00000410, 0x06) {}
++ * Processor (CPU2, 0x02, 0x00000410, 0x06) {}
++ * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
++ * }
++ *
++ * Ignores apic_id and always return 0 for CPU0's handle.
++ * Return -1 for other CPU's handle.
++ */
++ if (acpi_id == 0)
++ return acpi_id;
++ else
++ return apic_id;
++ }
+
+ #ifdef CONFIG_SMP
+ for_each_possible_cpu(i) {
+diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
+index 990f5a8..48e06be 100644
+--- a/drivers/bcma/host_pci.c
++++ b/drivers/bcma/host_pci.c
+@@ -227,11 +227,14 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
+ #ifdef CONFIG_PM
+ static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state)
+ {
++ struct bcma_bus *bus = pci_get_drvdata(dev);
++
+ /* Host specific */
+ pci_save_state(dev);
+ pci_disable_device(dev);
+ pci_set_power_state(dev, pci_choose_state(dev, state));
+
++ bus->mapped_core = NULL;
+ return 0;
+ }
+
+diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
+index 587cce5..b0f553b 100644
+--- a/drivers/block/cciss.c
++++ b/drivers/block/cciss.c
+@@ -1735,7 +1735,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
+ case CCISS_BIG_PASSTHRU:
+ return cciss_bigpassthru(h, argp);
+
+- /* scsi_cmd_ioctl handles these, below, though some are not */
++ /* scsi_cmd_blk_ioctl handles these, below, though some are not */
+ /* very meaningful for cciss. SG_IO is the main one people want. */
+
+ case SG_GET_VERSION_NUM:
+@@ -1746,9 +1746,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
+ case SG_EMULATED_HOST:
+ case SG_IO:
+ case SCSI_IOCTL_SEND_COMMAND:
+- return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
++ return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
+
+- /* scsi_cmd_ioctl would normally handle these, below, but */
++ /* scsi_cmd_blk_ioctl would normally handle these, below, but */
+ /* they aren't a good fit for cciss, as CD-ROMs are */
+ /* not supported, and we don't have any bus/target/lun */
+ /* which we present to the kernel. */
+diff --git a/drivers/block/ub.c b/drivers/block/ub.c
+index 0e376d4..7333b9e 100644
+--- a/drivers/block/ub.c
++++ b/drivers/block/ub.c
+@@ -1744,12 +1744,11 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode)
+ static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+ {
+- struct gendisk *disk = bdev->bd_disk;
+ void __user *usermem = (void __user *) arg;
+ int ret;
+
+ mutex_lock(&ub_mutex);
+- ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
++ ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, usermem);
+ mutex_unlock(&ub_mutex);
+
+ return ret;
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 4d0b70a..e46f2f7 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -243,8 +243,8 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
+ if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
+ return -ENOTTY;
+
+- return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
+- (void __user *)data);
++ return scsi_cmd_blk_ioctl(bdev, mode, cmd,
++ (void __user *)data);
+ }
+
+ /* We provide getgeo only to please some old bootloader/partitioning tools */
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index f997c27..cedb231 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2747,12 +2747,11 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
+ {
+ void __user *argp = (void __user *)arg;
+ int ret;
+- struct gendisk *disk = bdev->bd_disk;
+
+ /*
+ * Try the generic SCSI command ioctl's first.
+ */
+- ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
++ ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
+ if (ret != -ENOTTY)
+ return ret;
+
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index bfc08f6..31b0d1a 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -2177,6 +2177,7 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev)
+ void r100_bm_disable(struct radeon_device *rdev)
+ {
+ u32 tmp;
++ u16 tmp16;
+
+ /* disable bus mastering */
+ tmp = RREG32(R_000030_BUS_CNTL);
+@@ -2187,8 +2188,8 @@ void r100_bm_disable(struct radeon_device *rdev)
+ WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
+ tmp = RREG32(RADEON_BUS_CNTL);
+ mdelay(1);
+- pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+- pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
++ pci_read_config_word(rdev->pdev, 0x4, &tmp16);
++ pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB);
+ mdelay(1);
+ }
+
+diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
+index f5ac7e7..c45d921 100644
+--- a/drivers/gpu/drm/radeon/r600_hdmi.c
++++ b/drivers/gpu/drm/radeon/r600_hdmi.c
+@@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe(
+ frame[0xD] = (right_bar >> 8);
+
+ r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
++ /* Our header values (type, version, length) should be alright, Intel
++ * is using the same. Checksum function also seems to be OK, it works
++ * fine for audio infoframe. However calculated value is always lower
++ * by 2 in comparison to fglrx. It breaks displaying anything in case
++ * of TVs that strictly check the checksum. Hack it manually here to
++ * workaround this issue. */
++ frame[0x0] += 2;
+
+ WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
+ frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index c4d00a1..9b39145 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -224,8 +224,11 @@ int radeon_wb_init(struct radeon_device *rdev)
+ if (radeon_no_wb == 1)
+ rdev->wb.enabled = false;
+ else {
+- /* often unreliable on AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
++ /* often unreliable on AGP */
++ rdev->wb.enabled = false;
++ } else if (rdev->family < CHIP_R300) {
++ /* often unreliable on pre-r300 */
+ rdev->wb.enabled = false;
+ } else {
+ rdev->wb.enabled = true;
+diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
+index b1053d6..c259e21 100644
+--- a/drivers/gpu/drm/radeon/rs600.c
++++ b/drivers/gpu/drm/radeon/rs600.c
+@@ -324,10 +324,10 @@ void rs600_hpd_fini(struct radeon_device *rdev)
+
+ void rs600_bm_disable(struct radeon_device *rdev)
+ {
+- u32 tmp;
++ u16 tmp;
+
+ /* disable bus mastering */
+- pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
++ pci_read_config_word(rdev->pdev, 0x4, &tmp);
+ pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+ mdelay(1);
+ }
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index 22a4a05..d21f6d0 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -335,6 +335,7 @@ config HID_MULTITOUCH
+ Say Y here if you have one of the following devices:
+ - 3M PCT touch screens
+ - ActionStar dual touch panels
++ - Atmel panels
+ - Cando dual touch panels
+ - Chunghwa panels
+ - CVTouch panels
+@@ -355,6 +356,7 @@ config HID_MULTITOUCH
+ - Touch International Panels
+ - Unitec Panels
+ - XAT optical touch panels
++ - Xiroku optical touch panels
+
+ If unsure, say N.
+
+@@ -620,6 +622,7 @@ config HID_WIIMOTE
+ depends on BT_HIDP
+ depends on LEDS_CLASS
+ select POWER_SUPPLY
++ select INPUT_FF_MEMLESS
+ ---help---
+ Support for the Nintendo Wii Remote bluetooth device.
+
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index af35384..bb656d8 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -362,7 +362,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
+
+ case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
+ parser->global.report_size = item_udata(item);
+- if (parser->global.report_size > 32) {
++ if (parser->global.report_size > 96) {
+ dbg_hid("invalid report_size %d\n",
+ parser->global.report_size);
+ return -1;
+@@ -1404,11 +1404,13 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
+- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
+- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
+- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
+- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2515) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
+@@ -1423,6 +1425,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT, USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6650) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) },
+@@ -1549,6 +1552,15 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XAT, USB_DEVICE_ID_XAT_CSR) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX1) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX1) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR1) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 4a441a6..00cabb3 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -21,6 +21,7 @@
+ #define USB_VENDOR_ID_3M 0x0596
+ #define USB_DEVICE_ID_3M1968 0x0500
+ #define USB_DEVICE_ID_3M2256 0x0502
++#define USB_DEVICE_ID_3M3266 0x0506
+
+ #define USB_VENDOR_ID_A4TECH 0x09da
+ #define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006
+@@ -145,6 +146,9 @@
+ #define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
+ #define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
+
++#define USB_VENDOR_ID_ATMEL 0x03eb
++#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
++
+ #define USB_VENDOR_ID_AVERMEDIA 0x07ca
+ #define USB_DEVICE_ID_AVER_FM_MR800 0xb800
+
+@@ -230,11 +234,14 @@
+
+ #define USB_VENDOR_ID_DWAV 0x0eef
+ #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
+-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d
+-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1 0x720c
+-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2 0x72a1
+-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3 0x480e
+-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4 0x726b
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D 0x480d
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E 0x480e
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C 0x720c
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B 0x726b
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1 0x72a1
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA 0x72fa
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302 0x7302
++#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
+
+ #define USB_VENDOR_ID_ELECOM 0x056e
+ #define USB_DEVICE_ID_ELECOM_BM084 0x0061
+@@ -356,6 +363,9 @@
+ #define USB_VENDOR_ID_HANVON 0x20b3
+ #define USB_DEVICE_ID_HANVON_MULTITOUCH 0x0a18
+
++#define USB_VENDOR_ID_HANVON_ALT 0x22ed
++#define USB_DEVICE_ID_HANVON_ALT_MULTITOUCH 0x1010
++
+ #define USB_VENDOR_ID_HAPP 0x078b
+ #define USB_DEVICE_ID_UGCI_DRIVING 0x0010
+ #define USB_DEVICE_ID_UGCI_FLYING 0x0020
+@@ -707,6 +717,17 @@
+ #define USB_VENDOR_ID_XAT 0x2505
+ #define USB_DEVICE_ID_XAT_CSR 0x0220
+
++#define USB_VENDOR_ID_XIROKU 0x1477
++#define USB_DEVICE_ID_XIROKU_SPX 0x1006
++#define USB_DEVICE_ID_XIROKU_MPX 0x1007
++#define USB_DEVICE_ID_XIROKU_CSR 0x100e
++#define USB_DEVICE_ID_XIROKU_SPX1 0x1021
++#define USB_DEVICE_ID_XIROKU_CSR1 0x1022
++#define USB_DEVICE_ID_XIROKU_MPX1 0x1023
++#define USB_DEVICE_ID_XIROKU_SPX2 0x1024
++#define USB_DEVICE_ID_XIROKU_CSR2 0x1025
++#define USB_DEVICE_ID_XIROKU_MPX2 0x1026
++
+ #define USB_VENDOR_ID_YEALINK 0x6993
+ #define USB_DEVICE_ID_YEALINK_P1K_P4K_B2K 0xb001
+
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index f1c909f..995fc4c 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -609,12 +609,20 @@ static const struct hid_device_id mt_devices[] = {
+ { .driver_data = MT_CLS_3M,
+ HID_USB_DEVICE(USB_VENDOR_ID_3M,
+ USB_DEVICE_ID_3M2256) },
++ { .driver_data = MT_CLS_3M,
++ HID_USB_DEVICE(USB_VENDOR_ID_3M,
++ USB_DEVICE_ID_3M3266) },
+
+ /* ActionStar panels */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_ACTIONSTAR,
+ USB_DEVICE_ID_ACTIONSTAR_1011) },
+
++ /* Atmel panels */
++ { .driver_data = MT_CLS_SERIAL,
++ HID_USB_DEVICE(USB_VENDOR_ID_ATMEL,
++ USB_DEVICE_ID_ATMEL_MULTITOUCH) },
++
+ /* Cando panels */
+ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
+ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+@@ -645,23 +653,32 @@ static const struct hid_device_id mt_devices[] = {
+ USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
+
+ /* eGalax devices (resistive) */
+- { .driver_data = MT_CLS_EGALAX,
++ { .driver_data = MT_CLS_EGALAX,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
+- { .driver_data = MT_CLS_EGALAX,
++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
++ { .driver_data = MT_CLS_EGALAX,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
+
+ /* eGalax devices (capacitive) */
+- { .driver_data = MT_CLS_EGALAX,
++ { .driver_data = MT_CLS_EGALAX,
++ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
++ { .driver_data = MT_CLS_EGALAX,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
+- { .driver_data = MT_CLS_EGALAX,
++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
++ { .driver_data = MT_CLS_EGALAX,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
+- { .driver_data = MT_CLS_EGALAX,
++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
++ { .driver_data = MT_CLS_EGALAX,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA) },
++ { .driver_data = MT_CLS_EGALAX,
++ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
++ { .driver_data = MT_CLS_EGALAX,
++ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
++ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
+
+ /* Elo TouchSystems IntelliTouch Plus panel */
+ { .driver_data = MT_CLS_DUAL_NSMU_CONTACTID,
+@@ -678,6 +695,11 @@ static const struct hid_device_id mt_devices[] = {
+ HID_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
+ USB_DEVICE_ID_GOODTOUCH_000f) },
+
++ /* Hanvon panels */
++ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
++ HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT,
++ USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
++
+ /* Ideacom panel */
+ { .driver_data = MT_CLS_SERIAL,
+ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM,
+@@ -758,6 +780,35 @@ static const struct hid_device_id mt_devices[] = {
+ HID_USB_DEVICE(USB_VENDOR_ID_XAT,
+ USB_DEVICE_ID_XAT_CSR) },
+
++ /* Xiroku */
++ { .driver_data = MT_CLS_DEFAULT,
++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++ USB_DEVICE_ID_XIROKU_SPX) },
++ { .driver_data = MT_CLS_DEFAULT,
++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++ USB_DEVICE_ID_XIROKU_MPX) },
++ { .driver_data = MT_CLS_DEFAULT,
++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++ USB_DEVICE_ID_XIROKU_CSR) },
++ { .driver_data = MT_CLS_DEFAULT,
++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++ USB_DEVICE_ID_XIROKU_SPX1) },
++ { .driver_data = MT_CLS_DEFAULT,
++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++ USB_DEVICE_ID_XIROKU_MPX1) },
++ { .driver_data = MT_CLS_DEFAULT,
++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++ USB_DEVICE_ID_XIROKU_CSR1) },
++ { .driver_data = MT_CLS_DEFAULT,
++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++ USB_DEVICE_ID_XIROKU_SPX2) },
++ { .driver_data = MT_CLS_DEFAULT,
++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++ USB_DEVICE_ID_XIROKU_MPX2) },
++ { .driver_data = MT_CLS_DEFAULT,
++ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
++ USB_DEVICE_ID_XIROKU_CSR2) },
++
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, mt_devices);
+diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
+index b6807db..5b667e5 100644
+--- a/drivers/i2c/busses/i2c-ali1535.c
++++ b/drivers/i2c/busses/i2c-ali1535.c
+@@ -140,7 +140,7 @@ static unsigned short ali1535_smba;
+ defined to make the transition easier. */
+ static int __devinit ali1535_setup(struct pci_dev *dev)
+ {
+- int retval = -ENODEV;
++ int retval;
+ unsigned char temp;
+
+ /* Check the following things:
+@@ -155,6 +155,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
+ if (ali1535_smba == 0) {
+ dev_warn(&dev->dev,
+ "ALI1535_smb region uninitialized - upgrade BIOS?\n");
++ retval = -ENODEV;
+ goto exit;
+ }
+
+@@ -167,6 +168,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
+ ali1535_driver.name)) {
+ dev_err(&dev->dev, "ALI1535_smb region 0x%x already in use!\n",
+ ali1535_smba);
++ retval = -EBUSY;
+ goto exit;
+ }
+
+@@ -174,6 +176,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
+ pci_read_config_byte(dev, SMBCFG, &temp);
+ if ((temp & ALI1535_SMBIO_EN) == 0) {
+ dev_err(&dev->dev, "SMB device not enabled - upgrade BIOS?\n");
++ retval = -ENODEV;
+ goto exit_free;
+ }
+
+@@ -181,6 +184,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
+ pci_read_config_byte(dev, SMBHSTCFG, &temp);
+ if ((temp & 1) == 0) {
+ dev_err(&dev->dev, "SMBus controller not enabled - upgrade BIOS?\n");
++ retval = -ENODEV;
+ goto exit_free;
+ }
+
+@@ -198,12 +202,11 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
+ dev_dbg(&dev->dev, "SMBREV = 0x%X\n", temp);
+ dev_dbg(&dev->dev, "ALI1535_smba = 0x%X\n", ali1535_smba);
+
+- retval = 0;
+-exit:
+- return retval;
++ return 0;
+
+ exit_free:
+ release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
++exit:
+ return retval;
+ }
+
+diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
+index 18936ac..730215e 100644
+--- a/drivers/i2c/busses/i2c-eg20t.c
++++ b/drivers/i2c/busses/i2c-eg20t.c
+@@ -243,7 +243,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
+ if (pch_clk > PCH_MAX_CLK)
+ pch_clk = 62500;
+
+- pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
++ pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);
+ /* Set transfer speed in I2CBC */
+ iowrite32(pch_i2cbc, p + PCH_I2CBC);
+
+diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
+index ff1e127..4853b52 100644
+--- a/drivers/i2c/busses/i2c-nforce2.c
++++ b/drivers/i2c/busses/i2c-nforce2.c
+@@ -356,7 +356,7 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
+ error = acpi_check_region(smbus->base, smbus->size,
+ nforce2_driver.name);
+ if (error)
+- return -1;
++ return error;
+
+ if (!request_region(smbus->base, smbus->size, nforce2_driver.name)) {
+ dev_err(&smbus->adapter.dev, "Error requesting region %02x .. %02X for %s\n",
+diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
+index fa23faa..257c1a5 100644
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -235,7 +235,7 @@ static const u8 reg_map_ip_v2[] = {
+ [OMAP_I2C_BUF_REG] = 0x94,
+ [OMAP_I2C_CNT_REG] = 0x98,
+ [OMAP_I2C_DATA_REG] = 0x9c,
+- [OMAP_I2C_SYSC_REG] = 0x20,
++ [OMAP_I2C_SYSC_REG] = 0x10,
+ [OMAP_I2C_CON_REG] = 0xa4,
+ [OMAP_I2C_OA_REG] = 0xa8,
+ [OMAP_I2C_SA_REG] = 0xac,
+diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
+index 4375866..6d60284 100644
+--- a/drivers/i2c/busses/i2c-sis5595.c
++++ b/drivers/i2c/busses/i2c-sis5595.c
+@@ -147,7 +147,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
+ u16 a;
+ u8 val;
+ int *i;
+- int retval = -ENODEV;
++ int retval;
+
+ /* Look for imposters */
+ for (i = blacklist; *i != 0; i++) {
+@@ -223,7 +223,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
+
+ error:
+ release_region(sis5595_base + SMB_INDEX, 2);
+- return retval;
++ return -ENODEV;
+ }
+
+ static int sis5595_transaction(struct i2c_adapter *adap)
+diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
+index e6f539e..b617fd0 100644
+--- a/drivers/i2c/busses/i2c-sis630.c
++++ b/drivers/i2c/busses/i2c-sis630.c
+@@ -393,7 +393,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
+ {
+ unsigned char b;
+ struct pci_dev *dummy = NULL;
+- int retval = -ENODEV, i;
++ int retval, i;
+
+ /* check for supported SiS devices */
+ for (i=0; supported[i] > 0 ; i++) {
+@@ -418,18 +418,21 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
+ */
+ if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG,&b)) {
+ dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n");
++ retval = -ENODEV;
+ goto exit;
+ }
+ /* if ACPI already enabled , do nothing */
+ if (!(b & 0x80) &&
+ pci_write_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, b | 0x80)) {
+ dev_err(&sis630_dev->dev, "Error: Can't enable ACPI\n");
++ retval = -ENODEV;
+ goto exit;
+ }
+
+ /* Determine the ACPI base address */
+ if (pci_read_config_word(sis630_dev,SIS630_ACPI_BASE_REG,&acpi_base)) {
+ dev_err(&sis630_dev->dev, "Error: Can't determine ACPI base address\n");
++ retval = -ENODEV;
+ goto exit;
+ }
+
+@@ -445,6 +448,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
+ sis630_driver.name)) {
+ dev_err(&sis630_dev->dev, "SMBus registers 0x%04x-0x%04x already "
+ "in use!\n", acpi_base + SMB_STS, acpi_base + SMB_SAA);
++ retval = -EBUSY;
+ goto exit;
+ }
+
+diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
+index 0b012f1..58261d4 100644
+--- a/drivers/i2c/busses/i2c-viapro.c
++++ b/drivers/i2c/busses/i2c-viapro.c
+@@ -324,7 +324,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+ {
+ unsigned char temp;
+- int error = -ENODEV;
++ int error;
+
+ /* Determine the address of the SMBus areas */
+ if (force_addr) {
+@@ -390,6 +390,7 @@ found:
+ dev_err(&pdev->dev, "SMBUS: Error: Host SMBus "
+ "controller not enabled! - upgrade BIOS or "
+ "use force=1\n");
++ error = -ENODEV;
+ goto release_region;
+ }
+ }
+@@ -422,9 +423,11 @@ found:
+ "SMBus Via Pro adapter at %04x", vt596_smba);
+
+ vt596_pdev = pci_dev_get(pdev);
+- if (i2c_add_adapter(&vt596_adapter)) {
++ error = i2c_add_adapter(&vt596_adapter);
++ if (error) {
+ pci_dev_put(vt596_pdev);
+ vt596_pdev = NULL;
++ goto release_region;
+ }
+
+ /* Always return failure here. This is to allow other drivers to bind
+diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
+index d267b7a..a22ca84 100644
+--- a/drivers/ide/ide-floppy_ioctl.c
++++ b/drivers/ide/ide-floppy_ioctl.c
+@@ -292,8 +292,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
+ * and CDROM_SEND_PACKET (legacy) ioctls
+ */
+ if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
+- err = scsi_cmd_ioctl(bdev->bd_disk->queue, bdev->bd_disk,
+- mode, cmd, argp);
++ err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
+
+ if (err == -ENOTTY)
+ err = generic_ide_ioctl(drive, bdev, cmd, arg);
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index 5d2f8e1..5b39216 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -348,7 +348,8 @@ static int intel_idle_probe(void)
+ cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
+
+ if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+- !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
++ !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
++ !mwait_substates)
+ return -ENODEV;
+
+ pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
+@@ -394,7 +395,7 @@ static int intel_idle_probe(void)
+ if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
+ lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+ else {
+- smp_call_function(__setup_broadcast_timer, (void *)true, 1);
++ on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
+ register_cpu_notifier(&setup_broadcast_notifier);
+ }
+
+@@ -471,7 +472,7 @@ static int intel_idle_cpuidle_driver_init(void)
+ }
+
+ if (auto_demotion_disable_flags)
+- smp_call_function(auto_demotion_disable, NULL, 1);
++ on_each_cpu(auto_demotion_disable, NULL, 1);
+
+ return 0;
+ }
+@@ -568,7 +569,7 @@ static void __exit intel_idle_exit(void)
+ cpuidle_unregister_driver(&intel_idle_driver);
+
+ if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
+- smp_call_function(__setup_broadcast_timer, (void *)false, 1);
++ on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
+ unregister_cpu_notifier(&setup_broadcast_notifier);
+ }
+
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index f84c080..9fb18c1 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -368,8 +368,17 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
+ static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
+ {
+ struct flakey_c *fc = ti->private;
++ struct dm_dev *dev = fc->dev;
++ int r = 0;
+
+- return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg);
++ /*
++ * Only pass ioctls through if the device sizes match exactly.
++ */
++ if (fc->start ||
++ ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
++ r = scsi_verify_blk_ioctl(NULL, cmd);
++
++ return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
+ }
+
+ static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
+index 3921e3b..9728839 100644
+--- a/drivers/md/dm-linear.c
++++ b/drivers/md/dm-linear.c
+@@ -116,7 +116,17 @@ static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
+ unsigned long arg)
+ {
+ struct linear_c *lc = (struct linear_c *) ti->private;
+- return __blkdev_driver_ioctl(lc->dev->bdev, lc->dev->mode, cmd, arg);
++ struct dm_dev *dev = lc->dev;
++ int r = 0;
++
++ /*
++ * Only pass ioctls through if the device sizes match exactly.
++ */
++ if (lc->start ||
++ ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
++ r = scsi_verify_blk_ioctl(NULL, cmd);
++
++ return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
+ }
+
+ static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index 5e0090e..801d92d 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -1520,6 +1520,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
+
+ spin_unlock_irqrestore(&m->lock, flags);
+
++ /*
++ * Only pass ioctls through if the device sizes match exactly.
++ */
++ if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
++ r = scsi_verify_blk_ioctl(NULL, cmd);
++
+ return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ }
+
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index ede2461..7d9e071 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -525,8 +525,17 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
+ if (test_bit(WriteMostly, &rdev->flags)) {
+ /* Don't balance among write-mostly, just
+ * use the first as a last resort */
+- if (best_disk < 0)
++ if (best_disk < 0) {
++ if (is_badblock(rdev, this_sector, sectors,
++ &first_bad, &bad_sectors)) {
++ if (first_bad < this_sector)
++ /* Cannot use this */
++ continue;
++ best_good_sectors = first_bad - this_sector;
++ } else
++ best_good_sectors = sectors;
+ best_disk = disk;
++ }
+ continue;
+ }
+ /* This is a reasonable device to use. It might
+diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
+index bcb45be..f0482b2 100644
+--- a/drivers/media/video/cx23885/cx23885-dvb.c
++++ b/drivers/media/video/cx23885/cx23885-dvb.c
+@@ -940,6 +940,11 @@ static int dvb_register(struct cx23885_tsport *port)
+
+ fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
+ &dev->i2c_bus[1].i2c_adap, &cfg);
++ if (!fe) {
++ printk(KERN_ERR "%s/2: xc4000 attach failed\n",
++ dev->name);
++ goto frontend_detach;
++ }
+ }
+ break;
+ case CX23885_BOARD_TBS_6920:
+diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
+index 0d719fa..3929d93 100644
+--- a/drivers/media/video/cx88/cx88-cards.c
++++ b/drivers/media/video/cx88/cx88-cards.c
+@@ -1573,8 +1573,8 @@ static const struct cx88_board cx88_boards[] = {
+ .name = "Pinnacle Hybrid PCTV",
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0x61,
+- .radio_type = TUNER_XC2028,
+- .radio_addr = 0x61,
++ .radio_type = UNSET,
++ .radio_addr = ADDR_UNSET,
+ .input = { {
+ .type = CX88_VMUX_TELEVISION,
+ .vmux = 0,
+@@ -1611,8 +1611,8 @@ static const struct cx88_board cx88_boards[] = {
+ .name = "Leadtek TV2000 XP Global",
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0x61,
+- .radio_type = TUNER_XC2028,
+- .radio_addr = 0x61,
++ .radio_type = UNSET,
++ .radio_addr = ADDR_UNSET,
+ .input = { {
+ .type = CX88_VMUX_TELEVISION,
+ .vmux = 0,
+@@ -2043,8 +2043,8 @@ static const struct cx88_board cx88_boards[] = {
+ .name = "Terratec Cinergy HT PCI MKII",
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0x61,
+- .radio_type = TUNER_XC2028,
+- .radio_addr = 0x61,
++ .radio_type = UNSET,
++ .radio_addr = ADDR_UNSET,
+ .input = { {
+ .type = CX88_VMUX_TELEVISION,
+ .vmux = 0,
+@@ -2082,9 +2082,9 @@ static const struct cx88_board cx88_boards[] = {
+ [CX88_BOARD_WINFAST_DTV1800H] = {
+ .name = "Leadtek WinFast DTV1800 Hybrid",
+ .tuner_type = TUNER_XC2028,
+- .radio_type = TUNER_XC2028,
++ .radio_type = UNSET,
+ .tuner_addr = 0x61,
+- .radio_addr = 0x61,
++ .radio_addr = ADDR_UNSET,
+ /*
+ * GPIO setting
+ *
+@@ -2123,9 +2123,9 @@ static const struct cx88_board cx88_boards[] = {
+ [CX88_BOARD_WINFAST_DTV1800H_XC4000] = {
+ .name = "Leadtek WinFast DTV1800 H (XC4000)",
+ .tuner_type = TUNER_XC4000,
+- .radio_type = TUNER_XC4000,
++ .radio_type = UNSET,
+ .tuner_addr = 0x61,
+- .radio_addr = 0x61,
++ .radio_addr = ADDR_UNSET,
+ /*
+ * GPIO setting
+ *
+@@ -2164,9 +2164,9 @@ static const struct cx88_board cx88_boards[] = {
+ [CX88_BOARD_WINFAST_DTV2000H_PLUS] = {
+ .name = "Leadtek WinFast DTV2000 H PLUS",
+ .tuner_type = TUNER_XC4000,
+- .radio_type = TUNER_XC4000,
++ .radio_type = UNSET,
+ .tuner_addr = 0x61,
+- .radio_addr = 0x61,
++ .radio_addr = ADDR_UNSET,
+ /*
+ * GPIO
+ * 2: 1: mute audio
+diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
+index dadf11f..cf7788f 100644
+--- a/drivers/media/video/uvc/uvc_v4l2.c
++++ b/drivers/media/video/uvc/uvc_v4l2.c
+@@ -58,6 +58,15 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
+ break;
+
+ case V4L2_CTRL_TYPE_MENU:
++ /* Prevent excessive memory consumption, as well as integer
++ * overflows.
++ */
++ if (xmap->menu_count == 0 ||
++ xmap->menu_count > UVC_MAX_CONTROL_MENU_ENTRIES) {
++ ret = -EINVAL;
++ goto done;
++ }
++
+ size = xmap->menu_count * sizeof(*map->menu_info);
+ map->menu_info = kmalloc(size, GFP_KERNEL);
+ if (map->menu_info == NULL) {
+diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
+index 4c1392e..bc446ba 100644
+--- a/drivers/media/video/uvc/uvcvideo.h
++++ b/drivers/media/video/uvc/uvcvideo.h
+@@ -113,6 +113,7 @@
+
+ /* Maximum allowed number of control mappings per device */
+ #define UVC_MAX_CONTROL_MAPPINGS 1024
++#define UVC_MAX_CONTROL_MENU_ENTRIES 32
+
+ /* Devices quirks */
+ #define UVC_QUIRK_STATUS_INTERVAL 0x00000001
+diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
+index e1da8fc..639abee 100644
+--- a/drivers/media/video/v4l2-ioctl.c
++++ b/drivers/media/video/v4l2-ioctl.c
+@@ -2226,6 +2226,10 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
+ struct v4l2_ext_controls *ctrls = parg;
+
+ if (ctrls->count != 0) {
++ if (ctrls->count > V4L2_CID_MAX_CTRLS) {
++ ret = -EINVAL;
++ break;
++ }
+ *user_ptr = (void __user *)ctrls->controls;
+ *kernel_ptr = (void *)&ctrls->controls;
+ *array_size = sizeof(struct v4l2_ext_control)
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index d240427..fb7c27f 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1048,7 +1048,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
+ *
+ * WARNING: eMMC rules are NOT the same as SD DDR
+ */
+- if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
++ if (ddr == MMC_1_2V_DDR_MODE) {
+ err = mmc_set_signal_voltage(host,
+ MMC_SIGNAL_VOLTAGE_120, 0);
+ if (err)
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 19ed580..6ce32a7 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1364,8 +1364,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
+ if ((ios->timing == MMC_TIMING_UHS_SDR50) ||
+ (ios->timing == MMC_TIMING_UHS_SDR104) ||
+ (ios->timing == MMC_TIMING_UHS_DDR50) ||
+- (ios->timing == MMC_TIMING_UHS_SDR25) ||
+- (ios->timing == MMC_TIMING_UHS_SDR12))
++ (ios->timing == MMC_TIMING_UHS_SDR25))
+ ctrl |= SDHCI_CTRL_HISPD;
+
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+@@ -2336,9 +2335,8 @@ int sdhci_suspend_host(struct sdhci_host *host)
+ /* Disable tuning since we are suspending */
+ if (host->version >= SDHCI_SPEC_300 && host->tuning_count &&
+ host->tuning_mode == SDHCI_TUNING_MODE_1) {
++ del_timer_sync(&host->tuning_timer);
+ host->flags &= ~SDHCI_NEEDS_RETUNING;
+- mod_timer(&host->tuning_timer, jiffies +
+- host->tuning_count * HZ);
+ }
+
+ ret = mmc_suspend_host(host->mmc);
+diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
+index ed8b5e7..424ca5f 100644
+--- a/drivers/mtd/mtd_blkdevs.c
++++ b/drivers/mtd/mtd_blkdevs.c
+@@ -215,7 +215,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
+
+ mutex_lock(&dev->lock);
+
+- if (dev->open++)
++ if (dev->open)
+ goto unlock;
+
+ kref_get(&dev->ref);
+@@ -235,6 +235,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
+ goto error_release;
+
+ unlock:
++ dev->open++;
+ mutex_unlock(&dev->lock);
+ blktrans_dev_put(dev);
+ return ret;
+diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
+index 1e2fa62..f3cdce9 100644
+--- a/drivers/mtd/mtdoops.c
++++ b/drivers/mtd/mtdoops.c
+@@ -253,6 +253,9 @@ static void find_next_position(struct mtdoops_context *cxt)
+ size_t retlen;
+
+ for (page = 0; page < cxt->oops_pages; page++) {
++ if (mtd->block_isbad &&
++ mtd->block_isbad(mtd, page * record_size))
++ continue;
+ /* Assume the page is used */
+ mark_page_used(cxt, page);
+ ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
+@@ -369,7 +372,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
+
+ /* oops_page_used is a bit field */
+ cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
+- BITS_PER_LONG));
++ BITS_PER_LONG) * sizeof(unsigned long));
+ if (!cxt->oops_page_used) {
+ printk(KERN_ERR "mtdoops: could not allocate page array\n");
+ return;
+diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
+index 52ffd91..811642f 100644
+--- a/drivers/mtd/tests/mtd_stresstest.c
++++ b/drivers/mtd/tests/mtd_stresstest.c
+@@ -284,6 +284,12 @@ static int __init mtd_stresstest_init(void)
+ (unsigned long long)mtd->size, mtd->erasesize,
+ pgsize, ebcnt, pgcnt, mtd->oobsize);
+
++ if (ebcnt < 2) {
++ printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
++ err = -ENOSPC;
++ goto out_put_mtd;
++ }
++
+ /* Read or write up 2 eraseblocks at a time */
+ bufsize = mtd->erasesize * 2;
+
+@@ -322,6 +328,7 @@ out:
+ kfree(bbt);
+ vfree(writebuf);
+ vfree(readbuf);
++out_put_mtd:
+ put_mtd_device(mtd);
+ if (err)
+ printk(PRINT_PREF "error %d occurred\n", err);
+diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
+index 3320a50..ad76592 100644
+--- a/drivers/mtd/ubi/cdev.c
++++ b/drivers/mtd/ubi/cdev.c
+@@ -632,6 +632,9 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
+ if (req->alignment != 1 && n)
+ goto bad;
+
++ if (!req->name[0] || !req->name_len)
++ goto bad;
++
+ if (req->name_len > UBI_VOL_NAME_MAX) {
+ err = -ENAMETOOLONG;
+ goto bad;
+diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
+index 64fbb00..ead2cd1 100644
+--- a/drivers/mtd/ubi/debug.h
++++ b/drivers/mtd/ubi/debug.h
+@@ -43,7 +43,10 @@
+ pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
+
+ /* Just a debugging messages not related to any specific UBI subsystem */
+-#define dbg_msg(fmt, ...) ubi_dbg_msg("msg", fmt, ##__VA_ARGS__)
++#define dbg_msg(fmt, ...) \
++ printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
++ current->pid, __func__, ##__VA_ARGS__)
++
+ /* General debugging messages */
+ #define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
+ /* Messages from the eraseblock association sub-system */
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index fb7f19b..cd26da8 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -1028,12 +1028,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
+ * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
+ * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
+ * LEB is already locked, we just do not move it and return
+- * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
++ * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
++ * we do not know the reasons of the contention - it may be just a
++ * normal I/O on this LEB, so we want to re-try.
+ */
+ err = leb_write_trylock(ubi, vol_id, lnum);
+ if (err) {
+ dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
+- return MOVE_CANCEL_RACE;
++ return MOVE_RETRY;
+ }
+
+ /*
+diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
+index dc64c76..d51d75d 100644
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -120,6 +120,7 @@ enum {
+ * PEB
+ * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
+ * target PEB
++ * MOVE_RETRY: retry scrubbing the PEB
+ */
+ enum {
+ MOVE_CANCEL_RACE = 1,
+@@ -127,6 +128,7 @@ enum {
+ MOVE_TARGET_RD_ERR,
+ MOVE_TARGET_WR_ERR,
+ MOVE_CANCEL_BITFLIPS,
++ MOVE_RETRY,
+ };
+
+ /**
+diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
+index 9ad18da..890754c 100644
+--- a/drivers/mtd/ubi/vtbl.c
++++ b/drivers/mtd/ubi/vtbl.c
+@@ -306,7 +306,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
+ int copy, void *vtbl)
+ {
+ int err, tries = 0;
+- static struct ubi_vid_hdr *vid_hdr;
++ struct ubi_vid_hdr *vid_hdr;
+ struct ubi_scan_leb *new_seb;
+
+ ubi_msg("create volume table (copy #%d)", copy + 1);
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 42c684c..0696e36 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -795,7 +795,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ protect = 1;
+ goto out_not_moved;
+ }
+-
++ if (err == MOVE_RETRY) {
++ scrubbing = 1;
++ goto out_not_moved;
++ }
+ if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
+ err == MOVE_TARGET_RD_ERR) {
+ /*
+@@ -1049,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+
+ ubi_err("failed to erase PEB %d, error %d", pnum, err);
+ kfree(wl_wrk);
+- kmem_cache_free(ubi_wl_entry_slab, e);
+
+ if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
+ err == -EBUSY) {
+@@ -1062,14 +1064,16 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+ goto out_ro;
+ }
+ return err;
+- } else if (err != -EIO) {
++ }
++
++ kmem_cache_free(ubi_wl_entry_slab, e);
++ if (err != -EIO)
+ /*
+ * If this is not %-EIO, we have no idea what to do. Scheduling
+ * this physical eraseblock for erasure again would cause
+ * errors again and again. Well, lets switch to R/O mode.
+ */
+ goto out_ro;
+- }
+
+ /* It is %-EIO, the PEB went bad */
+
+diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
+index dd2625a..f5e063a 100644
+--- a/drivers/net/usb/asix.c
++++ b/drivers/net/usb/asix.c
+@@ -974,6 +974,7 @@ static int ax88772_link_reset(struct usbnet *dev)
+
+ static int ax88772_reset(struct usbnet *dev)
+ {
++ struct asix_data *data = (struct asix_data *)&dev->data;
+ int ret, embd_phy;
+ u16 rx_ctl;
+
+@@ -1051,6 +1052,13 @@ static int ax88772_reset(struct usbnet *dev)
+ goto out;
+ }
+
++ /* Rewrite MAC address */
++ memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
++ ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
++ data->mac_addr);
++ if (ret < 0)
++ goto out;
++
+ /* Set RX_CTL to default values with 2k buffer, and enable cactus */
+ ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
+ if (ret < 0)
+@@ -1316,6 +1324,13 @@ static int ax88178_reset(struct usbnet *dev)
+ if (ret < 0)
+ return ret;
+
++ /* Rewrite MAC address */
++ memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
++ ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
++ data->mac_addr);
++ if (ret < 0)
++ return ret;
++
+ ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
+ if (ret < 0)
+ return ret;
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+index ccde784..f5ae3c6 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+@@ -526,10 +526,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
+ rxs->rs_status |= ATH9K_RXERR_DECRYPT;
+ else if (rxsp->status11 & AR_MichaelErr)
+ rxs->rs_status |= ATH9K_RXERR_MIC;
+- if (rxsp->status11 & AR_KeyMiss)
+- rxs->rs_status |= ATH9K_RXERR_KEYMISS;
+ }
+
++ if (rxsp->status11 & AR_KeyMiss)
++ rxs->rs_status |= ATH9K_RXERR_KEYMISS;
++
+ return 0;
+ }
+ EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
+diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
+index 9953881..8ddef3e 100644
+--- a/drivers/net/wireless/ath/ath9k/calib.c
++++ b/drivers/net/wireless/ath/ath9k/calib.c
+@@ -402,6 +402,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
+ ah->noise = ath9k_hw_getchan_noise(ah, chan);
+ return true;
+ }
++EXPORT_SYMBOL(ath9k_hw_getnf);
+
+ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
+index ecdb6fd..bbcb777 100644
+--- a/drivers/net/wireless/ath/ath9k/mac.c
++++ b/drivers/net/wireless/ath/ath9k/mac.c
+@@ -621,10 +621,11 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
+ rs->rs_status |= ATH9K_RXERR_DECRYPT;
+ else if (ads.ds_rxstatus8 & AR_MichaelErr)
+ rs->rs_status |= ATH9K_RXERR_MIC;
+- if (ads.ds_rxstatus8 & AR_KeyMiss)
+- rs->rs_status |= ATH9K_RXERR_KEYMISS;
+ }
+
++ if (ads.ds_rxstatus8 & AR_KeyMiss)
++ rs->rs_status |= ATH9K_RXERR_KEYMISS;
++
+ return 0;
+ }
+ EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index a9c5ae7..f76a814 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -1667,7 +1667,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ struct ieee80211_channel *curchan = hw->conf.channel;
+- struct ath9k_channel old_chan;
+ int pos = curchan->hw_value;
+ int old_pos = -1;
+ unsigned long flags;
+@@ -1693,11 +1692,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
+ * Preserve the current channel values, before updating
+ * the same channel
+ */
+- if (old_pos == pos) {
+- memcpy(&old_chan, &sc->sc_ah->channels[pos],
+- sizeof(struct ath9k_channel));
+- ah->curchan = &old_chan;
+- }
++ if (ah->curchan && (old_pos == pos))
++ ath9k_hw_getnf(ah, ah->curchan);
+
+ ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
+ curchan, conf->channel_type);
+diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
+index b282d86..05f2ad1 100644
+--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
++++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
+@@ -2656,14 +2656,13 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
+ IWL_WARN(priv, "Invalid scan band\n");
+ return -EIO;
+ }
+-
+ /*
+- * If active scaning is requested but a certain channel
+- * is marked passive, we can do active scanning if we
+- * detect transmissions.
++ * If active scaning is requested but a certain channel is marked
++ * passive, we can do active scanning if we detect transmissions. For
++ * passive only scanning disable switching to active on any channel.
+ */
+ scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+- IWL_GOOD_CRC_TH_DISABLED;
++ IWL_GOOD_CRC_TH_NEVER;
+
+ len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
+ vif->addr, priv->scan_request->ie,
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+index 1a52ed2..6465983 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+@@ -827,6 +827,7 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
+ case IEEE80211_SMPS_STATIC:
+ case IEEE80211_SMPS_DYNAMIC:
+ return IWL_NUM_IDLE_CHAINS_SINGLE;
++ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_OFF:
+ return active_cnt;
+ default:
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+index 5c7c17c..d552fa3 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+@@ -559,6 +559,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
+
+ mutex_lock(&priv->shrd->mutex);
+
++ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
++ goto out;
++
+ if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
+ IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
+ goto out;
+diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
+index da48c8a..837b460 100644
+--- a/drivers/net/wireless/rt2x00/rt2800pci.c
++++ b/drivers/net/wireless/rt2x00/rt2800pci.c
+@@ -422,7 +422,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
+ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
+ enum dev_state state)
+ {
+- int mask = (state == STATE_RADIO_IRQ_ON);
+ u32 reg;
+ unsigned long flags;
+
+@@ -436,25 +435,14 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
+ }
+
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+- rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, 0);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, 0);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, 0);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, 0);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, 0);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, 0);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, 0);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, 0);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, 0);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
+- rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
++ reg = 0;
++ if (state == STATE_RADIO_IRQ_ON) {
++ rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
++ rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
++ rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
++ rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
++ rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
++ }
+ rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
+index 6f91a14..3fda6b1 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
+@@ -196,6 +196,8 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
+ /* Allocate skb buffer to contain firmware */
+ /* info and tx descriptor info. */
+ skb = dev_alloc_skb(frag_length);
++ if (!skb)
++ return false;
+ skb_reserve(skb, extra_descoffset);
+ seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length -
+ extra_descoffset));
+@@ -573,6 +575,8 @@ static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd,
+
+ len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len);
+ skb = dev_alloc_skb(len);
++ if (!skb)
++ return false;
+ cb_desc = (struct rtl_tcb_desc *)(skb->cb);
+ cb_desc->queue_index = TXCMD_QUEUE;
+ cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL;
+diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
+index 0e6d04d..e3efb43 100644
+--- a/drivers/pci/msi.c
++++ b/drivers/pci/msi.c
+@@ -870,5 +870,15 @@ EXPORT_SYMBOL(pci_msi_enabled);
+
+ void pci_msi_init_pci_dev(struct pci_dev *dev)
+ {
++ int pos;
+ INIT_LIST_HEAD(&dev->msi_list);
++
++ /* Disable the msi hardware to avoid screaming interrupts
++ * during boot. This is the power on reset default so
++ * usually this should be a noop.
++ */
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++ if (pos)
++ msi_set_enable(dev, pos, 0);
++ msix_set_enable(dev, 0);
+ }
+diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
+index dfbd5a6..258fef2 100644
+--- a/drivers/pnp/quirks.c
++++ b/drivers/pnp/quirks.c
+@@ -295,6 +295,45 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
+ }
+ }
+
++#ifdef CONFIG_AMD_NB
++
++#include <asm/amd_nb.h>
++
++static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
++{
++ resource_size_t start, end;
++ struct pnp_resource *pnp_res;
++ struct resource *res;
++ struct resource mmconfig_res, *mmconfig;
++
++ mmconfig = amd_get_mmconfig_range(&mmconfig_res);
++ if (!mmconfig)
++ return;
++
++ list_for_each_entry(pnp_res, &dev->resources, list) {
++ res = &pnp_res->res;
++ if (res->end < mmconfig->start || res->start > mmconfig->end ||
++ (res->start == mmconfig->start && res->end == mmconfig->end))
++ continue;
++
++ dev_info(&dev->dev, FW_BUG
++ "%pR covers only part of AMD MMCONFIG area %pR; adding more reservations\n",
++ res, mmconfig);
++ if (mmconfig->start < res->start) {
++ start = mmconfig->start;
++ end = res->start - 1;
++ pnp_add_mem_resource(dev, start, end, 0);
++ }
++ if (mmconfig->end > res->end) {
++ start = res->end + 1;
++ end = mmconfig->end;
++ pnp_add_mem_resource(dev, start, end, 0);
++ }
++ break;
++ }
++}
++#endif
++
+ /*
+ * PnP Quirks
+ * Cards or devices that need some tweaking due to incomplete resource info
+@@ -322,6 +361,9 @@ static struct pnp_fixup pnp_fixups[] = {
+ /* PnP resources that might overlap PCI BARs */
+ {"PNP0c01", quirk_system_pci_resources},
+ {"PNP0c02", quirk_system_pci_resources},
++#ifdef CONFIG_AMD_NB
++ {"PNP0c01", quirk_amd_mmconfig_area},
++#endif
+ {""}
+ };
+
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index 8e28625..8a1c031 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -228,11 +228,11 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ alarm->time.tm_hour = now.tm_hour;
+
+ /* For simplicity, only support date rollover for now */
+- if (alarm->time.tm_mday == -1) {
++ if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
+ alarm->time.tm_mday = now.tm_mday;
+ missing = day;
+ }
+- if (alarm->time.tm_mon == -1) {
++ if ((unsigned)alarm->time.tm_mon >= 12) {
+ alarm->time.tm_mon = now.tm_mon;
+ if (missing == none)
+ missing = month;
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
+index beda04a..0794c72 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -65,6 +65,8 @@ static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
+
+ #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
+
++#define MAX_HBA_QUEUE_DEPTH 30000
++#define MAX_CHAIN_DEPTH 100000
+ static int max_queue_depth = -1;
+ module_param(max_queue_depth, int, 0);
+ MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
+@@ -2311,8 +2313,6 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
+ }
+ if (ioc->chain_dma_pool)
+ pci_pool_destroy(ioc->chain_dma_pool);
+- }
+- if (ioc->chain_lookup) {
+ free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
+ ioc->chain_lookup = NULL;
+ }
+@@ -2330,9 +2330,7 @@ static int
+ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+ {
+ struct mpt2sas_facts *facts;
+- u32 queue_size, queue_diff;
+ u16 max_sge_elements;
+- u16 num_of_reply_frames;
+ u16 chains_needed_per_io;
+ u32 sz, total_sz, reply_post_free_sz;
+ u32 retry_sz;
+@@ -2359,7 +2357,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+ max_request_credit = (max_queue_depth < facts->RequestCredit)
+ ? max_queue_depth : facts->RequestCredit;
+ else
+- max_request_credit = facts->RequestCredit;
++ max_request_credit = min_t(u16, facts->RequestCredit,
++ MAX_HBA_QUEUE_DEPTH);
+
+ ioc->hba_queue_depth = max_request_credit;
+ ioc->hi_priority_depth = facts->HighPriorityCredit;
+@@ -2400,50 +2399,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+ }
+ ioc->chains_needed_per_io = chains_needed_per_io;
+
+- /* reply free queue sizing - taking into account for events */
+- num_of_reply_frames = ioc->hba_queue_depth + 32;
+-
+- /* number of replies frames can't be a multiple of 16 */
+- /* decrease number of reply frames by 1 */
+- if (!(num_of_reply_frames % 16))
+- num_of_reply_frames--;
+-
+- /* calculate number of reply free queue entries
+- * (must be multiple of 16)
+- */
+-
+- /* (we know reply_free_queue_depth is not a multiple of 16) */
+- queue_size = num_of_reply_frames;
+- queue_size += 16 - (queue_size % 16);
+- ioc->reply_free_queue_depth = queue_size;
+-
+- /* reply descriptor post queue sizing */
+- /* this size should be the number of request frames + number of reply
+- * frames
+- */
+-
+- queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
+- /* round up to 16 byte boundary */
+- if (queue_size % 16)
+- queue_size += 16 - (queue_size % 16);
+-
+- /* check against IOC maximum reply post queue depth */
+- if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
+- queue_diff = queue_size -
+- facts->MaxReplyDescriptorPostQueueDepth;
++ /* reply free queue sizing - taking into account for 64 FW events */
++ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
+
+- /* round queue_diff up to multiple of 16 */
+- if (queue_diff % 16)
+- queue_diff += 16 - (queue_diff % 16);
+-
+- /* adjust hba_queue_depth, reply_free_queue_depth,
+- * and queue_size
+- */
+- ioc->hba_queue_depth -= (queue_diff / 2);
+- ioc->reply_free_queue_depth -= (queue_diff / 2);
+- queue_size = facts->MaxReplyDescriptorPostQueueDepth;
++ /* align the reply post queue on the next 16 count boundary */
++ if (!ioc->reply_free_queue_depth % 16)
++ ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
++ else
++ ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
++ 32 - (ioc->reply_free_queue_depth % 16);
++ if (ioc->reply_post_queue_depth >
++ facts->MaxReplyDescriptorPostQueueDepth) {
++ ioc->reply_post_queue_depth = min_t(u16,
++ (facts->MaxReplyDescriptorPostQueueDepth -
++ (facts->MaxReplyDescriptorPostQueueDepth % 16)),
++ (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
++ ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
++ ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
+ }
+- ioc->reply_post_queue_depth = queue_size;
++
+
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
+ "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
+@@ -2529,15 +2503,12 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+ "depth(%d)\n", ioc->name, ioc->request,
+ ioc->scsiio_depth));
+
+- /* loop till the allocation succeeds */
+- do {
+- sz = ioc->chain_depth * sizeof(struct chain_tracker);
+- ioc->chain_pages = get_order(sz);
+- ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+- GFP_KERNEL, ioc->chain_pages);
+- if (ioc->chain_lookup == NULL)
+- ioc->chain_depth -= 100;
+- } while (ioc->chain_lookup == NULL);
++ ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
++ sz = ioc->chain_depth * sizeof(struct chain_tracker);
++ ioc->chain_pages = get_order(sz);
++
++ ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
++ GFP_KERNEL, ioc->chain_pages);
+ ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
+ ioc->request_sz, 16, 0);
+ if (!ioc->chain_dma_pool) {
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index d570573..9bc6fb2 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -1007,8 +1007,8 @@ _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->free_chain_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+- printk(MPT2SAS_WARN_FMT "chain buffers not available\n",
+- ioc->name);
++ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
++ "available\n", ioc->name));
+ return NULL;
+ }
+ chain_req = list_entry(ioc->free_chain_list.next,
+@@ -6714,6 +6714,7 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
+ } else
+ sas_target_priv_data = NULL;
+ raid_device->responding = 1;
++ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ starget_printk(KERN_INFO, raid_device->starget,
+ "handle(0x%04x), wwid(0x%016llx)\n", handle,
+ (unsigned long long)raid_device->wwid);
+@@ -6724,16 +6725,16 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
+ */
+ _scsih_init_warpdrive_properties(ioc, raid_device);
+ if (raid_device->handle == handle)
+- goto out;
++ return;
+ printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
+ raid_device->handle);
+ raid_device->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
+- goto out;
++ return;
+ }
+ }
+- out:
++
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index fa3a591..4b63c73 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1074,6 +1074,10 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
+ SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
+ "cmd=0x%x\n", disk->disk_name, cmd));
+
++ error = scsi_verify_blk_ioctl(bdev, cmd);
++ if (error < 0)
++ return error;
++
+ /*
+ * If we are in the middle of error recovery, don't let anyone
+ * else try and use this device. Also, if error recovery fails, it
+@@ -1096,7 +1100,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
+ error = scsi_ioctl(sdp, cmd, p);
+ break;
+ default:
+- error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
++ error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
+ if (error != -ENOTTY)
+ break;
+ error = scsi_ioctl(sdp, cmd, p);
+@@ -1266,6 +1270,11 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+ {
+ struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
++ int ret;
++
++ ret = scsi_verify_blk_ioctl(bdev, cmd);
++ if (ret < 0)
++ return -ENOIOCTLCMD;
+
+ /*
+ * If we are in the middle of error recovery, don't let anyone
+@@ -1277,8 +1286,6 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ return -ENODEV;
+
+ if (sdev->host->hostt->compat_ioctl) {
+- int ret;
+-
+ ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
+
+ return ret;
+diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
+index b4543f5..36d1ed7 100644
+--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
++++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
+@@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
+ struct sym_lcb *lp = sym_lp(tp, sdev->lun);
+ unsigned long flags;
+
++ /* if slave_alloc returned before allocating a sym_lcb, return */
++ if (!lp)
++ return;
++
+ spin_lock_irqsave(np->s.host->host_lock, flags);
+
+ if (lp->busy_itlq || lp->busy_itl) {
+diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
+index 831468b..2e8c1be 100644
+--- a/drivers/target/target_core_cdb.c
++++ b/drivers/target/target_core_cdb.c
+@@ -94,6 +94,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
+ buf[2] = dev->transport->get_device_rev(dev);
+
+ /*
++ * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
++ *
++ * SPC4 says:
++ * A RESPONSE DATA FORMAT field set to 2h indicates that the
++ * standard INQUIRY data is in the format defined in this
++ * standard. Response data format values less than 2h are
++ * obsolete. Response data format values greater than 2h are
++ * reserved.
++ */
++ buf[3] = 2;
++
++ /*
+ * Enable SCCS and TPGS fields for Emulated ALUA
+ */
+ if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 0257658..e87d0eb 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -4353,6 +4353,7 @@ int transport_send_check_condition_and_sense(
+ case TCM_NON_EXISTENT_LUN:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL UNIT NOT SUPPORTED */
+@@ -4362,6 +4363,7 @@ int transport_send_check_condition_and_sense(
+ case TCM_SECTOR_COUNT_TOO_MANY:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* INVALID COMMAND OPERATION CODE */
+@@ -4370,6 +4372,7 @@ int transport_send_check_condition_and_sense(
+ case TCM_UNKNOWN_MODE_PAGE:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* INVALID FIELD IN CDB */
+@@ -4378,6 +4381,7 @@ int transport_send_check_condition_and_sense(
+ case TCM_CHECK_CONDITION_ABORT_CMD:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* BUS DEVICE RESET FUNCTION OCCURRED */
+@@ -4387,6 +4391,7 @@ int transport_send_check_condition_and_sense(
+ case TCM_INCORRECT_AMOUNT_OF_DATA:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* WRITE ERROR */
+@@ -4397,6 +4402,7 @@ int transport_send_check_condition_and_sense(
+ case TCM_INVALID_CDB_FIELD:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* INVALID FIELD IN CDB */
+@@ -4405,6 +4411,7 @@ int transport_send_check_condition_and_sense(
+ case TCM_INVALID_PARAMETER_LIST:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* INVALID FIELD IN PARAMETER LIST */
+@@ -4413,6 +4420,7 @@ int transport_send_check_condition_and_sense(
+ case TCM_UNEXPECTED_UNSOLICITED_DATA:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* WRITE ERROR */
+@@ -4423,6 +4431,7 @@ int transport_send_check_condition_and_sense(
+ case TCM_SERVICE_CRC_ERROR:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* PROTOCOL SERVICE CRC ERROR */
+@@ -4433,6 +4442,7 @@ int transport_send_check_condition_and_sense(
+ case TCM_SNACK_REJECTED:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ABORTED COMMAND */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ /* READ ERROR */
+@@ -4443,6 +4453,7 @@ int transport_send_check_condition_and_sense(
+ case TCM_WRITE_PROTECTED:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* DATA PROTECT */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
+ /* WRITE PROTECTED */
+@@ -4451,6 +4462,7 @@ int transport_send_check_condition_and_sense(
+ case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* UNIT ATTENTION */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
+ core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
+@@ -4460,6 +4472,7 @@ int transport_send_check_condition_and_sense(
+ case TCM_CHECK_CONDITION_NOT_READY:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* Not Ready */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
+ transport_get_sense_codes(cmd, &asc, &ascq);
+@@ -4470,6 +4483,7 @@ int transport_send_check_condition_and_sense(
+ default:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL UNIT COMMUNICATION FAILURE */
+diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
+index ede860f..a580b17 100644
+--- a/drivers/xen/xenbus/xenbus_xs.c
++++ b/drivers/xen/xenbus/xenbus_xs.c
+@@ -801,6 +801,12 @@ static int process_msg(void)
+ goto out;
+ }
+
++ if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
++ kfree(msg);
++ err = -EINVAL;
++ goto out;
++ }
++
+ body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
+ if (body == NULL) {
+ kfree(msg);
+diff --git a/fs/aio.c b/fs/aio.c
+index 78c514c..969beb0 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -476,14 +476,21 @@ static void kiocb_batch_init(struct kiocb_batch *batch, long total)
+ batch->count = total;
+ }
+
+-static void kiocb_batch_free(struct kiocb_batch *batch)
++static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
+ {
+ struct kiocb *req, *n;
+
++ if (list_empty(&batch->head))
++ return;
++
++ spin_lock_irq(&ctx->ctx_lock);
+ list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
+ list_del(&req->ki_batch);
++ list_del(&req->ki_list);
+ kmem_cache_free(kiocb_cachep, req);
++ ctx->reqs_active--;
+ }
++ spin_unlock_irq(&ctx->ctx_lock);
+ }
+
+ /*
+@@ -1742,7 +1749,7 @@ long do_io_submit(aio_context_t ctx_id, long nr,
+ }
+ blk_finish_plug(&plug);
+
+- kiocb_batch_free(&batch);
++ kiocb_batch_free(ctx, &batch);
+ put_ioctx(ctx);
+ return i ? i : ret;
+ }
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index f3670cf..63e4be4 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2914,18 +2914,33 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+ #define CIFS_DEFAULT_IOSIZE (1024 * 1024)
+
+ /*
+- * Windows only supports a max of 60k reads. Default to that when posix
+- * extensions aren't in force.
++ * Windows only supports a max of 60kb reads and 65535 byte writes. Default to
++ * those values when posix extensions aren't in force. In actuality here, we
++ * use 65536 to allow for a write that is a multiple of 4k. Most servers seem
++ * to be ok with the extra byte even though Windows doesn't send writes that
++ * are that large.
++ *
++ * Citation:
++ *
++ * http://blogs.msdn.com/b/openspecification/archive/2009/04/10/smb-maximum-transmit-buffer-size-and-performance-tuning.aspx
+ */
+ #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
++#define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
+
+ static unsigned int
+ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
+ {
+ __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
+ struct TCP_Server_Info *server = tcon->ses->server;
+- unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize :
+- CIFS_DEFAULT_IOSIZE;
++ unsigned int wsize;
++
++ /* start with specified wsize, or default */
++ if (pvolume_info->wsize)
++ wsize = pvolume_info->wsize;
++ else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
++ wsize = CIFS_DEFAULT_IOSIZE;
++ else
++ wsize = CIFS_DEFAULT_NON_POSIX_WSIZE;
+
+ /* can server support 24-bit write sizes? (via UNIX extensions) */
+ if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 89509b5..f7908ae 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -242,6 +242,7 @@ static void dentry_lru_add(struct dentry *dentry)
+ static void __dentry_lru_del(struct dentry *dentry)
+ {
+ list_del_init(&dentry->d_lru);
++ dentry->d_flags &= ~DCACHE_SHRINK_LIST;
+ dentry->d_sb->s_nr_dentry_unused--;
+ dentry_stat.nr_unused--;
+ }
+@@ -275,15 +276,15 @@ static void dentry_lru_prune(struct dentry *dentry)
+ }
+ }
+
+-static void dentry_lru_move_tail(struct dentry *dentry)
++static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
+ {
+ spin_lock(&dcache_lru_lock);
+ if (list_empty(&dentry->d_lru)) {
+- list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
++ list_add_tail(&dentry->d_lru, list);
+ dentry->d_sb->s_nr_dentry_unused++;
+ dentry_stat.nr_unused++;
+ } else {
+- list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
++ list_move_tail(&dentry->d_lru, list);
+ }
+ spin_unlock(&dcache_lru_lock);
+ }
+@@ -769,14 +770,18 @@ static void shrink_dentry_list(struct list_head *list)
+ }
+
+ /**
+- * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
+- * @sb: superblock to shrink dentry LRU.
+- * @count: number of entries to prune
+- * @flags: flags to control the dentry processing
++ * prune_dcache_sb - shrink the dcache
++ * @sb: superblock
++ * @count: number of entries to try to free
++ *
++ * Attempt to shrink the superblock dcache LRU by @count entries. This is
++ * done when we need more memory an called from the superblock shrinker
++ * function.
+ *
+- * If flags contains DCACHE_REFERENCED reference dentries will not be pruned.
++ * This function may fail to free any resources if all the dentries are in
++ * use.
+ */
+-static void __shrink_dcache_sb(struct super_block *sb, int count, int flags)
++void prune_dcache_sb(struct super_block *sb, int count)
+ {
+ struct dentry *dentry;
+ LIST_HEAD(referenced);
+@@ -795,18 +800,13 @@ relock:
+ goto relock;
+ }
+
+- /*
+- * If we are honouring the DCACHE_REFERENCED flag and the
+- * dentry has this flag set, don't free it. Clear the flag
+- * and put it back on the LRU.
+- */
+- if (flags & DCACHE_REFERENCED &&
+- dentry->d_flags & DCACHE_REFERENCED) {
++ if (dentry->d_flags & DCACHE_REFERENCED) {
+ dentry->d_flags &= ~DCACHE_REFERENCED;
+ list_move(&dentry->d_lru, &referenced);
+ spin_unlock(&dentry->d_lock);
+ } else {
+ list_move_tail(&dentry->d_lru, &tmp);
++ dentry->d_flags |= DCACHE_SHRINK_LIST;
+ spin_unlock(&dentry->d_lock);
+ if (!--count)
+ break;
+@@ -821,23 +821,6 @@ relock:
+ }
+
+ /**
+- * prune_dcache_sb - shrink the dcache
+- * @sb: superblock
+- * @nr_to_scan: number of entries to try to free
+- *
+- * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
+- * done when we need more memory an called from the superblock shrinker
+- * function.
+- *
+- * This function may fail to free any resources if all the dentries are in
+- * use.
+- */
+-void prune_dcache_sb(struct super_block *sb, int nr_to_scan)
+-{
+- __shrink_dcache_sb(sb, nr_to_scan, DCACHE_REFERENCED);
+-}
+-
+-/**
+ * shrink_dcache_sb - shrink dcache for a superblock
+ * @sb: superblock
+ *
+@@ -1091,7 +1074,7 @@ EXPORT_SYMBOL(have_submounts);
+ * drop the lock and return early due to latency
+ * constraints.
+ */
+-static int select_parent(struct dentry * parent)
++static int select_parent(struct dentry *parent, struct list_head *dispose)
+ {
+ struct dentry *this_parent;
+ struct list_head *next;
+@@ -1113,17 +1096,21 @@ resume:
+
+ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+
+- /*
+- * move only zero ref count dentries to the end
+- * of the unused list for prune_dcache
++ /*
++ * move only zero ref count dentries to the dispose list.
++ *
++ * Those which are presently on the shrink list, being processed
++ * by shrink_dentry_list(), shouldn't be moved. Otherwise the
++ * loop in shrink_dcache_parent() might not make any progress
++ * and loop forever.
+ */
+- if (!dentry->d_count) {
+- dentry_lru_move_tail(dentry);
+- found++;
+- } else {
++ if (dentry->d_count) {
+ dentry_lru_del(dentry);
++ } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
++ dentry_lru_move_list(dentry, dispose);
++ dentry->d_flags |= DCACHE_SHRINK_LIST;
++ found++;
+ }
+-
+ /*
+ * We can return to the caller if we have found some (this
+ * ensures forward progress). We'll be coming back to find
+@@ -1180,14 +1167,13 @@ rename_retry:
+ *
+ * Prune the dcache to remove unused children of the parent dentry.
+ */
+-
+ void shrink_dcache_parent(struct dentry * parent)
+ {
+- struct super_block *sb = parent->d_sb;
++ LIST_HEAD(dispose);
+ int found;
+
+- while ((found = select_parent(parent)) != 0)
+- __shrink_dcache_sb(sb, found, 0);
++ while ((found = select_parent(parent, &dispose)) != 0)
++ shrink_dentry_list(&dispose);
+ }
+ EXPORT_SYMBOL(shrink_dcache_parent);
+
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index a567968..ab25f57 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -182,19 +182,22 @@ setversion_out:
+ if (err)
+ return err;
+
+- if (get_user(n_blocks_count, (__u32 __user *)arg))
+- return -EFAULT;
++ if (get_user(n_blocks_count, (__u32 __user *)arg)) {
++ err = -EFAULT;
++ goto group_extend_out;
++ }
+
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+ EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
+ ext4_msg(sb, KERN_ERR,
+ "Online resizing not supported with bigalloc");
+- return -EOPNOTSUPP;
++ err = -EOPNOTSUPP;
++ goto group_extend_out;
+ }
+
+ err = mnt_want_write(filp->f_path.mnt);
+ if (err)
+- return err;
++ goto group_extend_out;
+
+ err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
+ if (EXT4_SB(sb)->s_journal) {
+@@ -204,9 +207,10 @@ setversion_out:
+ }
+ if (err == 0)
+ err = err2;
++
+ mnt_drop_write(filp->f_path.mnt);
++group_extend_out:
+ ext4_resize_end(sb);
+-
+ return err;
+ }
+
+@@ -267,19 +271,22 @@ mext_out:
+ return err;
+
+ if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
+- sizeof(input)))
+- return -EFAULT;
++ sizeof(input))) {
++ err = -EFAULT;
++ goto group_add_out;
++ }
+
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+ EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
+ ext4_msg(sb, KERN_ERR,
+ "Online resizing not supported with bigalloc");
+- return -EOPNOTSUPP;
++ err = -EOPNOTSUPP;
++ goto group_add_out;
+ }
+
+ err = mnt_want_write(filp->f_path.mnt);
+ if (err)
+- return err;
++ goto group_add_out;
+
+ err = ext4_group_add(sb, &input);
+ if (EXT4_SB(sb)->s_journal) {
+@@ -289,9 +296,10 @@ mext_out:
+ }
+ if (err == 0)
+ err = err2;
++
+ mnt_drop_write(filp->f_path.mnt);
++group_add_out:
+ ext4_resize_end(sb);
+-
+ return err;
+ }
+
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 3e1329e..9281dbe 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2006,17 +2006,16 @@ static int ext4_fill_flex_info(struct super_block *sb)
+ struct ext4_group_desc *gdp = NULL;
+ ext4_group_t flex_group_count;
+ ext4_group_t flex_group;
+- int groups_per_flex = 0;
++ unsigned int groups_per_flex = 0;
+ size_t size;
+ int i;
+
+ sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
+- groups_per_flex = 1 << sbi->s_log_groups_per_flex;
+-
+- if (groups_per_flex < 2) {
++ if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
+ sbi->s_log_groups_per_flex = 0;
+ return 1;
+ }
++ groups_per_flex = 1 << sbi->s_log_groups_per_flex;
+
+ /* We allocate both existing and potentially added groups */
+ flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index 281ae95..3db6b82 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -146,14 +146,19 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
+ {
+ struct bio *bio;
+
++ npg = min(npg, BIO_MAX_PAGES);
+ bio = bio_alloc(GFP_NOIO, npg);
+- if (!bio)
+- return NULL;
++ if (!bio && (current->flags & PF_MEMALLOC)) {
++ while (!bio && (npg /= 2))
++ bio = bio_alloc(GFP_NOIO, npg);
++ }
+
+- bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+- bio->bi_bdev = be->be_mdev;
+- bio->bi_end_io = end_io;
+- bio->bi_private = par;
++ if (bio) {
++ bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
++ bio->bi_bdev = be->be_mdev;
++ bio->bi_end_io = end_io;
++ bio->bi_private = par;
++ }
+ return bio;
+ }
+
+@@ -779,16 +784,13 @@ bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
+ static void free_blk_mountid(struct block_mount_id *mid)
+ {
+ if (mid) {
+- struct pnfs_block_dev *dev;
+- spin_lock(&mid->bm_lock);
+- while (!list_empty(&mid->bm_devlist)) {
+- dev = list_first_entry(&mid->bm_devlist,
+- struct pnfs_block_dev,
+- bm_node);
++ struct pnfs_block_dev *dev, *tmp;
++
++ /* No need to take bm_lock as we are last user freeing bm_devlist */
++ list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
+ list_del(&dev->bm_node);
+ bl_free_block_dev(dev);
+ }
+- spin_unlock(&mid->bm_lock);
+ kfree(mid);
+ }
+ }
+diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
+index 19fa7b0..c69682a 100644
+--- a/fs/nfs/blocklayout/extents.c
++++ b/fs/nfs/blocklayout/extents.c
+@@ -139,11 +139,13 @@ static int _set_range(struct my_tree *tree, int32_t tag, u64 s, u64 length)
+ }
+
+ /* Ensure that future operations on given range of tree will not malloc */
+-static int _preload_range(struct my_tree *tree, u64 offset, u64 length)
++static int _preload_range(struct pnfs_inval_markings *marks,
++ u64 offset, u64 length)
+ {
+ u64 start, end, s;
+ int count, i, used = 0, status = -ENOMEM;
+ struct pnfs_inval_tracking **storage;
++ struct my_tree *tree = &marks->im_tree;
+
+ dprintk("%s(%llu, %llu) enter\n", __func__, offset, length);
+ start = normalize(offset, tree->mtt_step_size);
+@@ -161,12 +163,11 @@ static int _preload_range(struct my_tree *tree, u64 offset, u64 length)
+ goto out_cleanup;
+ }
+
+- /* Now need lock - HOW??? */
+-
++ spin_lock(&marks->im_lock);
+ for (s = start; s < end; s += tree->mtt_step_size)
+ used += _add_entry(tree, s, INTERNAL_EXISTS, storage[used]);
++ spin_unlock(&marks->im_lock);
+
+- /* Unlock - HOW??? */
+ status = 0;
+
+ out_cleanup:
+@@ -286,7 +287,7 @@ int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
+
+ start = normalize(offset, marks->im_block_size);
+ end = normalize_up(offset + length, marks->im_block_size);
+- if (_preload_range(&marks->im_tree, start, end - start))
++ if (_preload_range(marks, start, end - start))
+ goto outerr;
+
+ spin_lock(&marks->im_lock);
+diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
+index 43926ad..54cea8a 100644
+--- a/fs/nfs/callback_proc.c
++++ b/fs/nfs/callback_proc.c
+@@ -339,7 +339,7 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
+ dprintk("%s enter. slotid %d seqid %d\n",
+ __func__, args->csa_slotid, args->csa_sequenceid);
+
+- if (args->csa_slotid > NFS41_BC_MAX_CALLBACKS)
++ if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
+ return htonl(NFS4ERR_BADSLOT);
+
+ slot = tbl->slots + args->csa_slotid;
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 606ef0f..c43a452 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -272,13 +272,13 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+ datasync);
+
+ ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+- if (ret)
+- return ret;
+ mutex_lock(&inode->i_mutex);
+
+ nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
+ have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+ status = nfs_commit_inode(inode, FLUSH_SYNC);
++ if (status >= 0 && ret < 0)
++ status = ret;
+ have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+ if (have_error)
+ ret = xchg(&ctx->error, 0);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index d9f4d78..055d702 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3430,19 +3430,6 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server)
+ */
+ #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
+
+-static void buf_to_pages(const void *buf, size_t buflen,
+- struct page **pages, unsigned int *pgbase)
+-{
+- const void *p = buf;
+-
+- *pgbase = offset_in_page(buf);
+- p -= *pgbase;
+- while (p < buf + buflen) {
+- *(pages++) = virt_to_page(p);
+- p += PAGE_CACHE_SIZE;
+- }
+-}
+-
+ static int buf_to_pages_noslab(const void *buf, size_t buflen,
+ struct page **pages, unsigned int *pgbase)
+ {
+@@ -3539,9 +3526,19 @@ out:
+ nfs4_set_cached_acl(inode, acl);
+ }
+
++/*
++ * The getxattr API returns the required buffer length when called with a
++ * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
++ * the required buf. On a NULL buf, we send a page of data to the server
++ * guessing that the ACL request can be serviced by a page. If so, we cache
++ * up to the page of ACL data, and the 2nd call to getxattr is serviced by
++ * the cache. If not so, we throw away the page, and cache the required
++ * length. The next getxattr call will then produce another round trip to
++ * the server, this time with the input buf of the required size.
++ */
+ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
+ {
+- struct page *pages[NFS4ACL_MAXPAGES];
++ struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
+ struct nfs_getaclargs args = {
+ .fh = NFS_FH(inode),
+ .acl_pages = pages,
+@@ -3556,41 +3553,60 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+- struct page *localpage = NULL;
+- int ret;
++ int ret = -ENOMEM, npages, i, acl_len = 0;
+
+- if (buflen < PAGE_SIZE) {
+- /* As long as we're doing a round trip to the server anyway,
+- * let's be prepared for a page of acl data. */
+- localpage = alloc_page(GFP_KERNEL);
+- resp_buf = page_address(localpage);
+- if (localpage == NULL)
+- return -ENOMEM;
+- args.acl_pages[0] = localpage;
+- args.acl_pgbase = 0;
+- args.acl_len = PAGE_SIZE;
+- } else {
+- resp_buf = buf;
+- buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
++ npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ /* As long as we're doing a round trip to the server anyway,
++ * let's be prepared for a page of acl data. */
++ if (npages == 0)
++ npages = 1;
++
++ for (i = 0; i < npages; i++) {
++ pages[i] = alloc_page(GFP_KERNEL);
++ if (!pages[i])
++ goto out_free;
++ }
++ if (npages > 1) {
++ /* for decoding across pages */
++ args.acl_scratch = alloc_page(GFP_KERNEL);
++ if (!args.acl_scratch)
++ goto out_free;
+ }
+- ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
++ args.acl_len = npages * PAGE_SIZE;
++ args.acl_pgbase = 0;
++ /* Let decode_getfacl know not to fail if the ACL data is larger than
++ * the page we send as a guess */
++ if (buf == NULL)
++ res.acl_flags |= NFS4_ACL_LEN_REQUEST;
++ resp_buf = page_address(pages[0]);
++
++ dprintk("%s buf %p buflen %ld npages %d args.acl_len %ld\n",
++ __func__, buf, buflen, npages, args.acl_len);
++ ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
++ &msg, &args.seq_args, &res.seq_res, 0);
+ if (ret)
+ goto out_free;
+- if (res.acl_len > args.acl_len)
+- nfs4_write_cached_acl(inode, NULL, res.acl_len);
++
++ acl_len = res.acl_len - res.acl_data_offset;
++ if (acl_len > args.acl_len)
++ nfs4_write_cached_acl(inode, NULL, acl_len);
+ else
+- nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
++ nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
++ acl_len);
+ if (buf) {
+ ret = -ERANGE;
+- if (res.acl_len > buflen)
++ if (acl_len > buflen)
+ goto out_free;
+- if (localpage)
+- memcpy(buf, resp_buf, res.acl_len);
++ _copy_from_pages(buf, pages, res.acl_data_offset,
++ res.acl_len);
+ }
+- ret = res.acl_len;
++ ret = acl_len;
+ out_free:
+- if (localpage)
+- __free_page(localpage);
++ for (i = 0; i < npages; i++)
++ if (pages[i])
++ __free_page(pages[i]);
++ if (args.acl_scratch)
++ __free_page(args.acl_scratch);
+ return ret;
+ }
+
+@@ -3621,6 +3637,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
+ nfs_zap_acl_cache(inode);
+ ret = nfs4_read_cached_acl(inode, buf, buflen);
+ if (ret != -ENOENT)
++ /* -ENOENT is returned if there is no ACL or if there is an ACL
++ * but no cached acl data, just the acl length */
+ return ret;
+ return nfs4_get_acl_uncached(inode, buf, buflen);
+ }
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index e6161b2..dcaf693 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -2517,11 +2517,13 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+- replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
++ replen = hdr.replen + op_decode_hdr_maxsz + 1;
+ encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
+
+ xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
+ args->acl_pages, args->acl_pgbase, args->acl_len);
++ xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE);
++
+ encode_nops(&hdr);
+ }
+
+@@ -4957,17 +4959,18 @@ decode_restorefh(struct xdr_stream *xdr)
+ }
+
+ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
+- size_t *acl_len)
++ struct nfs_getaclres *res)
+ {
+- __be32 *savep;
++ __be32 *savep, *bm_p;
+ uint32_t attrlen,
+ bitmap[3] = {0};
+ struct kvec *iov = req->rq_rcv_buf.head;
+ int status;
+
+- *acl_len = 0;
++ res->acl_len = 0;
+ if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
+ goto out;
++ bm_p = xdr->p;
+ if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
+ goto out;
+ if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
+@@ -4979,18 +4982,30 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
+ size_t hdrlen;
+ u32 recvd;
+
++ /* The bitmap (xdr len + bitmaps) and the attr xdr len words
++ * are stored with the acl data to handle the problem of
++ * variable length bitmaps.*/
++ xdr->p = bm_p;
++ res->acl_data_offset = be32_to_cpup(bm_p) + 2;
++ res->acl_data_offset <<= 2;
++
+ /* We ignore &savep and don't do consistency checks on
+ * the attr length. Let userspace figure it out.... */
+ hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
++ attrlen += res->acl_data_offset;
+ recvd = req->rq_rcv_buf.len - hdrlen;
+ if (attrlen > recvd) {
+- dprintk("NFS: server cheating in getattr"
+- " acl reply: attrlen %u > recvd %u\n",
++ if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
++ /* getxattr interface called with a NULL buf */
++ res->acl_len = attrlen;
++ goto out;
++ }
++ dprintk("NFS: acl reply: attrlen %u > recvd %u\n",
+ attrlen, recvd);
+ return -EINVAL;
+ }
+ xdr_read_pages(xdr, attrlen);
+- *acl_len = attrlen;
++ res->acl_len = attrlen;
+ } else
+ status = -EOPNOTSUPP;
+
+@@ -6028,7 +6043,7 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+- status = decode_getacl(xdr, rqstp, &res->acl_len);
++ status = decode_getacl(xdr, rqstp, res);
+
+ out:
+ return status;
+diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
+index c807ab9..55d0128 100644
+--- a/fs/nfs/objlayout/objio_osd.c
++++ b/fs/nfs/objlayout/objio_osd.c
+@@ -551,7 +551,8 @@ static const struct nfs_pageio_ops objio_pg_write_ops = {
+ static struct pnfs_layoutdriver_type objlayout_type = {
+ .id = LAYOUT_OSD2_OBJECTS,
+ .name = "LAYOUT_OSD2_OBJECTS",
+- .flags = PNFS_LAYOUTRET_ON_SETATTR,
++ .flags = PNFS_LAYOUTRET_ON_SETATTR |
++ PNFS_LAYOUTRET_ON_ERROR,
+
+ .alloc_layout_hdr = objlayout_alloc_layout_hdr,
+ .free_layout_hdr = objlayout_free_layout_hdr,
+diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
+index 72074e3..b3c2903 100644
+--- a/fs/nfs/objlayout/objlayout.c
++++ b/fs/nfs/objlayout/objlayout.c
+@@ -254,6 +254,8 @@ objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
+ oir->status = rdata->task.tk_status = status;
+ if (status >= 0)
+ rdata->res.count = status;
++ else
++ rdata->pnfs_error = status;
+ objlayout_iodone(oir);
+ /* must not use oir after this point */
+
+@@ -334,6 +336,8 @@ objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
+ if (status >= 0) {
+ wdata->res.count = status;
+ wdata->verf.committed = oir->committed;
++ } else {
++ wdata->pnfs_error = status;
+ }
+ objlayout_iodone(oir);
+ /* must not use oir after this point */
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 8e672a2..f881a63 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1178,6 +1178,15 @@ void pnfs_ld_write_done(struct nfs_write_data *data)
+ put_lseg(data->lseg);
+ data->lseg = NULL;
+ dprintk("pnfs write error = %d\n", data->pnfs_error);
++ if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
++ PNFS_LAYOUTRET_ON_ERROR) {
++ /* Don't lo_commit on error, Server will needs to
++ * preform a file recovery.
++ */
++ clear_bit(NFS_INO_LAYOUTCOMMIT,
++ &NFS_I(data->inode)->flags);
++ pnfs_return_layout(data->inode);
++ }
+ }
+ data->mds_ops->rpc_release(data);
+ }
+@@ -1267,6 +1276,9 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
+ put_lseg(data->lseg);
+ data->lseg = NULL;
+ dprintk("pnfs write error = %d\n", data->pnfs_error);
++ if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
++ PNFS_LAYOUTRET_ON_ERROR)
++ pnfs_return_layout(data->inode);
+
+ nfs_pageio_init_read_mds(&pgio, data->inode);
+
+diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
+index 1509530..53d593a 100644
+--- a/fs/nfs/pnfs.h
++++ b/fs/nfs/pnfs.h
+@@ -68,6 +68,7 @@ enum {
+ enum layoutdriver_policy_flags {
+ /* Should the pNFS client commit and return the layout upon a setattr */
+ PNFS_LAYOUTRET_ON_SETATTR = 1 << 0,
++ PNFS_LAYOUTRET_ON_ERROR = 1 << 1,
+ };
+
+ struct nfs4_deviceid_node;
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 1347774..3ada13c 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -909,10 +909,24 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
+ data->auth_flavor_len = 1;
+ data->version = version;
+ data->minorversion = 0;
++ security_init_mnt_opts(&data->lsm_opts);
+ }
+ return data;
+ }
+
++static void nfs_free_parsed_mount_data(struct nfs_parsed_mount_data *data)
++{
++ if (data) {
++ kfree(data->client_address);
++ kfree(data->mount_server.hostname);
++ kfree(data->nfs_server.export_path);
++ kfree(data->nfs_server.hostname);
++ kfree(data->fscache_uniq);
++ security_free_mnt_opts(&data->lsm_opts);
++ kfree(data);
++ }
++}
++
+ /*
+ * Sanity-check a server address provided by the mount command.
+ *
+@@ -2220,9 +2234,7 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
+ data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
+ mntfh = nfs_alloc_fhandle();
+ if (data == NULL || mntfh == NULL)
+- goto out_free_fh;
+-
+- security_init_mnt_opts(&data->lsm_opts);
++ goto out;
+
+ /* Validate the mount data */
+ error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
+@@ -2234,8 +2246,6 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
+ #ifdef CONFIG_NFS_V4
+ if (data->version == 4) {
+ mntroot = nfs4_try_mount(flags, dev_name, data);
+- kfree(data->client_address);
+- kfree(data->nfs_server.export_path);
+ goto out;
+ }
+ #endif /* CONFIG_NFS_V4 */
+@@ -2290,13 +2300,8 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
+ s->s_flags |= MS_ACTIVE;
+
+ out:
+- kfree(data->nfs_server.hostname);
+- kfree(data->mount_server.hostname);
+- kfree(data->fscache_uniq);
+- security_free_mnt_opts(&data->lsm_opts);
+-out_free_fh:
++ nfs_free_parsed_mount_data(data);
+ nfs_free_fhandle(mntfh);
+- kfree(data);
+ return mntroot;
+
+ out_err_nosb:
+@@ -2623,9 +2628,7 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
+
+ mntfh = nfs_alloc_fhandle();
+ if (data == NULL || mntfh == NULL)
+- goto out_free_fh;
+-
+- security_init_mnt_opts(&data->lsm_opts);
++ goto out;
+
+ /* Get a volume representation */
+ server = nfs4_create_server(data, mntfh);
+@@ -2677,13 +2680,10 @@ nfs4_remote_mount(struct file_system_type *fs_type, int flags,
+
+ s->s_flags |= MS_ACTIVE;
+
+- security_free_mnt_opts(&data->lsm_opts);
+ nfs_free_fhandle(mntfh);
+ return mntroot;
+
+ out:
+- security_free_mnt_opts(&data->lsm_opts);
+-out_free_fh:
+ nfs_free_fhandle(mntfh);
+ return ERR_PTR(error);
+
+@@ -2838,7 +2838,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
+
+ data = nfs_alloc_parsed_mount_data(4);
+ if (data == NULL)
+- goto out_free_data;
++ goto out;
+
+ /* Validate the mount data */
+ error = nfs4_validate_mount_data(raw_data, data, dev_name);
+@@ -2852,12 +2852,7 @@ static struct dentry *nfs4_mount(struct file_system_type *fs_type,
+ error = PTR_ERR(res);
+
+ out:
+- kfree(data->client_address);
+- kfree(data->nfs_server.export_path);
+- kfree(data->nfs_server.hostname);
+- kfree(data->fscache_uniq);
+-out_free_data:
+- kfree(data);
++ nfs_free_parsed_mount_data(data);
+ dprintk("<-- nfs4_mount() = %d%s\n", error,
+ error != 0 ? " [error]" : "");
+ return res;
+diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
+index 62f3b90..5f312ab 100644
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -87,7 +87,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
+ struct svc_expkey key;
+ struct svc_expkey *ek = NULL;
+
+- if (mesg[mlen-1] != '\n')
++ if (mlen < 1 || mesg[mlen-1] != '\n')
+ return -EINVAL;
+ mesg[mlen-1] = 0;
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 47e94e3..5abced7 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -3809,16 +3809,29 @@ nevermind:
+ deny->ld_type = NFS4_WRITE_LT;
+ }
+
++static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, clientid_t *clid, struct xdr_netobj *owner)
++{
++ struct nfs4_ol_stateid *lst;
++
++ if (!same_owner_str(&lo->lo_owner, owner, clid))
++ return false;
++ lst = list_first_entry(&lo->lo_owner.so_stateids,
++ struct nfs4_ol_stateid, st_perstateowner);
++ return lst->st_file->fi_inode == inode;
++}
++
+ static struct nfs4_lockowner *
+ find_lockowner_str(struct inode *inode, clientid_t *clid,
+ struct xdr_netobj *owner)
+ {
+ unsigned int hashval = lock_ownerstr_hashval(inode, clid->cl_id, owner);
++ struct nfs4_lockowner *lo;
+ struct nfs4_stateowner *op;
+
+ list_for_each_entry(op, &lock_ownerstr_hashtbl[hashval], so_strhash) {
+- if (same_owner_str(op, owner, clid))
+- return lockowner(op);
++ lo = lockowner(op);
++ if (same_lockowner_ino(lo, inode, clid, owner))
++ return lo;
+ }
+ return NULL;
+ }
+diff --git a/fs/notify/mark.c b/fs/notify/mark.c
+index e14587d..f104d56 100644
+--- a/fs/notify/mark.c
++++ b/fs/notify/mark.c
+@@ -135,9 +135,6 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
+
+ mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
+
+- /* 1 from caller and 1 for being on i_list/g_list */
+- BUG_ON(atomic_read(&mark->refcnt) < 2);
+-
+ spin_lock(&group->mark_lock);
+
+ if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
+@@ -182,6 +179,11 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)
+ iput(inode);
+
+ /*
++ * We don't necessarily have a ref on mark from caller so the above iput
++ * may have already destroyed it. Don't touch from now on.
++ */
++
++ /*
+ * it's possible that this group tried to destroy itself, but this
+ * this mark was simultaneously being freed by inode. If that's the
+ * case, we finish freeing the group here.
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 851ba3d..1fc1dca 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -194,65 +194,7 @@ static int proc_root_link(struct inode *inode, struct path *path)
+ return result;
+ }
+
+-static struct mm_struct *__check_mem_permission(struct task_struct *task)
+-{
+- struct mm_struct *mm;
+-
+- mm = get_task_mm(task);
+- if (!mm)
+- return ERR_PTR(-EINVAL);
+-
+- /*
+- * A task can always look at itself, in case it chooses
+- * to use system calls instead of load instructions.
+- */
+- if (task == current)
+- return mm;
+-
+- /*
+- * If current is actively ptrace'ing, and would also be
+- * permitted to freshly attach with ptrace now, permit it.
+- */
+- if (task_is_stopped_or_traced(task)) {
+- int match;
+- rcu_read_lock();
+- match = (ptrace_parent(task) == current);
+- rcu_read_unlock();
+- if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
+- return mm;
+- }
+-
+- /*
+- * No one else is allowed.
+- */
+- mmput(mm);
+- return ERR_PTR(-EPERM);
+-}
+-
+-/*
+- * If current may access user memory in @task return a reference to the
+- * corresponding mm, otherwise ERR_PTR.
+- */
+-static struct mm_struct *check_mem_permission(struct task_struct *task)
+-{
+- struct mm_struct *mm;
+- int err;
+-
+- /*
+- * Avoid racing if task exec's as we might get a new mm but validate
+- * against old credentials.
+- */
+- err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+- if (err)
+- return ERR_PTR(err);
+-
+- mm = __check_mem_permission(task);
+- mutex_unlock(&task->signal->cred_guard_mutex);
+-
+- return mm;
+-}
+-
+-struct mm_struct *mm_for_maps(struct task_struct *task)
++static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+ {
+ struct mm_struct *mm;
+ int err;
+@@ -263,7 +205,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
+
+ mm = get_task_mm(task);
+ if (mm && mm != current->mm &&
+- !ptrace_may_access(task, PTRACE_MODE_READ)) {
++ !ptrace_may_access(task, mode)) {
+ mmput(mm);
+ mm = ERR_PTR(-EACCES);
+ }
+@@ -272,6 +214,11 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
+ return mm;
+ }
+
++struct mm_struct *mm_for_maps(struct task_struct *task)
++{
++ return mm_access(task, PTRACE_MODE_READ);
++}
++
+ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
+ {
+ int res = 0;
+@@ -816,38 +763,39 @@ static const struct file_operations proc_single_file_operations = {
+
+ static int mem_open(struct inode* inode, struct file* file)
+ {
+- file->private_data = (void*)((long)current->self_exec_id);
++ struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
++ struct mm_struct *mm;
++
++ if (!task)
++ return -ESRCH;
++
++ mm = mm_access(task, PTRACE_MODE_ATTACH);
++ put_task_struct(task);
++
++ if (IS_ERR(mm))
++ return PTR_ERR(mm);
++
+ /* OK to pass negative loff_t, we can catch out-of-range */
+ file->f_mode |= FMODE_UNSIGNED_OFFSET;
++ file->private_data = mm;
++
+ return 0;
+ }
+
+ static ssize_t mem_read(struct file * file, char __user * buf,
+ size_t count, loff_t *ppos)
+ {
+- struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
++ int ret;
+ char *page;
+ unsigned long src = *ppos;
+- int ret = -ESRCH;
+- struct mm_struct *mm;
++ struct mm_struct *mm = file->private_data;
+
+- if (!task)
+- goto out_no_task;
++ if (!mm)
++ return 0;
+
+- ret = -ENOMEM;
+ page = (char *)__get_free_page(GFP_TEMPORARY);
+ if (!page)
+- goto out;
+-
+- mm = check_mem_permission(task);
+- ret = PTR_ERR(mm);
+- if (IS_ERR(mm))
+- goto out_free;
+-
+- ret = -EIO;
+-
+- if (file->private_data != (void*)((long)current->self_exec_id))
+- goto out_put;
++ return -ENOMEM;
+
+ ret = 0;
+
+@@ -874,13 +822,7 @@ static ssize_t mem_read(struct file * file, char __user * buf,
+ }
+ *ppos = src;
+
+-out_put:
+- mmput(mm);
+-out_free:
+ free_page((unsigned long) page);
+-out:
+- put_task_struct(task);
+-out_no_task:
+ return ret;
+ }
+
+@@ -889,27 +831,15 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
+ {
+ int copied;
+ char *page;
+- struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+ unsigned long dst = *ppos;
+- struct mm_struct *mm;
++ struct mm_struct *mm = file->private_data;
+
+- copied = -ESRCH;
+- if (!task)
+- goto out_no_task;
++ if (!mm)
++ return 0;
+
+- copied = -ENOMEM;
+ page = (char *)__get_free_page(GFP_TEMPORARY);
+ if (!page)
+- goto out_task;
+-
+- mm = check_mem_permission(task);
+- copied = PTR_ERR(mm);
+- if (IS_ERR(mm))
+- goto out_free;
+-
+- copied = -EIO;
+- if (file->private_data != (void *)((long)current->self_exec_id))
+- goto out_mm;
++ return -ENOMEM;
+
+ copied = 0;
+ while (count > 0) {
+@@ -933,13 +863,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
+ }
+ *ppos = dst;
+
+-out_mm:
+- mmput(mm);
+-out_free:
+ free_page((unsigned long) page);
+-out_task:
+- put_task_struct(task);
+-out_no_task:
+ return copied;
+ }
+
+@@ -959,11 +883,20 @@ loff_t mem_lseek(struct file *file, loff_t offset, int orig)
+ return file->f_pos;
+ }
+
++static int mem_release(struct inode *inode, struct file *file)
++{
++ struct mm_struct *mm = file->private_data;
++
++ mmput(mm);
++ return 0;
++}
++
+ static const struct file_operations proc_mem_operations = {
+ .llseek = mem_lseek,
+ .read = mem_read,
+ .write = mem_write,
+ .open = mem_open,
++ .release = mem_release,
+ };
+
+ static ssize_t environ_read(struct file *file, char __user *buf,
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index e418c5a..7dcd2a2 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -518,6 +518,9 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
+ if (!page)
+ continue;
+
++ if (PageReserved(page))
++ continue;
++
+ /* Clear accessed and referenced bits. */
+ ptep_test_and_clear_young(vma, addr, pte);
+ ClearPageReferenced(page);
+diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c
+index 766b1d4..29166ec 100644
+--- a/fs/proc/uptime.c
++++ b/fs/proc/uptime.c
+@@ -11,15 +11,20 @@ static int uptime_proc_show(struct seq_file *m, void *v)
+ {
+ struct timespec uptime;
+ struct timespec idle;
++ cputime64_t idletime;
++ u64 nsec;
++ u32 rem;
+ int i;
+- cputime_t idletime = cputime_zero;
+
++ idletime = 0;
+ for_each_possible_cpu(i)
+ idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle);
+
+ do_posix_clock_monotonic_gettime(&uptime);
+ monotonic_to_bootbased(&uptime);
+- cputime_to_timespec(idletime, &idle);
++ nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
++ idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
++ idle.tv_nsec = rem;
+ seq_printf(m, "%lu.%02lu %lu.%02lu\n",
+ (unsigned long) uptime.tv_sec,
+ (uptime.tv_nsec / (NSEC_PER_SEC / 100)),
+diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
+index 8d9c468..c9d2941 100644
+--- a/fs/ubifs/debug.h
++++ b/fs/ubifs/debug.h
+@@ -175,22 +175,23 @@ const char *dbg_key_str1(const struct ubifs_info *c,
+ const union ubifs_key *key);
+
+ /*
+- * DBGKEY macros require @dbg_lock to be held, which it is in the dbg message
+- * macros.
++ * TODO: these macros are now broken because there is no locking around them
++ * and we use a global buffer for the key string. This means that in case of
++ * concurrent execution we will end up with incorrect and messy key strings.
+ */
+ #define DBGKEY(key) dbg_key_str0(c, (key))
+ #define DBGKEY1(key) dbg_key_str1(c, (key))
+
+ extern spinlock_t dbg_lock;
+
+-#define ubifs_dbg_msg(type, fmt, ...) do { \
+- spin_lock(&dbg_lock); \
+- pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__); \
+- spin_unlock(&dbg_lock); \
+-} while (0)
++#define ubifs_dbg_msg(type, fmt, ...) \
++ pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__)
+
+ /* Just a debugging messages not related to any specific UBIFS subsystem */
+-#define dbg_msg(fmt, ...) ubifs_dbg_msg("msg", fmt, ##__VA_ARGS__)
++#define dbg_msg(fmt, ...) \
++ printk(KERN_DEBUG "UBIFS DBG (pid %d): %s: " fmt "\n", current->pid, \
++ __func__, ##__VA_ARGS__)
++
+ /* General messages */
+ #define dbg_gen(fmt, ...) ubifs_dbg_msg("gen", fmt, ##__VA_ARGS__)
+ /* Additional journal messages */
+diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
+index 8a24f0c..286a051 100644
+--- a/fs/xfs/xfs_discard.c
++++ b/fs/xfs/xfs_discard.c
+@@ -68,7 +68,7 @@ xfs_trim_extents(
+ * Look up the longest btree in the AGF and start with it.
+ */
+ error = xfs_alloc_lookup_le(cur, 0,
+- XFS_BUF_TO_AGF(agbp)->agf_longest, &i);
++ be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest), &i);
+ if (error)
+ goto out_del_cursor;
+
+@@ -84,7 +84,7 @@ xfs_trim_extents(
+ if (error)
+ goto out_del_cursor;
+ XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor);
+- ASSERT(flen <= XFS_BUF_TO_AGF(agbp)->agf_longest);
++ ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest));
+
+ /*
+ * Too small? Give up.
+diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
+index 1739726..451823c 100644
+--- a/include/acpi/acpi_numa.h
++++ b/include/acpi/acpi_numa.h
+@@ -15,6 +15,7 @@ extern int pxm_to_node(int);
+ extern int node_to_pxm(int);
+ extern void __acpi_map_pxm_to_node(int, int);
+ extern int acpi_map_pxm_to_node(int);
++extern unsigned char acpi_srat_revision;
+
+ #endif /* CONFIG_ACPI_NUMA */
+ #endif /* __ACP_NUMA_H */
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 94acd81..0ed1eb0 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -675,6 +675,9 @@ extern int blk_insert_cloned_request(struct request_queue *q,
+ struct request *rq);
+ extern void blk_delay_queue(struct request_queue *, unsigned long);
+ extern void blk_recount_segments(struct request_queue *, struct bio *);
++extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
++extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
++ unsigned int, void __user *);
+ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
+ unsigned int, void __user *);
+ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
+diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
+index 5c4abce..b936763 100644
+--- a/include/linux/crash_dump.h
++++ b/include/linux/crash_dump.h
+@@ -5,6 +5,7 @@
+ #include <linux/kexec.h>
+ #include <linux/device.h>
+ #include <linux/proc_fs.h>
++#include <linux/elf.h>
+
+ #define ELFCORE_ADDR_MAX (-1ULL)
+ #define ELFCORE_ADDR_ERR (-2ULL)
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index ed9f74f..4eb8c80 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -203,6 +203,7 @@ struct dentry_operations {
+
+ #define DCACHE_CANT_MOUNT 0x0100
+ #define DCACHE_GENOCIDE 0x0200
++#define DCACHE_SHRINK_LIST 0x0400
+
+ #define DCACHE_NFSFS_RENAMED 0x1000
+ /* this dentry has been "silly renamed" and has to be deleted on the last
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index b87068a..81572af 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -119,6 +119,8 @@ struct zone_reclaim_stat*
+ mem_cgroup_get_reclaim_stat_from_page(struct page *page);
+ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
+ struct task_struct *p);
++extern void mem_cgroup_replace_page_cache(struct page *oldpage,
++ struct page *newpage);
+
+ #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+ extern int do_swap_account;
+@@ -366,6 +368,10 @@ static inline
+ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
+ {
+ }
++static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
++ struct page *newpage)
++{
++}
+ #endif /* CONFIG_CGROUP_MEM_CONT */
+
+ #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 2a7c533..6c898af 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -602,11 +602,16 @@ struct nfs_getaclargs {
+ size_t acl_len;
+ unsigned int acl_pgbase;
+ struct page ** acl_pages;
++ struct page * acl_scratch;
+ struct nfs4_sequence_args seq_args;
+ };
+
++/* getxattr ACL interface flags */
++#define NFS4_ACL_LEN_REQUEST 0x0001 /* zero length getxattr buffer */
+ struct nfs_getaclres {
+ size_t acl_len;
++ size_t acl_data_offset;
++ int acl_flags;
+ struct nfs4_sequence_res seq_res;
+ };
+
+diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
+index b5d9657..411c412 100644
+--- a/include/linux/pci_regs.h
++++ b/include/linux/pci_regs.h
+@@ -392,7 +392,7 @@
+ #define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
+ #define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */
+ #define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
+-#define PCI_EXP_TYPE_RC_EC 0x10 /* Root Complex Event Collector */
++#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
+ #define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
+ #define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
+ #define PCI_EXP_DEVCAP 4 /* Device capabilities */
+diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
+index 9291ac3..6f10c9c 100644
+--- a/include/linux/shmem_fs.h
++++ b/include/linux/shmem_fs.h
+@@ -48,6 +48,7 @@ extern struct file *shmem_file_setup(const char *name,
+ loff_t size, unsigned long flags);
+ extern int shmem_zero_setup(struct vm_area_struct *);
+ extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
++extern void shmem_unlock_mapping(struct address_space *mapping);
+ extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+ pgoff_t index, gfp_t gfp_mask);
+ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
+index 85c50b4..c84e974 100644
+--- a/include/linux/sunrpc/svcsock.h
++++ b/include/linux/sunrpc/svcsock.h
+@@ -34,7 +34,7 @@ struct svc_sock {
+ /*
+ * Function prototypes.
+ */
+-void svc_close_all(struct list_head *);
++void svc_close_all(struct svc_serv *);
+ int svc_recv(struct svc_rqst *, long);
+ int svc_send(struct svc_rqst *);
+ void svc_drop(struct svc_rqst *);
+diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
+index a20970e..af70af3 100644
+--- a/include/linux/sunrpc/xdr.h
++++ b/include/linux/sunrpc/xdr.h
+@@ -191,6 +191,8 @@ extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
+ struct xdr_array2_desc *desc);
+ extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
+ struct xdr_array2_desc *desc);
++extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
++ size_t len);
+
+ /*
+ * Provide some simple tools for XDR buffer overflow-checking etc.
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index 1e22e12..67b3fa3 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -272,7 +272,7 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
+ #endif
+
+ extern int page_evictable(struct page *page, struct vm_area_struct *vma);
+-extern void scan_mapping_unevictable_pages(struct address_space *);
++extern void check_move_unevictable_pages(struct page **, int nr_pages);
+
+ extern unsigned long scan_unevictable_pages;
+ extern int scan_unevictable_handler(struct ctl_table *, int,
+diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
+index 4b752d5..45a7698 100644
+--- a/include/linux/videodev2.h
++++ b/include/linux/videodev2.h
+@@ -1131,6 +1131,7 @@ struct v4l2_querymenu {
+ #define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
+
+ /* User-class control IDs defined by V4L2 */
++#define V4L2_CID_MAX_CTRLS 1024
+ #define V4L2_CID_BASE (V4L2_CTRL_CLASS_USER | 0x900)
+ #define V4L2_CID_USER_BASE V4L2_CID_BASE
+ /* IDs reserved for driver specific controls */
+diff --git a/include/media/tuner.h b/include/media/tuner.h
+index 89c290b..29e1920 100644
+--- a/include/media/tuner.h
++++ b/include/media/tuner.h
+@@ -127,7 +127,6 @@
+ #define TUNER_PHILIPS_FMD1216MEX_MK3 78
+ #define TUNER_PHILIPS_FM1216MK5 79
+ #define TUNER_PHILIPS_FQ1216LME_MK3 80 /* Active loopthrough, no FM */
+-#define TUNER_XC4000 81 /* Xceive Silicon Tuner */
+
+ #define TUNER_PARTSNIC_PTI_5NF05 81
+ #define TUNER_PHILIPS_CU1216L 82
+@@ -136,6 +135,8 @@
+ #define TUNER_PHILIPS_FQ1236_MK5 85 /* NTSC, TDA9885, no FM radio */
+ #define TUNER_TENA_TNF_5337 86
+
++#define TUNER_XC4000 87 /* Xceive Silicon Tuner */
++
+ /* tv card specific */
+ #define TDA9887_PRESENT (1<<0)
+ #define TDA9887_PORT1_INACTIVE (1<<1)
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 6873c7d..a79886c 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -34,6 +34,7 @@
+ #define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE
+ /* Used by transport_send_check_condition_and_sense() */
+ #define SPC_SENSE_KEY_OFFSET 2
++#define SPC_ADD_SENSE_LEN_OFFSET 7
+ #define SPC_ASC_KEY_OFFSET 12
+ #define SPC_ASCQ_KEY_OFFSET 13
+ #define TRANSPORT_IQN_LEN 224
+diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
+index f6f07aa..7cdfca2 100644
+--- a/include/xen/interface/io/xs_wire.h
++++ b/include/xen/interface/io/xs_wire.h
+@@ -87,4 +87,7 @@ struct xenstore_domain_interface {
+ XENSTORE_RING_IDX rsp_cons, rsp_prod;
+ };
+
++/* Violating this is very bad. See docs/misc/xenstore.txt. */
++#define XENSTORE_PAYLOAD_MAX 4096
++
+ #endif /* _XS_WIRE_H */
+diff --git a/init/do_mounts.c b/init/do_mounts.c
+index 0f6e1d9..db6e5ee 100644
+--- a/init/do_mounts.c
++++ b/init/do_mounts.c
+@@ -398,15 +398,42 @@ out:
+ }
+
+ #ifdef CONFIG_ROOT_NFS
++
++#define NFSROOT_TIMEOUT_MIN 5
++#define NFSROOT_TIMEOUT_MAX 30
++#define NFSROOT_RETRY_MAX 5
++
+ static int __init mount_nfs_root(void)
+ {
+ char *root_dev, *root_data;
++ unsigned int timeout;
++ int try, err;
+
+- if (nfs_root_data(&root_dev, &root_data) != 0)
+- return 0;
+- if (do_mount_root(root_dev, "nfs", root_mountflags, root_data) != 0)
++ err = nfs_root_data(&root_dev, &root_data);
++ if (err != 0)
+ return 0;
+- return 1;
++
++ /*
++ * The server or network may not be ready, so try several
++ * times. Stop after a few tries in case the client wants
++ * to fall back to other boot methods.
++ */
++ timeout = NFSROOT_TIMEOUT_MIN;
++ for (try = 1; ; try++) {
++ err = do_mount_root(root_dev, "nfs",
++ root_mountflags, root_data);
++ if (err == 0)
++ return 1;
++ if (try > NFSROOT_RETRY_MAX)
++ break;
++
++ /* Wait, in case the server refused us immediately */
++ ssleep(timeout);
++ timeout <<= 1;
++ if (timeout > NFSROOT_TIMEOUT_MAX)
++ timeout = NFSROOT_TIMEOUT_MAX;
++ }
++ return 0;
+ }
+ #endif
+
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 02ecf2c..b76be5b 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -870,9 +870,7 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+ case SHM_LOCK:
+ case SHM_UNLOCK:
+ {
+- struct file *uninitialized_var(shm_file);
+-
+- lru_add_drain_all(); /* drain pagevecs to lru lists */
++ struct file *shm_file;
+
+ shp = shm_lock_check(ns, shmid);
+ if (IS_ERR(shp)) {
+@@ -895,22 +893,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
+ err = security_shm_shmctl(shp, cmd);
+ if (err)
+ goto out_unlock;
+-
+- if(cmd==SHM_LOCK) {
++
++ shm_file = shp->shm_file;
++ if (is_file_hugepages(shm_file))
++ goto out_unlock;
++
++ if (cmd == SHM_LOCK) {
+ struct user_struct *user = current_user();
+- if (!is_file_hugepages(shp->shm_file)) {
+- err = shmem_lock(shp->shm_file, 1, user);
+- if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
+- shp->shm_perm.mode |= SHM_LOCKED;
+- shp->mlock_user = user;
+- }
++ err = shmem_lock(shm_file, 1, user);
++ if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
++ shp->shm_perm.mode |= SHM_LOCKED;
++ shp->mlock_user = user;
+ }
+- } else if (!is_file_hugepages(shp->shm_file)) {
+- shmem_lock(shp->shm_file, 0, shp->mlock_user);
+- shp->shm_perm.mode &= ~SHM_LOCKED;
+- shp->mlock_user = NULL;
++ goto out_unlock;
+ }
++
++ /* SHM_UNLOCK */
++ if (!(shp->shm_perm.mode & SHM_LOCKED))
++ goto out_unlock;
++ shmem_lock(shm_file, 0, shp->mlock_user);
++ shp->shm_perm.mode &= ~SHM_LOCKED;
++ shp->mlock_user = NULL;
++ get_file(shm_file);
+ shm_unlock(shp);
++ shmem_unlock_mapping(shm_file->f_mapping);
++ fput(shm_file);
+ goto out;
+ }
+ case IPC_RMID:
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index e5d8464..52fd049 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1077,6 +1077,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
+ /* Early boot. kretprobe_table_locks not yet initialized. */
+ return;
+
++ INIT_HLIST_HEAD(&empty_rp);
+ hash = hash_ptr(tk, KPROBE_HASH_BITS);
+ head = &kretprobe_inst_table[hash];
+ kretprobe_table_lock(hash, &flags);
+@@ -1085,7 +1086,6 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
+ recycle_rp_inst(ri, &empty_rp);
+ }
+ kretprobe_table_unlock(hash, &flags);
+- INIT_HLIST_HEAD(&empty_rp);
+ hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index b1e8943..25b4f4d 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -948,7 +948,7 @@ struct ftrace_func_probe {
+ };
+
+ enum {
+- FTRACE_ENABLE_CALLS = (1 << 0),
++ FTRACE_UPDATE_CALLS = (1 << 0),
+ FTRACE_DISABLE_CALLS = (1 << 1),
+ FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
+ FTRACE_START_FUNC_RET = (1 << 3),
+@@ -1519,7 +1519,7 @@ int ftrace_text_reserved(void *start, void *end)
+
+
+ static int
+-__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
++__ftrace_replace_code(struct dyn_ftrace *rec, int update)
+ {
+ unsigned long ftrace_addr;
+ unsigned long flag = 0UL;
+@@ -1527,17 +1527,17 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
+ ftrace_addr = (unsigned long)FTRACE_ADDR;
+
+ /*
+- * If we are enabling tracing:
++ * If we are updating calls:
+ *
+ * If the record has a ref count, then we need to enable it
+ * because someone is using it.
+ *
+ * Otherwise we make sure its disabled.
+ *
+- * If we are disabling tracing, then disable all records that
++ * If we are disabling calls, then disable all records that
+ * are enabled.
+ */
+- if (enable && (rec->flags & ~FTRACE_FL_MASK))
++ if (update && (rec->flags & ~FTRACE_FL_MASK))
+ flag = FTRACE_FL_ENABLED;
+
+ /* If the state of this record hasn't changed, then do nothing */
+@@ -1553,7 +1553,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
+ return ftrace_make_nop(NULL, rec, ftrace_addr);
+ }
+
+-static void ftrace_replace_code(int enable)
++static void ftrace_replace_code(int update)
+ {
+ struct dyn_ftrace *rec;
+ struct ftrace_page *pg;
+@@ -1567,7 +1567,7 @@ static void ftrace_replace_code(int enable)
+ if (rec->flags & FTRACE_FL_FREE)
+ continue;
+
+- failed = __ftrace_replace_code(rec, enable);
++ failed = __ftrace_replace_code(rec, update);
+ if (failed) {
+ ftrace_bug(failed, rec->ip);
+ /* Stop processing */
+@@ -1623,7 +1623,7 @@ static int __ftrace_modify_code(void *data)
+ */
+ function_trace_stop++;
+
+- if (*command & FTRACE_ENABLE_CALLS)
++ if (*command & FTRACE_UPDATE_CALLS)
+ ftrace_replace_code(1);
+ else if (*command & FTRACE_DISABLE_CALLS)
+ ftrace_replace_code(0);
+@@ -1691,7 +1691,7 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
+ return -ENODEV;
+
+ ftrace_start_up++;
+- command |= FTRACE_ENABLE_CALLS;
++ command |= FTRACE_UPDATE_CALLS;
+
+ /* ops marked global share the filter hashes */
+ if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
+@@ -1743,8 +1743,7 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
+ if (ops != &global_ops || !global_start_up)
+ ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+
+- if (!ftrace_start_up)
+- command |= FTRACE_DISABLE_CALLS;
++ command |= FTRACE_UPDATE_CALLS;
+
+ if (saved_ftrace_func != ftrace_trace_function) {
+ saved_ftrace_func = ftrace_trace_function;
+@@ -1766,7 +1765,7 @@ static void ftrace_startup_sysctl(void)
+ saved_ftrace_func = NULL;
+ /* ftrace_start_up is true if we want ftrace running */
+ if (ftrace_start_up)
+- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
++ ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ }
+
+ static void ftrace_shutdown_sysctl(void)
+@@ -2919,7 +2918,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
+ ret = ftrace_hash_move(ops, enable, orig_hash, hash);
+ if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
+ && ftrace_enabled)
+- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
++ ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+
+ mutex_unlock(&ftrace_lock);
+
+@@ -3107,7 +3106,7 @@ ftrace_regex_release(struct inode *inode, struct file *file)
+ orig_hash, iter->hash);
+ if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
+ && ftrace_enabled)
+- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
++ ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+
+ mutex_unlock(&ftrace_lock);
+ }
+diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
+index db110b8..f1539de 100644
+--- a/kernel/tracepoint.c
++++ b/kernel/tracepoint.c
+@@ -634,10 +634,11 @@ static int tracepoint_module_coming(struct module *mod)
+ int ret = 0;
+
+ /*
+- * We skip modules that tain the kernel, especially those with different
+- * module header (for forced load), to make sure we don't cause a crash.
++ * We skip modules that taint the kernel, especially those with different
++ * module headers (for forced load), to make sure we don't cause a crash.
++ * Staging and out-of-tree GPL modules are fine.
+ */
+- if (mod->taints)
++ if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
+ return 0;
+ mutex_lock(&tracepoints_mutex);
+ tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 5f0a3c9..90286a4 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -393,24 +393,11 @@ EXPORT_SYMBOL(filemap_write_and_wait_range);
+ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
+ {
+ int error;
+- struct mem_cgroup *memcg = NULL;
+
+ VM_BUG_ON(!PageLocked(old));
+ VM_BUG_ON(!PageLocked(new));
+ VM_BUG_ON(new->mapping);
+
+- /*
+- * This is not page migration, but prepare_migration and
+- * end_migration does enough work for charge replacement.
+- *
+- * In the longer term we probably want a specialized function
+- * for moving the charge from old to new in a more efficient
+- * manner.
+- */
+- error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
+- if (error)
+- return error;
+-
+ error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+ if (!error) {
+ struct address_space *mapping = old->mapping;
+@@ -432,13 +419,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
+ if (PageSwapBacked(new))
+ __inc_zone_page_state(new, NR_SHMEM);
+ spin_unlock_irq(&mapping->tree_lock);
++ /* mem_cgroup codes must not be called under tree_lock */
++ mem_cgroup_replace_page_cache(old, new);
+ radix_tree_preload_end();
+ if (freepage)
+ freepage(old);
+ page_cache_release(old);
+- mem_cgroup_end_migration(memcg, old, new, true);
+- } else {
+- mem_cgroup_end_migration(memcg, old, new, false);
+ }
+
+ return error;
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index b63f5f7..f538e9b 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3366,6 +3366,50 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
+ cgroup_release_and_wakeup_rmdir(&memcg->css);
+ }
+
++/*
++ * At replace page cache, newpage is not under any memcg but it's on
++ * LRU. So, this function doesn't touch res_counter but handles LRU
++ * in correct way. Both pages are locked so we cannot race with uncharge.
++ */
++void mem_cgroup_replace_page_cache(struct page *oldpage,
++ struct page *newpage)
++{
++ struct mem_cgroup *memcg;
++ struct page_cgroup *pc;
++ struct zone *zone;
++ enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
++ unsigned long flags;
++
++ if (mem_cgroup_disabled())
++ return;
++
++ pc = lookup_page_cgroup(oldpage);
++ /* fix accounting on old pages */
++ lock_page_cgroup(pc);
++ memcg = pc->mem_cgroup;
++ mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
++ ClearPageCgroupUsed(pc);
++ unlock_page_cgroup(pc);
++
++ if (PageSwapBacked(oldpage))
++ type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
++
++ zone = page_zone(newpage);
++ pc = lookup_page_cgroup(newpage);
++ /*
++ * Even if newpage->mapping was NULL before starting replacement,
++ * the newpage may be on LRU(or pagevec for LRU) already. We lock
++ * LRU while we overwrite pc->mem_cgroup.
++ */
++ spin_lock_irqsave(&zone->lru_lock, flags);
++ if (PageLRU(newpage))
++ del_page_from_lru_list(zone, newpage, page_lru(newpage));
++ __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
++ if (PageLRU(newpage))
++ add_page_to_lru_list(zone, newpage, page_lru(newpage));
++ spin_unlock_irqrestore(&zone->lru_lock, flags);
++}
++
+ #ifdef CONFIG_DEBUG_VM
+ static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
+ {
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 2b8ba3a..485be89 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5608,6 +5608,17 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
+ bool is_pageblock_removable_nolock(struct page *page)
+ {
+ struct zone *zone = page_zone(page);
++ unsigned long pfn = page_to_pfn(page);
++
++ /*
++ * We have to be careful here because we are iterating over memory
++ * sections which are not zone aware so we might end up outside of
++ * the zone but still within the section.
++ */
++ if (!zone || zone->zone_start_pfn > pfn ||
++ zone->zone_start_pfn + zone->spanned_pages <= pfn)
++ return false;
++
+ return __count_immobile_pages(zone, page, 0);
+ }
+
+diff --git a/mm/shmem.c b/mm/shmem.c
+index d672250..6c253f7 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -379,7 +379,7 @@ static int shmem_free_swap(struct address_space *mapping,
+ /*
+ * Pagevec may contain swap entries, so shuffle up pages before releasing.
+ */
+-static void shmem_pagevec_release(struct pagevec *pvec)
++static void shmem_deswap_pagevec(struct pagevec *pvec)
+ {
+ int i, j;
+
+@@ -389,7 +389,36 @@ static void shmem_pagevec_release(struct pagevec *pvec)
+ pvec->pages[j++] = page;
+ }
+ pvec->nr = j;
+- pagevec_release(pvec);
++}
++
++/*
++ * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
++ */
++void shmem_unlock_mapping(struct address_space *mapping)
++{
++ struct pagevec pvec;
++ pgoff_t indices[PAGEVEC_SIZE];
++ pgoff_t index = 0;
++
++ pagevec_init(&pvec, 0);
++ /*
++ * Minor point, but we might as well stop if someone else SHM_LOCKs it.
++ */
++ while (!mapping_unevictable(mapping)) {
++ /*
++ * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
++ * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
++ */
++ pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
++ PAGEVEC_SIZE, pvec.pages, indices);
++ if (!pvec.nr)
++ break;
++ index = indices[pvec.nr - 1] + 1;
++ shmem_deswap_pagevec(&pvec);
++ check_move_unevictable_pages(pvec.pages, pvec.nr);
++ pagevec_release(&pvec);
++ cond_resched();
++ }
+ }
+
+ /*
+@@ -440,7 +469,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+ }
+ unlock_page(page);
+ }
+- shmem_pagevec_release(&pvec);
++ shmem_deswap_pagevec(&pvec);
++ pagevec_release(&pvec);
+ mem_cgroup_uncharge_end();
+ cond_resched();
+ index++;
+@@ -470,7 +500,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+ continue;
+ }
+ if (index == start && indices[0] > end) {
+- shmem_pagevec_release(&pvec);
++ shmem_deswap_pagevec(&pvec);
++ pagevec_release(&pvec);
+ break;
+ }
+ mem_cgroup_uncharge_start();
+@@ -494,7 +525,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+ }
+ unlock_page(page);
+ }
+- shmem_pagevec_release(&pvec);
++ shmem_deswap_pagevec(&pvec);
++ pagevec_release(&pvec);
+ mem_cgroup_uncharge_end();
+ index++;
+ }
+@@ -1068,13 +1100,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
+ user_shm_unlock(inode->i_size, user);
+ info->flags &= ~VM_LOCKED;
+ mapping_clear_unevictable(file->f_mapping);
+- /*
+- * Ensure that a racing putback_lru_page() can see
+- * the pages of this mapping are evictable when we
+- * skip them due to !PageLRU during the scan.
+- */
+- smp_mb__after_clear_bit();
+- scan_mapping_unevictable_pages(file->f_mapping);
+ }
+ retval = 0;
+
+@@ -2446,6 +2471,10 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
+ return 0;
+ }
+
++void shmem_unlock_mapping(struct address_space *mapping)
++{
++}
++
+ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+ {
+ truncate_inode_pages_range(inode->i_mapping, lstart, lend);
+diff --git a/mm/slub.c b/mm/slub.c
+index ed3334d..1a919f0 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2166,6 +2166,11 @@ redo:
+ goto new_slab;
+ }
+
++ /* must check again c->freelist in case of cpu migration or IRQ */
++ object = c->freelist;
++ if (object)
++ goto load_freelist;
++
+ stat(s, ALLOC_SLOWPATH);
+
+ do {
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index f54a05b..cb33d9c 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -636,7 +636,7 @@ redo:
+ * When racing with an mlock or AS_UNEVICTABLE clearing
+ * (page is unlocked) make sure that if the other thread
+ * does not observe our setting of PG_lru and fails
+- * isolation/check_move_unevictable_page,
++ * isolation/check_move_unevictable_pages,
+ * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
+ * the page back to the evictable list.
+ *
+@@ -3353,97 +3353,59 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
+ return 1;
+ }
+
++#ifdef CONFIG_SHMEM
+ /**
+- * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
+- * @page: page to check evictability and move to appropriate lru list
+- * @zone: zone page is in
++ * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
++ * @pages: array of pages to check
++ * @nr_pages: number of pages to check
+ *
+- * Checks a page for evictability and moves the page to the appropriate
+- * zone lru list.
++ * Checks pages for evictability and moves them to the appropriate lru list.
+ *
+- * Restrictions: zone->lru_lock must be held, page must be on LRU and must
+- * have PageUnevictable set.
++ * This function is only used for SysV IPC SHM_UNLOCK.
+ */
+-static void check_move_unevictable_page(struct page *page, struct zone *zone)
++void check_move_unevictable_pages(struct page **pages, int nr_pages)
+ {
+- VM_BUG_ON(PageActive(page));
+-
+-retry:
+- ClearPageUnevictable(page);
+- if (page_evictable(page, NULL)) {
+- enum lru_list l = page_lru_base_type(page);
++ struct zone *zone = NULL;
++ int pgscanned = 0;
++ int pgrescued = 0;
++ int i;
+
+- __dec_zone_state(zone, NR_UNEVICTABLE);
+- list_move(&page->lru, &zone->lru[l].list);
+- mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
+- __inc_zone_state(zone, NR_INACTIVE_ANON + l);
+- __count_vm_event(UNEVICTABLE_PGRESCUED);
+- } else {
+- /*
+- * rotate unevictable list
+- */
+- SetPageUnevictable(page);
+- list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
+- mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
+- if (page_evictable(page, NULL))
+- goto retry;
+- }
+-}
++ for (i = 0; i < nr_pages; i++) {
++ struct page *page = pages[i];
++ struct zone *pagezone;
+
+-/**
+- * scan_mapping_unevictable_pages - scan an address space for evictable pages
+- * @mapping: struct address_space to scan for evictable pages
+- *
+- * Scan all pages in mapping. Check unevictable pages for
+- * evictability and move them to the appropriate zone lru list.
+- */
+-void scan_mapping_unevictable_pages(struct address_space *mapping)
+-{
+- pgoff_t next = 0;
+- pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
+- PAGE_CACHE_SHIFT;
+- struct zone *zone;
+- struct pagevec pvec;
++ pgscanned++;
++ pagezone = page_zone(page);
++ if (pagezone != zone) {
++ if (zone)
++ spin_unlock_irq(&zone->lru_lock);
++ zone = pagezone;
++ spin_lock_irq(&zone->lru_lock);
++ }
+
+- if (mapping->nrpages == 0)
+- return;
++ if (!PageLRU(page) || !PageUnevictable(page))
++ continue;
+
+- pagevec_init(&pvec, 0);
+- while (next < end &&
+- pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+- int i;
+- int pg_scanned = 0;
+-
+- zone = NULL;
+-
+- for (i = 0; i < pagevec_count(&pvec); i++) {
+- struct page *page = pvec.pages[i];
+- pgoff_t page_index = page->index;
+- struct zone *pagezone = page_zone(page);
+-
+- pg_scanned++;
+- if (page_index > next)
+- next = page_index;
+- next++;
+-
+- if (pagezone != zone) {
+- if (zone)
+- spin_unlock_irq(&zone->lru_lock);
+- zone = pagezone;
+- spin_lock_irq(&zone->lru_lock);
+- }
++ if (page_evictable(page, NULL)) {
++ enum lru_list lru = page_lru_base_type(page);
+
+- if (PageLRU(page) && PageUnevictable(page))
+- check_move_unevictable_page(page, zone);
++ VM_BUG_ON(PageActive(page));
++ ClearPageUnevictable(page);
++ __dec_zone_state(zone, NR_UNEVICTABLE);
++ list_move(&page->lru, &zone->lru[lru].list);
++ mem_cgroup_move_lists(page, LRU_UNEVICTABLE, lru);
++ __inc_zone_state(zone, NR_INACTIVE_ANON + lru);
++ pgrescued++;
+ }
+- if (zone)
+- spin_unlock_irq(&zone->lru_lock);
+- pagevec_release(&pvec);
+-
+- count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
+ }
+
++ if (zone) {
++ __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
++ __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
++ spin_unlock_irq(&zone->lru_lock);
++ }
+ }
++#endif /* CONFIG_SHMEM */
+
+ static void warn_scan_unevictable_pages(void)
+ {
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index ea10a51..73495f1 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -702,6 +702,8 @@ struct tpt_led_trigger {
+ * well be on the operating channel
+ * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
+ * determine if we are on the operating channel or not
++ * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning,
++ * gets only set in conjunction with SCAN_SW_SCANNING
+ * @SCAN_COMPLETED: Set for our scan work function when the driver reported
+ * that the scan completed.
+ * @SCAN_ABORTED: Set for our scan work function when the driver reported
+@@ -710,6 +712,7 @@ struct tpt_led_trigger {
+ enum {
+ SCAN_SW_SCANNING,
+ SCAN_HW_SCANNING,
++ SCAN_OFF_CHANNEL,
+ SCAN_COMPLETED,
+ SCAN_ABORTED,
+ };
+@@ -1140,14 +1143,10 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
+ void ieee80211_sched_scan_stopped_work(struct work_struct *work);
+
+ /* off-channel helpers */
+-bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
+-void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
+- bool tell_ap);
+-void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
+- bool offchannel_ps_enable);
++void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
++void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
+ void ieee80211_offchannel_return(struct ieee80211_local *local,
+- bool enable_beaconing,
+- bool offchannel_ps_disable);
++ bool enable_beaconing);
+ void ieee80211_hw_roc_setup(struct ieee80211_local *local);
+
+ /* interface handling */
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index cae4435..a7536fd 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -92,47 +92,6 @@ static void ieee80211_reconfig_filter(struct work_struct *work)
+ ieee80211_configure_filter(local);
+ }
+
+-/*
+- * Returns true if we are logically configured to be on
+- * the operating channel AND the hardware-conf is currently
+- * configured on the operating channel. Compares channel-type
+- * as well.
+- */
+-bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
+-{
+- struct ieee80211_channel *chan, *scan_chan;
+- enum nl80211_channel_type channel_type;
+-
+- /* This logic needs to match logic in ieee80211_hw_config */
+- if (local->scan_channel) {
+- chan = local->scan_channel;
+- /* If scanning on oper channel, use whatever channel-type
+- * is currently in use.
+- */
+- if (chan == local->oper_channel)
+- channel_type = local->_oper_channel_type;
+- else
+- channel_type = NL80211_CHAN_NO_HT;
+- } else if (local->tmp_channel) {
+- chan = scan_chan = local->tmp_channel;
+- channel_type = local->tmp_channel_type;
+- } else {
+- chan = local->oper_channel;
+- channel_type = local->_oper_channel_type;
+- }
+-
+- if (chan != local->oper_channel ||
+- channel_type != local->_oper_channel_type)
+- return false;
+-
+- /* Check current hardware-config against oper_channel. */
+- if ((local->oper_channel != local->hw.conf.channel) ||
+- (local->_oper_channel_type != local->hw.conf.channel_type))
+- return false;
+-
+- return true;
+-}
+-
+ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
+ {
+ struct ieee80211_channel *chan, *scan_chan;
+@@ -145,9 +104,6 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
+
+ scan_chan = local->scan_channel;
+
+- /* If this off-channel logic ever changes, ieee80211_on_oper_channel
+- * may need to change as well.
+- */
+ offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
+ if (scan_chan) {
+ chan = scan_chan;
+@@ -158,19 +114,17 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
+ channel_type = local->_oper_channel_type;
+ else
+ channel_type = NL80211_CHAN_NO_HT;
+- } else if (local->tmp_channel) {
++ local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
++ } else if (local->tmp_channel &&
++ local->oper_channel != local->tmp_channel) {
+ chan = scan_chan = local->tmp_channel;
+ channel_type = local->tmp_channel_type;
++ local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
+ } else {
+ chan = local->oper_channel;
+ channel_type = local->_oper_channel_type;
+- }
+-
+- if (chan != local->oper_channel ||
+- channel_type != local->_oper_channel_type)
+- local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
+- else
+ local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
++ }
+
+ offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
+
+@@ -279,7 +233,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ if (local->quiescing || !ieee80211_sdata_running(sdata) ||
+- test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) {
++ test_bit(SCAN_SW_SCANNING, &local->scanning)) {
+ sdata->vif.bss_conf.enable_beacon = false;
+ } else {
+ /*
+diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
+index 3d41441..1b239be 100644
+--- a/net/mac80211/offchannel.c
++++ b/net/mac80211/offchannel.c
+@@ -18,14 +18,10 @@
+ #include "driver-trace.h"
+
+ /*
+- * Tell our hardware to disable PS.
+- * Optionally inform AP that we will go to sleep so that it will buffer
+- * the frames while we are doing off-channel work. This is optional
+- * because we *may* be doing work on-operating channel, and want our
+- * hardware unconditionally awake, but still let the AP send us normal frames.
++ * inform AP that we will go to sleep so that it will buffer the frames
++ * while we scan
+ */
+-static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
+- bool tell_ap)
++static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
+ {
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+@@ -46,8 +42,8 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ }
+
+- if (tell_ap && (!local->offchannel_ps_enabled ||
+- !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)))
++ if (!(local->offchannel_ps_enabled) ||
++ !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
+ /*
+ * If power save was enabled, no need to send a nullfunc
+ * frame because AP knows that we are sleeping. But if the
+@@ -82,9 +78,6 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
+ * we are sleeping, let's just enable power save mode in
+ * hardware.
+ */
+- /* TODO: Only set hardware if CONF_PS changed?
+- * TODO: Should we set offchannel_ps_enabled to false?
+- */
+ local->hw.conf.flags |= IEEE80211_CONF_PS;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ } else if (local->hw.conf.dynamic_ps_timeout > 0) {
+@@ -103,61 +96,63 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
+ ieee80211_sta_reset_conn_monitor(sdata);
+ }
+
+-void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
+- bool offchannel_ps_enable)
++void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
+ {
+ struct ieee80211_sub_if_data *sdata;
+
+- /*
+- * notify the AP about us leaving the channel and stop all
+- * STA interfaces.
+- */
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+
+- if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
+- set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
+-
+- /* Check to see if we should disable beaconing. */
++ /* disable beaconing */
+ if (sdata->vif.type == NL80211_IFTYPE_AP ||
+ sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+ sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
+ ieee80211_bss_info_change_notify(
+ sdata, BSS_CHANGED_BEACON_ENABLED);
+
+- if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
++ /*
++ * only handle non-STA interfaces here, STA interfaces
++ * are handled in ieee80211_offchannel_stop_station(),
++ * e.g., from the background scan state machine.
++ *
++ * In addition, do not stop monitor interface to allow it to be
++ * used from user space controlled off-channel operations.
++ */
++ if (sdata->vif.type != NL80211_IFTYPE_STATION &&
++ sdata->vif.type != NL80211_IFTYPE_MONITOR) {
++ set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
+ netif_tx_stop_all_queues(sdata->dev);
+- if (offchannel_ps_enable &&
+- (sdata->vif.type == NL80211_IFTYPE_STATION) &&
+- sdata->u.mgd.associated)
+- ieee80211_offchannel_ps_enable(sdata, true);
+ }
+ }
+ mutex_unlock(&local->iflist_mtx);
+ }
+
+-void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
+- bool tell_ap)
++void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
+ {
+ struct ieee80211_sub_if_data *sdata;
+
++ /*
++ * notify the AP about us leaving the channel and stop all STA interfaces
++ */
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+
+- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+- sdata->u.mgd.associated)
+- ieee80211_offchannel_ps_enable(sdata, tell_ap);
++ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
++ set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
++ netif_tx_stop_all_queues(sdata->dev);
++ if (sdata->u.mgd.associated)
++ ieee80211_offchannel_ps_enable(sdata);
++ }
+ }
+ mutex_unlock(&local->iflist_mtx);
+ }
+
+ void ieee80211_offchannel_return(struct ieee80211_local *local,
+- bool enable_beaconing,
+- bool offchannel_ps_disable)
++ bool enable_beaconing)
+ {
+ struct ieee80211_sub_if_data *sdata;
+
+@@ -167,8 +162,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
+ continue;
+
+ /* Tell AP we're back */
+- if (offchannel_ps_disable &&
+- sdata->vif.type == NL80211_IFTYPE_STATION) {
++ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ if (sdata->u.mgd.associated)
+ ieee80211_offchannel_ps_disable(sdata);
+ }
+@@ -188,7 +182,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
+ netif_tx_wake_all_queues(sdata->dev);
+ }
+
+- /* Check to see if we should re-enable beaconing */
++ /* re-enable beaconing */
+ if (enable_beaconing &&
+ (sdata->vif.type == NL80211_IFTYPE_AP ||
+ sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index fb123e2..5c51607 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -421,10 +421,16 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
+ return RX_CONTINUE;
+
+ if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
+- test_bit(SCAN_SW_SCANNING, &local->scanning) ||
+ local->sched_scanning)
+ return ieee80211_scan_rx(rx->sdata, skb);
+
++ if (test_bit(SCAN_SW_SCANNING, &local->scanning)) {
++ /* drop all the other packets during a software scan anyway */
++ if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
++ dev_kfree_skb(skb);
++ return RX_QUEUED;
++ }
++
+ /* scanning finished during invoking of handlers */
+ I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
+ return RX_DROP_UNUSABLE;
+@@ -2858,7 +2864,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ local->dot11ReceivedFragmentCount++;
+
+ if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
+- test_bit(SCAN_SW_SCANNING, &local->scanning)))
++ test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
+ status->rx_flags |= IEEE80211_RX_IN_SCAN;
+
+ if (ieee80211_is_mgmt(fc))
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index 105436d..5279300 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -213,14 +213,6 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
+ if (bss)
+ ieee80211_rx_bss_put(sdata->local, bss);
+
+- /* If we are on-operating-channel, and this packet is for the
+- * current channel, pass the pkt on up the stack so that
+- * the rest of the stack can make use of it.
+- */
+- if (ieee80211_cfg_on_oper_channel(sdata->local)
+- && (channel == sdata->local->oper_channel))
+- return RX_CONTINUE;
+-
+ dev_kfree_skb(skb);
+ return RX_QUEUED;
+ }
+@@ -264,8 +256,6 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
+ bool was_hw_scan)
+ {
+ struct ieee80211_local *local = hw_to_local(hw);
+- bool on_oper_chan;
+- bool enable_beacons = false;
+
+ lockdep_assert_held(&local->mtx);
+
+@@ -298,25 +288,11 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
+ local->scanning = 0;
+ local->scan_channel = NULL;
+
+- on_oper_chan = ieee80211_cfg_on_oper_channel(local);
+-
+- if (was_hw_scan || !on_oper_chan)
+- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+- else
+- /* Set power back to normal operating levels. */
+- ieee80211_hw_config(local, 0);
+-
++ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ if (!was_hw_scan) {
+- bool on_oper_chan2;
+ ieee80211_configure_filter(local);
+ drv_sw_scan_complete(local);
+- on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
+- /* We should always be on-channel at this point. */
+- WARN_ON(!on_oper_chan2);
+- if (on_oper_chan2 && (on_oper_chan != on_oper_chan2))
+- enable_beacons = true;
+-
+- ieee80211_offchannel_return(local, enable_beacons, true);
++ ieee80211_offchannel_return(local, true);
+ }
+
+ ieee80211_recalc_idle(local);
+@@ -357,15 +333,13 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
+ */
+ drv_sw_scan_start(local);
+
++ ieee80211_offchannel_stop_beaconing(local);
++
+ local->leave_oper_channel_time = 0;
+ local->next_scan_state = SCAN_DECISION;
+ local->scan_channel_idx = 0;
+
+- /* We always want to use off-channel PS, even if we
+- * are not really leaving oper-channel. Don't
+- * tell the AP though, as long as we are on-channel.
+- */
+- ieee80211_offchannel_enable_all_ps(local, false);
++ drv_flush(local, false);
+
+ ieee80211_configure_filter(local);
+
+@@ -508,20 +482,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
+ }
+ mutex_unlock(&local->iflist_mtx);
+
+- next_chan = local->scan_req->channels[local->scan_channel_idx];
+-
+- if (ieee80211_cfg_on_oper_channel(local)) {
+- /* We're currently on operating channel. */
+- if (next_chan == local->oper_channel)
+- /* We don't need to move off of operating channel. */
+- local->next_scan_state = SCAN_SET_CHANNEL;
+- else
+- /*
+- * We do need to leave operating channel, as next
+- * scan is somewhere else.
+- */
+- local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
+- } else {
++ if (local->scan_channel) {
+ /*
+ * we're currently scanning a different channel, let's
+ * see if we can scan another channel without interfering
+@@ -537,6 +498,7 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
+ *
+ * Otherwise switch back to the operating channel.
+ */
++ next_chan = local->scan_req->channels[local->scan_channel_idx];
+
+ bad_latency = time_after(jiffies +
+ ieee80211_scan_get_channel_time(next_chan),
+@@ -554,6 +516,12 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
+ local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
+ else
+ local->next_scan_state = SCAN_SET_CHANNEL;
++ } else {
++ /*
++ * we're on the operating channel currently, let's
++ * leave that channel now to scan another one
++ */
++ local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
+ }
+
+ *next_delay = 0;
+@@ -562,10 +530,9 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
+ static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
+ unsigned long *next_delay)
+ {
+- /* PS will already be in off-channel mode,
+- * we do that once at the beginning of scanning.
+- */
+- ieee80211_offchannel_stop_vifs(local, false);
++ ieee80211_offchannel_stop_station(local);
++
++ __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
+
+ /*
+ * What if the nullfunc frames didn't arrive?
+@@ -588,15 +555,15 @@ static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *loca
+ {
+ /* switch back to the operating channel */
+ local->scan_channel = NULL;
+- if (!ieee80211_cfg_on_oper_channel(local))
+- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
++ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+
+ /*
+- * Re-enable vifs and beaconing. Leave PS
+- * in off-channel state..will put that back
+- * on-channel at the end of scanning.
++ * Only re-enable station mode interface now; beaconing will be
++ * re-enabled once the full scan has been completed.
+ */
+- ieee80211_offchannel_return(local, true, false);
++ ieee80211_offchannel_return(local, false);
++
++ __clear_bit(SCAN_OFF_CHANNEL, &local->scanning);
+
+ *next_delay = HZ / 5;
+ local->next_scan_state = SCAN_DECISION;
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 1f8b120..eff1f4e 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -259,8 +259,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
+ if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
+ return TX_CONTINUE;
+
+- if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
+- test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
++ if (unlikely(test_bit(SCAN_OFF_CHANNEL, &tx->local->scanning)) &&
+ !ieee80211_is_probe_req(hdr->frame_control) &&
+ !ieee80211_is_nullfunc(hdr->frame_control))
+ /*
+diff --git a/net/mac80211/work.c b/net/mac80211/work.c
+index 6c53b6d..99165ef 100644
+--- a/net/mac80211/work.c
++++ b/net/mac80211/work.c
+@@ -899,26 +899,6 @@ static bool ieee80211_work_ct_coexists(enum nl80211_channel_type wk_ct,
+ return false;
+ }
+
+-static enum nl80211_channel_type
+-ieee80211_calc_ct(enum nl80211_channel_type wk_ct,
+- enum nl80211_channel_type oper_ct)
+-{
+- switch (wk_ct) {
+- case NL80211_CHAN_NO_HT:
+- return oper_ct;
+- case NL80211_CHAN_HT20:
+- if (oper_ct != NL80211_CHAN_NO_HT)
+- return oper_ct;
+- return wk_ct;
+- case NL80211_CHAN_HT40MINUS:
+- case NL80211_CHAN_HT40PLUS:
+- return wk_ct;
+- }
+- WARN_ON(1); /* shouldn't get here */
+- return wk_ct;
+-}
+-
+-
+ static void ieee80211_work_timer(unsigned long data)
+ {
+ struct ieee80211_local *local = (void *) data;
+@@ -969,52 +949,18 @@ static void ieee80211_work_work(struct work_struct *work)
+ }
+
+ if (!started && !local->tmp_channel) {
+- bool on_oper_chan;
+- bool tmp_chan_changed = false;
+- bool on_oper_chan2;
+- enum nl80211_channel_type wk_ct;
+- on_oper_chan = ieee80211_cfg_on_oper_channel(local);
+-
+- /* Work with existing channel type if possible. */
+- wk_ct = wk->chan_type;
+- if (wk->chan == local->hw.conf.channel)
+- wk_ct = ieee80211_calc_ct(wk->chan_type,
+- local->hw.conf.channel_type);
+-
+- if (local->tmp_channel)
+- if ((local->tmp_channel != wk->chan) ||
+- (local->tmp_channel_type != wk_ct))
+- tmp_chan_changed = true;
+-
+- local->tmp_channel = wk->chan;
+- local->tmp_channel_type = wk_ct;
+ /*
+- * Leave the station vifs in awake mode if they
+- * happen to be on the same channel as
+- * the requested channel.
++ * TODO: could optimize this by leaving the
++ * station vifs in awake mode if they
++ * happen to be on the same channel as
++ * the requested channel
+ */
+- on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
+- if (on_oper_chan != on_oper_chan2) {
+- if (on_oper_chan2) {
+- /* going off oper channel, PS too */
+- ieee80211_offchannel_stop_vifs(local,
+- true);
+- ieee80211_hw_config(local, 0);
+- } else {
+- /* going on channel, but leave PS
+- * off-channel. */
+- ieee80211_hw_config(local, 0);
+- ieee80211_offchannel_return(local,
+- true,
+- false);
+- }
+- } else if (tmp_chan_changed)
+- /* Still off-channel, but on some other
+- * channel, so update hardware.
+- * PS should already be off-channel.
+- */
+- ieee80211_hw_config(local, 0);
++ ieee80211_offchannel_stop_beaconing(local);
++ ieee80211_offchannel_stop_station(local);
+
++ local->tmp_channel = wk->chan;
++ local->tmp_channel_type = wk->chan_type;
++ ieee80211_hw_config(local, 0);
+ started = true;
+ wk->timeout = jiffies;
+ }
+@@ -1100,8 +1046,7 @@ static void ieee80211_work_work(struct work_struct *work)
+ * we still need to do a hardware config. Currently,
+ * we cannot be here while scanning, however.
+ */
+- if (!ieee80211_cfg_on_oper_channel(local))
+- ieee80211_hw_config(local, 0);
++ ieee80211_hw_config(local, 0);
+
+ /* At the least, we need to disable offchannel_ps,
+ * so just go ahead and run the entire offchannel
+@@ -1109,7 +1054,7 @@ static void ieee80211_work_work(struct work_struct *work)
+ * beaconing if we were already on-oper-channel
+ * as a future optimization.
+ */
+- ieee80211_offchannel_return(local, true, true);
++ ieee80211_offchannel_return(local, true);
+
+ /* give connection some time to breathe */
+ run_again(local, jiffies + HZ/2);
+diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
+index f614ce7..28a39bb 100644
+--- a/net/mac80211/wpa.c
++++ b/net/mac80211/wpa.c
+@@ -106,7 +106,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
+ if (status->flag & RX_FLAG_MMIC_ERROR)
+ goto mic_fail;
+
+- if (!(status->flag & RX_FLAG_IV_STRIPPED))
++ if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
+ goto update_iv;
+
+ return RX_CONTINUE;
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 6e03888..d4ad50e 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -167,6 +167,7 @@ svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
+
+ fail_free:
+ kfree(m->to_pool);
++ m->to_pool = NULL;
+ fail:
+ return -ENOMEM;
+ }
+@@ -287,7 +288,9 @@ svc_pool_map_put(void)
+ if (!--m->count) {
+ m->mode = SVC_POOL_DEFAULT;
+ kfree(m->to_pool);
++ m->to_pool = NULL;
+ kfree(m->pool_to);
++ m->pool_to = NULL;
+ m->npools = 0;
+ }
+
+@@ -527,17 +530,20 @@ svc_destroy(struct svc_serv *serv)
+ printk("svc_destroy: no threads for serv=%p!\n", serv);
+
+ del_timer_sync(&serv->sv_temptimer);
+-
+- svc_close_all(&serv->sv_tempsocks);
++ /*
++ * The set of xprts (contained in the sv_tempsocks and
++ * sv_permsocks lists) is now constant, since it is modified
++ * only by accepting new sockets (done by service threads in
++ * svc_recv) or aging old ones (done by sv_temptimer), or
++ * configuration changes (excluded by whatever locking the
++ * caller is using--nfsd_mutex in the case of nfsd). So it's
++ * safe to traverse those lists and shut everything down:
++ */
++ svc_close_all(serv);
+
+ if (serv->sv_shutdown)
+ serv->sv_shutdown(serv);
+
+- svc_close_all(&serv->sv_permsocks);
+-
+- BUG_ON(!list_empty(&serv->sv_permsocks));
+- BUG_ON(!list_empty(&serv->sv_tempsocks));
+-
+ cache_clean_deferred(serv);
+
+ if (svc_serv_is_pooled(serv))
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 447cd0e..9ed2cd0 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -893,14 +893,7 @@ void svc_delete_xprt(struct svc_xprt *xprt)
+ spin_lock_bh(&serv->sv_lock);
+ if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
+ list_del_init(&xprt->xpt_list);
+- /*
+- * The only time we're called while xpt_ready is still on a list
+- * is while the list itself is about to be destroyed (in
+- * svc_destroy). BUT svc_xprt_enqueue could still be attempting
+- * to add new entries to the sp_sockets list, so we can't leave
+- * a freed xprt on it.
+- */
+- list_del_init(&xprt->xpt_ready);
++ BUG_ON(!list_empty(&xprt->xpt_ready));
+ if (test_bit(XPT_TEMP, &xprt->xpt_flags))
+ serv->sv_tmpcnt--;
+ spin_unlock_bh(&serv->sv_lock);
+@@ -928,22 +921,48 @@ void svc_close_xprt(struct svc_xprt *xprt)
+ }
+ EXPORT_SYMBOL_GPL(svc_close_xprt);
+
+-void svc_close_all(struct list_head *xprt_list)
++static void svc_close_list(struct list_head *xprt_list)
++{
++ struct svc_xprt *xprt;
++
++ list_for_each_entry(xprt, xprt_list, xpt_list) {
++ set_bit(XPT_CLOSE, &xprt->xpt_flags);
++ set_bit(XPT_BUSY, &xprt->xpt_flags);
++ }
++}
++
++void svc_close_all(struct svc_serv *serv)
+ {
++ struct svc_pool *pool;
+ struct svc_xprt *xprt;
+ struct svc_xprt *tmp;
++ int i;
++
++ svc_close_list(&serv->sv_tempsocks);
++ svc_close_list(&serv->sv_permsocks);
+
++ for (i = 0; i < serv->sv_nrpools; i++) {
++ pool = &serv->sv_pools[i];
++
++ spin_lock_bh(&pool->sp_lock);
++ while (!list_empty(&pool->sp_sockets)) {
++ xprt = list_first_entry(&pool->sp_sockets, struct svc_xprt, xpt_ready);
++ list_del_init(&xprt->xpt_ready);
++ }
++ spin_unlock_bh(&pool->sp_lock);
++ }
+ /*
+- * The server is shutting down, and no more threads are running.
+- * svc_xprt_enqueue() might still be running, but at worst it
+- * will re-add the xprt to sp_sockets, which will soon get
+- * freed. So we don't bother with any more locking, and don't
+- * leave the close to the (nonexistent) server threads:
++ * At this point the sp_sockets lists will stay empty, since
++ * svc_enqueue will not add new entries without taking the
++ * sp_lock and checking XPT_BUSY.
+ */
+- list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
+- set_bit(XPT_CLOSE, &xprt->xpt_flags);
++ list_for_each_entry_safe(xprt, tmp, &serv->sv_tempsocks, xpt_list)
+ svc_delete_xprt(xprt);
+- }
++ list_for_each_entry_safe(xprt, tmp, &serv->sv_permsocks, xpt_list)
++ svc_delete_xprt(xprt);
++
++ BUG_ON(!list_empty(&serv->sv_permsocks));
++ BUG_ON(!list_empty(&serv->sv_tempsocks));
+ }
+
+ /*
+diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
+index 277ebd4..593f4c6 100644
+--- a/net/sunrpc/xdr.c
++++ b/net/sunrpc/xdr.c
+@@ -296,7 +296,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
+ * Copies data into an arbitrary memory location from an array of pages
+ * The copy is assumed to be non-overlapping.
+ */
+-static void
++void
+ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
+ {
+ struct page **pgfrom;
+@@ -324,6 +324,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
+
+ } while ((len -= copy) != 0);
+ }
++EXPORT_SYMBOL_GPL(_copy_from_pages);
+
+ /*
+ * xdr_shrink_bufhead
+diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
+index ec7afce..bccf07d 100644
+--- a/scripts/kconfig/streamline_config.pl
++++ b/scripts/kconfig/streamline_config.pl
+@@ -250,33 +250,61 @@ if ($kconfig) {
+ read_kconfig($kconfig);
+ }
+
++sub convert_vars {
++ my ($line, %vars) = @_;
++
++ my $process = "";
++
++ while ($line =~ s/^(.*?)(\$\((.*?)\))//) {
++ my $start = $1;
++ my $variable = $2;
++ my $var = $3;
++
++ if (defined($vars{$var})) {
++ $process .= $start . $vars{$var};
++ } else {
++ $process .= $start . $variable;
++ }
++ }
++
++ $process .= $line;
++
++ return $process;
++}
++
+ # Read all Makefiles to map the configs to the objects
+ foreach my $makefile (@makefiles) {
+
+- my $cont = 0;
++ my $line = "";
++ my %make_vars;
+
+ open(MIN,$makefile) || die "Can't open $makefile";
+ while (<MIN>) {
++ # if this line ends with a backslash, continue
++ chomp;
++ if (/^(.*)\\$/) {
++ $line .= $1;
++ next;
++ }
++
++ $line .= $_;
++ $_ = $line;
++ $line = "";
++
+ my $objs;
+
+- # is this a line after a line with a backslash?
+- if ($cont && /(\S.*)$/) {
+- $objs = $1;
+- }
+- $cont = 0;
++ $_ = convert_vars($_, %make_vars);
+
+ # collect objects after obj-$(CONFIG_FOO_BAR)
+ if (/obj-\$\((CONFIG_[^\)]*)\)\s*[+:]?=\s*(.*)/) {
+ $var = $1;
+ $objs = $2;
++
++ # check if variables are set
++ } elsif (/^\s*(\S+)\s*[:]?=\s*(.*\S)/) {
++ $make_vars{$1} = $2;
+ }
+ if (defined($objs)) {
+- # test if the line ends with a backslash
+- if ($objs =~ m,(.*)\\$,) {
+- $objs = $1;
+- $cont = 1;
+- }
+-
+ foreach my $obj (split /\s+/,$objs) {
+ $obj =~ s/-/_/g;
+ if ($obj =~ /(.*)\.o$/) {
+diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
+index f40a6af6..54e35c1 100644
+--- a/scripts/recordmcount.h
++++ b/scripts/recordmcount.h
+@@ -462,7 +462,7 @@ __has_rel_mcount(Elf_Shdr const *const relhdr, /* is SHT_REL or SHT_RELA */
+ succeed_file();
+ }
+ if (w(txthdr->sh_type) != SHT_PROGBITS ||
+- !(w(txthdr->sh_flags) & SHF_EXECINSTR))
++ !(_w(txthdr->sh_flags) & SHF_EXECINSTR))
+ return NULL;
+ return txtname;
+ }
+diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
+index 0d50df0..88a2788 100644
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -178,8 +178,8 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
+ strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
+
+ result = ima_store_template(entry, violation, inode);
+- if (!result)
++ if (!result || result == -EEXIST)
+ iint->flags |= IMA_MEASURED;
+- else
++ if (result < 0)
+ kfree(entry);
+ }
+diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
+index 8e28f04..55a6271 100644
+--- a/security/integrity/ima/ima_queue.c
++++ b/security/integrity/ima/ima_queue.c
+@@ -23,6 +23,8 @@
+ #include <linux/slab.h>
+ #include "ima.h"
+
++#define AUDIT_CAUSE_LEN_MAX 32
++
+ LIST_HEAD(ima_measurements); /* list of all measurements */
+
+ /* key: inode (before secure-hashing a file) */
+@@ -94,7 +96,8 @@ static int ima_pcr_extend(const u8 *hash)
+
+ result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash);
+ if (result != 0)
+- pr_err("IMA: Error Communicating to TPM chip\n");
++ pr_err("IMA: Error Communicating to TPM chip, result: %d\n",
++ result);
+ return result;
+ }
+
+@@ -106,14 +109,16 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
+ {
+ u8 digest[IMA_DIGEST_SIZE];
+ const char *audit_cause = "hash_added";
++ char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
+ int audit_info = 1;
+- int result = 0;
++ int result = 0, tpmresult = 0;
+
+ mutex_lock(&ima_extend_list_mutex);
+ if (!violation) {
+ memcpy(digest, entry->digest, sizeof digest);
+ if (ima_lookup_digest_entry(digest)) {
+ audit_cause = "hash_exists";
++ result = -EEXIST;
+ goto out;
+ }
+ }
+@@ -128,9 +133,11 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
+ if (violation) /* invalidate pcr */
+ memset(digest, 0xff, sizeof digest);
+
+- result = ima_pcr_extend(digest);
+- if (result != 0) {
+- audit_cause = "TPM error";
++ tpmresult = ima_pcr_extend(digest);
++ if (tpmresult != 0) {
++ snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
++ tpmresult);
++ audit_cause = tpm_audit_cause;
+ audit_info = 0;
+ }
+ out:
+diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
+index 4a9b4b2..867558c 100644
+--- a/security/tomoyo/util.c
++++ b/security/tomoyo/util.c
+@@ -492,13 +492,13 @@ static bool tomoyo_correct_word2(const char *string, size_t len)
+ if (d < '0' || d > '7' || e < '0' || e > '7')
+ break;
+ c = tomoyo_make_byte(c, d, e);
+- if (tomoyo_invalid(c))
+- continue; /* pattern is not \000 */
++ if (c <= ' ' || c >= 127)
++ continue;
+ }
+ goto out;
+ } else if (in_repetition && c == '/') {
+ goto out;
+- } else if (tomoyo_invalid(c)) {
++ } else if (c <= ' ' || c >= 127) {
+ goto out;
+ }
+ }
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index c2f79e6..5b2b75b 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2509,6 +2509,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
+ SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS 1101HA", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
++ SND_PCI_QUIRK(0x10de, 0xcb89, "Macbook Pro 7,1", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
+diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
+index 618ddad..368f0c5 100644
+--- a/sound/pci/hda/hda_local.h
++++ b/sound/pci/hda/hda_local.h
+@@ -487,7 +487,12 @@ static inline u32 get_wcaps(struct hda_codec *codec, hda_nid_t nid)
+ }
+
+ /* get the widget type from widget capability bits */
+-#define get_wcaps_type(wcaps) (((wcaps) & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT)
++static inline int get_wcaps_type(unsigned int wcaps)
++{
++ if (!wcaps)
++ return -1; /* invalid type */
++ return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
++}
+
+ static inline unsigned int get_wcaps_channels(u32 wcaps)
+ {
+diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
+index 2c981b5..254ab52 100644
+--- a/sound/pci/hda/hda_proc.c
++++ b/sound/pci/hda/hda_proc.c
+@@ -54,6 +54,8 @@ static const char *get_wid_type_name(unsigned int wid_value)
+ [AC_WID_BEEP] = "Beep Generator Widget",
+ [AC_WID_VENDOR] = "Vendor Defined Widget",
+ };
++ if (wid_value == -1)
++ return "UNKNOWN Widget";
+ wid_value &= 0xf;
+ if (names[wid_value])
+ return names[wid_value];
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index 70a7abd..5b0a9bb 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -920,16 +920,14 @@ static void cs_automute(struct hda_codec *codec)
+
+ /* mute speakers if spdif or hp jack is plugged in */
+ for (i = 0; i < cfg->speaker_outs; i++) {
++ int pin_ctl = hp_present ? 0 : PIN_OUT;
++ /* detect on spdif is specific to CS421x */
++ if (spdif_present && (spec->vendor_nid == CS421X_VENDOR_NID))
++ pin_ctl = 0;
++
+ nid = cfg->speaker_pins[i];
+ snd_hda_codec_write(codec, nid, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL,
+- hp_present ? 0 : PIN_OUT);
+- /* detect on spdif is specific to CS421x */
+- if (spec->vendor_nid == CS421X_VENDOR_NID) {
+- snd_hda_codec_write(codec, nid, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL,
+- spdif_present ? 0 : PIN_OUT);
+- }
++ AC_VERB_SET_PIN_WIDGET_CONTROL, pin_ctl);
+ }
+ if (spec->gpio_eapd_hp) {
+ unsigned int gpio = hp_present ?
+@@ -1771,30 +1769,19 @@ static int build_cs421x_output(struct hda_codec *codec)
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ struct snd_kcontrol *kctl;
+ int err;
+- char *name = "HP/Speakers";
++ char *name = "Master";
+
+ fix_volume_caps(codec, dac);
+- if (!spec->vmaster_sw) {
+- err = add_vmaster(codec, dac);
+- if (err < 0)
+- return err;
+- }
+
+ err = add_mute(codec, name, 0,
+ HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
+ if (err < 0)
+ return err;
+- err = snd_ctl_add_slave(spec->vmaster_sw, kctl);
+- if (err < 0)
+- return err;
+
+ err = add_volume(codec, name, 0,
+ HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
+ if (err < 0)
+ return err;
+- err = snd_ctl_add_slave(spec->vmaster_vol, kctl);
+- if (err < 0)
+- return err;
+
+ if (cfg->speaker_outs) {
+ err = snd_hda_ctl_add(codec, 0,
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 0de2119..7072251 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1120,8 +1120,6 @@ static const char * const cxt5045_models[CXT5045_MODELS] = {
+
+ static const struct snd_pci_quirk cxt5045_cfg_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x30d5, "HP 530", CXT5045_LAPTOP_HP530),
+- SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3000, "HP DV Series",
+- CXT5045_LAPTOP_HPSENSE),
+ SND_PCI_QUIRK(0x1179, 0xff31, "Toshiba P105", CXT5045_LAPTOP_MICSENSE),
+ SND_PCI_QUIRK(0x152d, 0x0753, "Benq R55E", CXT5045_BENQ),
+ SND_PCI_QUIRK(0x1734, 0x10ad, "Fujitsu Si1520", CXT5045_LAPTOP_MICSENSE),
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 616678f..f3c73a9 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -1631,7 +1631,7 @@ static const struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
+ "Dell Studio 1557", STAC_DELL_M6_DMIC),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
+- "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
++ "Dell Studio XPS 1645", STAC_DELL_M6_DMIC),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
+ "Dell Studio 1558", STAC_DELL_M6_DMIC),
+ {} /* terminator */
+@@ -4326,6 +4326,27 @@ static void stac_store_hints(struct hda_codec *codec)
+ }
+ }
+
++static void stac_issue_unsol_events(struct hda_codec *codec, int num_pins,
++ const hda_nid_t *pins)
++{
++ while (num_pins--)
++ stac_issue_unsol_event(codec, *pins++);
++}
++
++/* fake event to set up pins */
++static void stac_fake_hp_events(struct hda_codec *codec)
++{
++ struct sigmatel_spec *spec = codec->spec;
++
++ if (spec->autocfg.hp_outs)
++ stac_issue_unsol_events(codec, spec->autocfg.hp_outs,
++ spec->autocfg.hp_pins);
++ if (spec->autocfg.line_outs &&
++ spec->autocfg.line_out_pins[0] != spec->autocfg.hp_pins[0])
++ stac_issue_unsol_events(codec, spec->autocfg.line_outs,
++ spec->autocfg.line_out_pins);
++}
++
+ static int stac92xx_init(struct hda_codec *codec)
+ {
+ struct sigmatel_spec *spec = codec->spec;
+@@ -4376,10 +4397,7 @@ static int stac92xx_init(struct hda_codec *codec)
+ stac92xx_auto_set_pinctl(codec, spec->autocfg.line_out_pins[0],
+ AC_PINCTL_OUT_EN);
+ /* fake event to set up pins */
+- if (cfg->hp_pins[0])
+- stac_issue_unsol_event(codec, cfg->hp_pins[0]);
+- else if (cfg->line_out_pins[0])
+- stac_issue_unsol_event(codec, cfg->line_out_pins[0]);
++ stac_fake_hp_events(codec);
+ } else {
+ stac92xx_auto_init_multi_out(codec);
+ stac92xx_auto_init_hp_out(codec);
+@@ -5028,19 +5046,11 @@ static void stac927x_proc_hook(struct snd_info_buffer *buffer,
+ #ifdef CONFIG_PM
+ static int stac92xx_resume(struct hda_codec *codec)
+ {
+- struct sigmatel_spec *spec = codec->spec;
+-
+ stac92xx_init(codec);
+ snd_hda_codec_resume_amp(codec);
+ snd_hda_codec_resume_cache(codec);
+ /* fake event to set up pins again to override cached values */
+- if (spec->hp_detect) {
+- if (spec->autocfg.hp_pins[0])
+- stac_issue_unsol_event(codec, spec->autocfg.hp_pins[0]);
+- else if (spec->autocfg.line_out_pins[0])
+- stac_issue_unsol_event(codec,
+- spec->autocfg.line_out_pins[0]);
+- }
++ stac_fake_hp_events(codec);
+ return 0;
+ }
+
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index b513762..8d69e59 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -2200,7 +2200,10 @@ static int via_auto_create_loopback_switch(struct hda_codec *codec)
+ {
+ struct via_spec *spec = codec->spec;
+
+- if (!spec->aa_mix_nid || !spec->out_mix_path.depth)
++ if (!spec->aa_mix_nid)
++ return 0; /* no loopback switching available */
++ if (!(spec->out_mix_path.depth || spec->hp_mix_path.depth ||
++ spec->speaker_path.depth))
+ return 0; /* no loopback switching available */
+ if (!via_clone_control(spec, &via_aamix_ctl_enum))
+ return -ENOMEM;
+diff --git a/sound/pci/ice1712/amp.c b/sound/pci/ice1712/amp.c
+index e328cfb..e525da2 100644
+--- a/sound/pci/ice1712/amp.c
++++ b/sound/pci/ice1712/amp.c
+@@ -68,8 +68,11 @@ static int __devinit snd_vt1724_amp_init(struct snd_ice1712 *ice)
+
+ static int __devinit snd_vt1724_amp_add_controls(struct snd_ice1712 *ice)
+ {
+- /* we use pins 39 and 41 of the VT1616 for left and right read outputs */
+- snd_ac97_write_cache(ice->ac97, 0x5a, snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
++ if (ice->ac97)
++ /* we use pins 39 and 41 of the VT1616 for left and right
++ read outputs */
++ snd_ac97_write_cache(ice->ac97, 0x5a,
++ snd_ac97_read(ice->ac97, 0x5a) & ~0x8000);
+ return 0;
+ }
+
+diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c
+index 42d1ab1..915546a 100644
+--- a/sound/pci/oxygen/xonar_wm87x6.c
++++ b/sound/pci/oxygen/xonar_wm87x6.c
+@@ -177,6 +177,7 @@ static void wm8776_registers_init(struct oxygen *chip)
+ struct xonar_wm87x6 *data = chip->model_data;
+
+ wm8776_write(chip, WM8776_RESET, 0);
++ wm8776_write(chip, WM8776_PHASESWAP, WM8776_PH_MASK);
+ wm8776_write(chip, WM8776_DACCTRL1, WM8776_DZCEN |
+ WM8776_PL_LEFT_LEFT | WM8776_PL_RIGHT_RIGHT);
+ wm8776_write(chip, WM8776_DACMUTE, chip->dac_mute ? WM8776_DMUTE : 0);
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 81c6ede..08dcce5 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -17,6 +17,7 @@
+
+ #include <linux/gfp.h>
+ #include <linux/init.h>
++#include <linux/ratelimit.h>
+ #include <linux/usb.h>
+ #include <linux/usb/audio.h>
+
+@@ -458,8 +459,8 @@ static int retire_capture_urb(struct snd_usb_substream *subs,
+
+ for (i = 0; i < urb->number_of_packets; i++) {
+ cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
+- if (urb->iso_frame_desc[i].status) {
+- snd_printd(KERN_ERR "frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
++ if (urb->iso_frame_desc[i].status && printk_ratelimit()) {
++ snd_printdd("frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
+ // continue;
+ }
+ bytes = urb->iso_frame_desc[i].actual_length;
+diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
+index c400ade..1e7a47a 100644
+--- a/sound/usb/usx2y/usb_stream.c
++++ b/sound/usb/usx2y/usb_stream.c
+@@ -674,7 +674,7 @@ dotry:
+ inurb->transfer_buffer_length =
+ inurb->number_of_packets *
+ inurb->iso_frame_desc[0].length;
+- preempt_disable();
++
+ if (u == 0) {
+ int now;
+ struct usb_device *dev = inurb->dev;
+@@ -686,19 +686,17 @@ dotry:
+ }
+ err = usb_submit_urb(inurb, GFP_ATOMIC);
+ if (err < 0) {
+- preempt_enable();
+ snd_printk(KERN_ERR"usb_submit_urb(sk->inurb[%i])"
+ " returned %i\n", u, err);
+ return err;
+ }
+ err = usb_submit_urb(outurb, GFP_ATOMIC);
+ if (err < 0) {
+- preempt_enable();
+ snd_printk(KERN_ERR"usb_submit_urb(sk->outurb[%i])"
+ " returned %i\n", u, err);
+ return err;
+ }
+- preempt_enable();
++
+ if (inurb->start_frame != outurb->start_frame) {
+ snd_printd(KERN_DEBUG
+ "u[%i] start_frames differ in:%u out:%u\n",
diff --git a/1002_linux-3.2.3.patch b/1002_linux-3.2.3.patch
new file mode 100644
index 00000000..98925b00
--- /dev/null
+++ b/1002_linux-3.2.3.patch
@@ -0,0 +1,3760 @@
+diff --git a/Makefile b/Makefile
+index 2f684da..d45e887 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
+index aa64294..f5bbe0ef 100644
+--- a/arch/arm/mach-at91/setup.c
++++ b/arch/arm/mach-at91/setup.c
+@@ -27,9 +27,12 @@ EXPORT_SYMBOL(at91_soc_initdata);
+ void __init at91rm9200_set_type(int type)
+ {
+ if (type == ARCH_REVISON_9200_PQFP)
+- at91_soc_initdata.subtype = AT91_SOC_RM9200_BGA;
+- else
+ at91_soc_initdata.subtype = AT91_SOC_RM9200_PQFP;
++ else
++ at91_soc_initdata.subtype = AT91_SOC_RM9200_BGA;
++
++ pr_info("AT91: filled in soc subtype: %s\n",
++ at91_get_soc_subtype(&at91_soc_initdata));
+ }
+
+ void __init at91_init_irq_default(void)
+diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
+index a3e0c86..52af004 100644
+--- a/arch/arm/mach-ux500/Kconfig
++++ b/arch/arm/mach-ux500/Kconfig
+@@ -7,6 +7,7 @@ config UX500_SOC_COMMON
+ select HAS_MTU
+ select ARM_ERRATA_753970
+ select ARM_ERRATA_754322
++ select ARM_ERRATA_764369
+
+ menu "Ux500 SoC"
+
+diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c
+index 6826fae..306cff0 100644
+--- a/arch/arm/mach-ux500/board-mop500-sdi.c
++++ b/arch/arm/mach-ux500/board-mop500-sdi.c
+@@ -233,6 +233,8 @@ void __init snowball_sdi_init(void)
+ {
+ u32 periphid = 0x10480180;
+
++ /* On Snowball MMC_CAP_SD_HIGHSPEED isn't supported on sdi0 */
++ mop500_sdi0_data.capabilities &= ~MMC_CAP_SD_HIGHSPEED;
+ mop500_sdi2_data.capabilities |= MMC_CAP_MMC_HIGHSPEED;
+
+ /* On-board eMMC */
+diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
+index e70a737..40cc7aa 100644
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -271,10 +271,6 @@ ENDPROC(cpu_v7_do_resume)
+ * Initialise TLB, Caches, and MMU state ready to switch the MMU
+ * on. Return in r0 the new CP15 C1 control register setting.
+ *
+- * We automatically detect if we have a Harvard cache, and use the
+- * Harvard cache control instructions insead of the unified cache
+- * control instructions.
+- *
+ * This should be able to cover all ARMv7 cores.
+ *
+ * It is assumed that:
+@@ -373,9 +369,7 @@ __v7_setup:
+ #endif
+
+ 3: mov r10, #0
+-#ifdef HARVARD_CACHE
+ mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
+-#endif
+ dsb
+ #ifdef CONFIG_MMU
+ mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
+diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
+index 4203d10..c4ac15c 100644
+--- a/arch/m68k/atari/config.c
++++ b/arch/m68k/atari/config.c
+@@ -414,9 +414,9 @@ void __init config_atari(void)
+ * FDC val = 4 -> Supervisor only */
+ asm volatile ("\n"
+ " .chip 68030\n"
+- " pmove %0@,%/tt1\n"
++ " pmove %0,%/tt1\n"
+ " .chip 68k"
+- : : "a" (&tt1_val));
++ : : "m" (tt1_val));
+ } else {
+ asm volatile ("\n"
+ " .chip 68040\n"
+@@ -569,10 +569,10 @@ static void atari_reset(void)
+ : "d0");
+ } else
+ asm volatile ("\n"
+- " pmove %0@,%%tc\n"
++ " pmove %0,%%tc\n"
+ " jmp %1@"
+ : /* no outputs */
+- : "a" (&tc_val), "a" (reset_addr));
++ : "m" (tc_val), "a" (reset_addr));
+ }
+
+
+diff --git a/arch/m68k/kernel/process_mm.c b/arch/m68k/kernel/process_mm.c
+index 1bc223a..aa4ffb8 100644
+--- a/arch/m68k/kernel/process_mm.c
++++ b/arch/m68k/kernel/process_mm.c
+@@ -189,8 +189,8 @@ void flush_thread(void)
+ current->thread.fs = __USER_DS;
+ if (!FPU_IS_EMU)
+ asm volatile (".chip 68k/68881\n\t"
+- "frestore %0@\n\t"
+- ".chip 68k" : : "a" (&zero));
++ "frestore %0\n\t"
++ ".chip 68k" : : "m" (zero));
+ }
+
+ /*
+diff --git a/arch/m68k/kernel/process_no.c b/arch/m68k/kernel/process_no.c
+index 69c1803..5e1078c 100644
+--- a/arch/m68k/kernel/process_no.c
++++ b/arch/m68k/kernel/process_no.c
+@@ -163,8 +163,8 @@ void flush_thread(void)
+ #ifdef CONFIG_FPU
+ if (!FPU_IS_EMU)
+ asm volatile (".chip 68k/68881\n\t"
+- "frestore %0@\n\t"
+- ".chip 68k" : : "a" (&zero));
++ "frestore %0\n\t"
++ ".chip 68k" : : "m" (zero));
+ #endif
+ }
+
+diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
+index 89362f2..eb67469 100644
+--- a/arch/m68k/kernel/traps.c
++++ b/arch/m68k/kernel/traps.c
+@@ -552,13 +552,13 @@ static inline void bus_error030 (struct frame *fp)
+
+ #ifdef DEBUG
+ asm volatile ("ptestr %3,%2@,#7,%0\n\t"
+- "pmove %%psr,%1@"
+- : "=a&" (desc)
+- : "a" (&temp), "a" (addr), "d" (ssw));
++ "pmove %%psr,%1"
++ : "=a&" (desc), "=m" (temp)
++ : "a" (addr), "d" (ssw));
+ #else
+ asm volatile ("ptestr %2,%1@,#7\n\t"
+- "pmove %%psr,%0@"
+- : : "a" (&temp), "a" (addr), "d" (ssw));
++ "pmove %%psr,%0"
++ : "=m" (temp) : "a" (addr), "d" (ssw));
+ #endif
+ mmusr = temp;
+
+@@ -605,20 +605,18 @@ static inline void bus_error030 (struct frame *fp)
+ !(ssw & RW) ? "write" : "read", addr,
+ fp->ptregs.pc, ssw);
+ asm volatile ("ptestr #1,%1@,#0\n\t"
+- "pmove %%psr,%0@"
+- : /* no outputs */
+- : "a" (&temp), "a" (addr));
++ "pmove %%psr,%0"
++ : "=m" (temp)
++ : "a" (addr));
+ mmusr = temp;
+
+ printk ("level 0 mmusr is %#x\n", mmusr);
+ #if 0
+- asm volatile ("pmove %%tt0,%0@"
+- : /* no outputs */
+- : "a" (&tlong));
++ asm volatile ("pmove %%tt0,%0"
++ : "=m" (tlong));
+ printk("tt0 is %#lx, ", tlong);
+- asm volatile ("pmove %%tt1,%0@"
+- : /* no outputs */
+- : "a" (&tlong));
++ asm volatile ("pmove %%tt1,%0"
++ : "=m" (tlong));
+ printk("tt1 is %#lx\n", tlong);
+ #endif
+ #ifdef DEBUG
+@@ -668,13 +666,13 @@ static inline void bus_error030 (struct frame *fp)
+
+ #ifdef DEBUG
+ asm volatile ("ptestr #1,%2@,#7,%0\n\t"
+- "pmove %%psr,%1@"
+- : "=a&" (desc)
+- : "a" (&temp), "a" (addr));
++ "pmove %%psr,%1"
++ : "=a&" (desc), "=m" (temp)
++ : "a" (addr));
+ #else
+ asm volatile ("ptestr #1,%1@,#7\n\t"
+- "pmove %%psr,%0@"
+- : : "a" (&temp), "a" (addr));
++ "pmove %%psr,%0"
++ : "=m" (temp) : "a" (addr));
+ #endif
+ mmusr = temp;
+
+diff --git a/arch/m68k/mm/cache.c b/arch/m68k/mm/cache.c
+index 5437fff..5550aa4 100644
+--- a/arch/m68k/mm/cache.c
++++ b/arch/m68k/mm/cache.c
+@@ -52,9 +52,9 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
+ unsigned long *descaddr;
+
+ asm volatile ("ptestr %3,%2@,#7,%0\n\t"
+- "pmove %%psr,%1@"
+- : "=a&" (descaddr)
+- : "a" (&mmusr), "a" (vaddr), "d" (get_fs().seg));
++ "pmove %%psr,%1"
++ : "=a&" (descaddr), "=m" (mmusr)
++ : "a" (vaddr), "d" (get_fs().seg));
+ if (mmusr & (MMU_I|MMU_B|MMU_L))
+ return 0;
+ descaddr = phys_to_virt((unsigned long)descaddr);
+diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
+index 54a13aa..21f7385 100644
+--- a/arch/x86/include/asm/uv/uv_hub.h
++++ b/arch/x86/include/asm/uv/uv_hub.h
+@@ -318,13 +318,13 @@ uv_gpa_in_mmr_space(unsigned long gpa)
+ /* UV global physical address --> socket phys RAM */
+ static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
+ {
+- unsigned long paddr = gpa & uv_hub_info->gpa_mask;
++ unsigned long paddr;
+ unsigned long remap_base = uv_hub_info->lowmem_remap_base;
+ unsigned long remap_top = uv_hub_info->lowmem_remap_top;
+
+ gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
+ ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);
+- gpa = gpa & uv_hub_info->gpa_mask;
++ paddr = gpa & uv_hub_info->gpa_mask;
+ if (paddr >= remap_base && paddr < remap_base + remap_top)
+ paddr -= remap_base;
+ return paddr;
+diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
+index d494799..ac52c15 100644
+--- a/arch/x86/kernel/microcode_amd.c
++++ b/arch/x86/kernel/microcode_amd.c
+@@ -300,13 +300,33 @@ free_table:
+ return state;
+ }
+
++/*
++ * AMD microcode firmware naming convention, up to family 15h they are in
++ * the legacy file:
++ *
++ * amd-ucode/microcode_amd.bin
++ *
++ * This legacy file is always smaller than 2K in size.
++ *
++ * Starting at family 15h they are in family specific firmware files:
++ *
++ * amd-ucode/microcode_amd_fam15h.bin
++ * amd-ucode/microcode_amd_fam16h.bin
++ * ...
++ *
++ * These might be larger than 2K.
++ */
+ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
+ {
+- const char *fw_name = "amd-ucode/microcode_amd.bin";
++ char fw_name[36] = "amd-ucode/microcode_amd.bin";
+ const struct firmware *fw;
+ enum ucode_state ret = UCODE_NFOUND;
++ struct cpuinfo_x86 *c = &cpu_data(cpu);
++
++ if (c->x86 >= 0x15)
++ snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
+
+- if (request_firmware(&fw, fw_name, device)) {
++ if (request_firmware(&fw, (const char *)fw_name, device)) {
+ pr_err("failed to load file %s\n", fw_name);
+ goto out;
+ }
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 7b65f75..7c1b765 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -151,17 +151,18 @@ void bpf_jit_compile(struct sk_filter *fp)
+ cleanup_addr = proglen; /* epilogue address */
+
+ for (pass = 0; pass < 10; pass++) {
++ u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
+ /* no prologue/epilogue for trivial filters (RET something) */
+ proglen = 0;
+ prog = temp;
+
+- if (seen) {
++ if (seen_or_pass0) {
+ EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
+ EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */
+ /* note : must save %rbx in case bpf_error is hit */
+- if (seen & (SEEN_XREG | SEEN_DATAREF))
++ if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
+ EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
+- if (seen & SEEN_XREG)
++ if (seen_or_pass0 & SEEN_XREG)
+ CLEAR_X(); /* make sure we dont leek kernel memory */
+
+ /*
+@@ -170,7 +171,7 @@ void bpf_jit_compile(struct sk_filter *fp)
+ * r9 = skb->len - skb->data_len
+ * r8 = skb->data
+ */
+- if (seen & SEEN_DATAREF) {
++ if (seen_or_pass0 & SEEN_DATAREF) {
+ if (offsetof(struct sk_buff, len) <= 127)
+ /* mov off8(%rdi),%r9d */
+ EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
+@@ -260,9 +261,14 @@ void bpf_jit_compile(struct sk_filter *fp)
+ case BPF_S_ALU_DIV_X: /* A /= X; */
+ seen |= SEEN_XREG;
+ EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
+- if (pc_ret0 != -1)
+- EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4));
+- else {
++ if (pc_ret0 > 0) {
++ /* addrs[pc_ret0 - 1] is start address of target
++ * (addrs[i] - 4) is the address following this jmp
++ * ("xor %edx,%edx; div %ebx" being 4 bytes long)
++ */
++ EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
++ (addrs[i] - 4));
++ } else {
+ EMIT_COND_JMP(X86_JNE, 2 + 5);
+ CLEAR_A();
+ EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
+@@ -335,12 +341,12 @@ void bpf_jit_compile(struct sk_filter *fp)
+ }
+ /* fallinto */
+ case BPF_S_RET_A:
+- if (seen) {
++ if (seen_or_pass0) {
+ if (i != flen - 1) {
+ EMIT_JMP(cleanup_addr - addrs[i]);
+ break;
+ }
+- if (seen & SEEN_XREG)
++ if (seen_or_pass0 & SEEN_XREG)
+ EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */
+ EMIT1(0xc9); /* leaveq */
+ }
+@@ -483,8 +489,9 @@ common_load: seen |= SEEN_DATAREF;
+ goto common_load;
+ case BPF_S_LDX_B_MSH:
+ if ((int)K < 0) {
+- if (pc_ret0 != -1) {
+- EMIT_JMP(addrs[pc_ret0] - addrs[i]);
++ if (pc_ret0 > 0) {
++ /* addrs[pc_ret0 - 1] is the start address */
++ EMIT_JMP(addrs[pc_ret0 - 1] - addrs[i]);
+ break;
+ }
+ CLEAR_A();
+@@ -599,13 +606,14 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
+ * use it to give the cleanup instruction(s) addr
+ */
+ cleanup_addr = proglen - 1; /* ret */
+- if (seen)
++ if (seen_or_pass0)
+ cleanup_addr -= 1; /* leaveq */
+- if (seen & SEEN_XREG)
++ if (seen_or_pass0 & SEEN_XREG)
+ cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
+
+ if (image) {
+- WARN_ON(proglen != oldproglen);
++ if (proglen != oldproglen)
++ pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
+ break;
+ }
+ if (proglen == oldproglen) {
+diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
+index 9010ca7..81aee5a 100644
+--- a/arch/x86/platform/uv/tlb_uv.c
++++ b/arch/x86/platform/uv/tlb_uv.c
+@@ -1860,6 +1860,8 @@ static void __init init_per_cpu_tunables(void)
+ bcp->cong_reps = congested_reps;
+ bcp->cong_period = congested_period;
+ bcp->clocks_per_100_usec = usec_2_cycles(100);
++ spin_lock_init(&bcp->queue_lock);
++ spin_lock_init(&bcp->uvhub_lock);
+ }
+ }
+
+diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
+index 374a05d..f25c276 100644
+--- a/arch/x86/platform/uv/uv_irq.c
++++ b/arch/x86/platform/uv/uv_irq.c
+@@ -25,7 +25,7 @@ struct uv_irq_2_mmr_pnode{
+ int irq;
+ };
+
+-static spinlock_t uv_irq_lock;
++static DEFINE_SPINLOCK(uv_irq_lock);
+ static struct rb_root uv_irq_root;
+
+ static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
+index cc9b1e1..d69cc6c 100644
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -116,9 +116,26 @@ static inline void spin_time_accum_blocked(u64 start)
+ }
+ #endif /* CONFIG_XEN_DEBUG_FS */
+
++/*
++ * Size struct xen_spinlock so it's the same as arch_spinlock_t.
++ */
++#if NR_CPUS < 256
++typedef u8 xen_spinners_t;
++# define inc_spinners(xl) \
++ asm(LOCK_PREFIX " incb %0" : "+m" ((xl)->spinners) : : "memory");
++# define dec_spinners(xl) \
++ asm(LOCK_PREFIX " decb %0" : "+m" ((xl)->spinners) : : "memory");
++#else
++typedef u16 xen_spinners_t;
++# define inc_spinners(xl) \
++ asm(LOCK_PREFIX " incw %0" : "+m" ((xl)->spinners) : : "memory");
++# define dec_spinners(xl) \
++ asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory");
++#endif
++
+ struct xen_spinlock {
+ unsigned char lock; /* 0 -> free; 1 -> locked */
+- unsigned short spinners; /* count of waiting cpus */
++ xen_spinners_t spinners; /* count of waiting cpus */
+ };
+
+ static int xen_spin_is_locked(struct arch_spinlock *lock)
+@@ -164,8 +181,7 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
+
+ wmb(); /* set lock of interest before count */
+
+- asm(LOCK_PREFIX " incw %0"
+- : "+m" (xl->spinners) : : "memory");
++ inc_spinners(xl);
+
+ return prev;
+ }
+@@ -176,8 +192,7 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
+ */
+ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
+ {
+- asm(LOCK_PREFIX " decw %0"
+- : "+m" (xl->spinners) : : "memory");
++ dec_spinners(xl);
+ wmb(); /* decrement count before restoring lock */
+ __this_cpu_write(lock_spinners, prev);
+ }
+@@ -373,6 +388,8 @@ void xen_uninit_lock_cpu(int cpu)
+
+ void __init xen_init_spinlocks(void)
+ {
++ BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
++
+ pv_lock_ops.spin_is_locked = xen_spin_is_locked;
+ pv_lock_ops.spin_is_contended = xen_spin_is_contended;
+ pv_lock_ops.spin_lock = xen_spin_lock;
+diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
+index 9ed9f60..88f160b 100644
+--- a/crypto/sha512_generic.c
++++ b/crypto/sha512_generic.c
+@@ -21,8 +21,6 @@
+ #include <linux/percpu.h>
+ #include <asm/byteorder.h>
+
+-static DEFINE_PER_CPU(u64[80], msg_schedule);
+-
+ static inline u64 Ch(u64 x, u64 y, u64 z)
+ {
+ return z ^ (x & (y ^ z));
+@@ -80,7 +78,7 @@ static inline void LOAD_OP(int I, u64 *W, const u8 *input)
+
+ static inline void BLEND_OP(int I, u64 *W)
+ {
+- W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
++ W[I % 16] += s1(W[(I-2) % 16]) + W[(I-7) % 16] + s0(W[(I-15) % 16]);
+ }
+
+ static void
+@@ -89,38 +87,48 @@ sha512_transform(u64 *state, const u8 *input)
+ u64 a, b, c, d, e, f, g, h, t1, t2;
+
+ int i;
+- u64 *W = get_cpu_var(msg_schedule);
++ u64 W[16];
+
+ /* load the input */
+ for (i = 0; i < 16; i++)
+ LOAD_OP(i, W, input);
+
+- for (i = 16; i < 80; i++) {
+- BLEND_OP(i, W);
+- }
+-
+ /* load the state into our registers */
+ a=state[0]; b=state[1]; c=state[2]; d=state[3];
+ e=state[4]; f=state[5]; g=state[6]; h=state[7];
+
+- /* now iterate */
+- for (i=0; i<80; i+=8) {
+- t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[i ];
+- t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
+- t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[i+1];
+- t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
+- t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[i+2];
+- t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
+- t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[i+3];
+- t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
+- t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[i+4];
+- t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
+- t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[i+5];
+- t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
+- t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[i+6];
+- t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
+- t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[i+7];
+- t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
++#define SHA512_0_15(i, a, b, c, d, e, f, g, h) \
++ t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[i]; \
++ t2 = e0(a) + Maj(a, b, c); \
++ d += t1; \
++ h = t1 + t2
++
++#define SHA512_16_79(i, a, b, c, d, e, f, g, h) \
++ BLEND_OP(i, W); \
++ t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i)%16]; \
++ t2 = e0(a) + Maj(a, b, c); \
++ d += t1; \
++ h = t1 + t2
++
++ for (i = 0; i < 16; i += 8) {
++ SHA512_0_15(i, a, b, c, d, e, f, g, h);
++ SHA512_0_15(i + 1, h, a, b, c, d, e, f, g);
++ SHA512_0_15(i + 2, g, h, a, b, c, d, e, f);
++ SHA512_0_15(i + 3, f, g, h, a, b, c, d, e);
++ SHA512_0_15(i + 4, e, f, g, h, a, b, c, d);
++ SHA512_0_15(i + 5, d, e, f, g, h, a, b, c);
++ SHA512_0_15(i + 6, c, d, e, f, g, h, a, b);
++ SHA512_0_15(i + 7, b, c, d, e, f, g, h, a);
++ }
++ for (i = 16; i < 80; i += 8) {
++ SHA512_16_79(i, a, b, c, d, e, f, g, h);
++ SHA512_16_79(i + 1, h, a, b, c, d, e, f, g);
++ SHA512_16_79(i + 2, g, h, a, b, c, d, e, f);
++ SHA512_16_79(i + 3, f, g, h, a, b, c, d, e);
++ SHA512_16_79(i + 4, e, f, g, h, a, b, c, d);
++ SHA512_16_79(i + 5, d, e, f, g, h, a, b, c);
++ SHA512_16_79(i + 6, c, d, e, f, g, h, a, b);
++ SHA512_16_79(i + 7, b, c, d, e, f, g, h, a);
+ }
+
+ state[0] += a; state[1] += b; state[2] += c; state[3] += d;
+@@ -128,8 +136,6 @@ sha512_transform(u64 *state, const u8 *input)
+
+ /* erase our data */
+ a = b = c = d = e = f = g = h = t1 = t2 = 0;
+- memset(W, 0, sizeof(__get_cpu_var(msg_schedule)));
+- put_cpu_var(msg_schedule);
+ }
+
+ static int
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index 3f4051a..c7e5282 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -432,6 +432,9 @@ static int probe_itpm(struct tpm_chip *chip)
+ out:
+ itpm = rem_itpm;
+ tpm_tis_ready(chip);
++ /* some TPMs need a break here otherwise they will not work
++ * correctly on the immediately subsequent command */
++ msleep(chip->vendor.timeout_b);
+ release_locality(chip, chip->vendor.locality, 0);
+
+ return rc;
+diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
+index 3f46772..ba23790 100644
+--- a/drivers/gpu/drm/drm_auth.c
++++ b/drivers/gpu/drm/drm_auth.c
+@@ -101,7 +101,7 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
+ * Searches and unlinks the entry in drm_device::magiclist with the magic
+ * number hash key, while holding the drm_device::struct_mutex lock.
+ */
+-static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
++int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
+ {
+ struct drm_magic_entry *pt;
+ struct drm_hash_item *hash;
+@@ -136,6 +136,8 @@ static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
+ * If there is a magic number in drm_file::magic then use it, otherwise
+ * searches an unique non-zero magic number and add it associating it with \p
+ * file_priv.
++ * This ioctl needs protection by the drm_global_mutex, which protects
++ * struct drm_file::magic and struct drm_magic_entry::priv.
+ */
+ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ {
+@@ -173,6 +175,8 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ * \return zero if authentication successed, or a negative number otherwise.
+ *
+ * Checks if \p file_priv is associated with the magic number passed in \arg.
++ * This ioctl needs protection by the drm_global_mutex, which protects
++ * struct drm_file::magic and struct drm_magic_entry::priv.
+ */
+ int drm_authmagic(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
+index 4911e1d..828bf65 100644
+--- a/drivers/gpu/drm/drm_fops.c
++++ b/drivers/gpu/drm/drm_fops.c
+@@ -487,6 +487,11 @@ int drm_release(struct inode *inode, struct file *filp)
+ (long)old_encode_dev(file_priv->minor->device),
+ dev->open_count);
+
++ /* Release any auth tokens that might point to this file_priv,
++ (do that under the drm_global_mutex) */
++ if (file_priv->magic)
++ (void) drm_remove_magic(file_priv->master, file_priv->magic);
++
+ /* if the master has gone away we can't do anything with the lock */
+ if (file_priv->minor->master)
+ drm_master_release(dev, filp);
+diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
+index 7886e4f..43cbafe 100644
+--- a/drivers/gpu/drm/i915/i915_suspend.c
++++ b/drivers/gpu/drm/i915/i915_suspend.c
+@@ -822,7 +822,7 @@ int i915_save_state(struct drm_device *dev)
+
+ if (IS_IRONLAKE_M(dev))
+ ironlake_disable_drps(dev);
+- if (IS_GEN6(dev))
++ if (INTEL_INFO(dev)->gen >= 6)
+ gen6_disable_rps(dev);
+
+ /* Cache mode state */
+@@ -881,7 +881,7 @@ int i915_restore_state(struct drm_device *dev)
+ intel_init_emon(dev);
+ }
+
+- if (IS_GEN6(dev)) {
++ if (INTEL_INFO(dev)->gen >= 6) {
+ gen6_enable_rps(dev_priv);
+ gen6_update_ring_freq(dev_priv);
+ }
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index ca70e2f..30a9af9 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -631,6 +631,19 @@ render_ring_add_request(struct intel_ring_buffer *ring,
+ }
+
+ static u32
++gen6_ring_get_seqno(struct intel_ring_buffer *ring)
++{
++ struct drm_device *dev = ring->dev;
++
++ /* Workaround to force correct ordering between irq and seqno writes on
++ * ivb (and maybe also on snb) by reading from a CS register (like
++ * ACTHD) before reading the status page. */
++ if (IS_GEN7(dev))
++ intel_ring_get_active_head(ring);
++ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
++}
++
++static u32
+ ring_get_seqno(struct intel_ring_buffer *ring)
+ {
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+@@ -795,6 +808,12 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+ if (!dev->irq_enabled)
+ return false;
+
++ /* It looks like we need to prevent the gt from suspending while waiting
++ * for an notifiy irq, otherwise irqs seem to get lost on at least the
++ * blt/bsd rings on ivb. */
++ if (IS_GEN7(dev))
++ gen6_gt_force_wake_get(dev_priv);
++
+ spin_lock(&ring->irq_lock);
+ if (ring->irq_refcount++ == 0) {
+ ring->irq_mask &= ~rflag;
+@@ -819,6 +838,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+ ironlake_disable_irq(dev_priv, gflag);
+ }
+ spin_unlock(&ring->irq_lock);
++
++ if (IS_GEN7(dev))
++ gen6_gt_force_wake_put(dev_priv);
+ }
+
+ static bool
+@@ -1316,7 +1338,7 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
+ .write_tail = gen6_bsd_ring_write_tail,
+ .flush = gen6_ring_flush,
+ .add_request = gen6_add_request,
+- .get_seqno = ring_get_seqno,
++ .get_seqno = gen6_ring_get_seqno,
+ .irq_get = gen6_bsd_ring_get_irq,
+ .irq_put = gen6_bsd_ring_put_irq,
+ .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
+@@ -1451,7 +1473,7 @@ static const struct intel_ring_buffer gen6_blt_ring = {
+ .write_tail = ring_write_tail,
+ .flush = blt_ring_flush,
+ .add_request = gen6_add_request,
+- .get_seqno = ring_get_seqno,
++ .get_seqno = gen6_ring_get_seqno,
+ .irq_get = blt_ring_get_irq,
+ .irq_put = blt_ring_put_irq,
+ .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
+@@ -1474,6 +1496,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
+ ring->flush = gen6_render_ring_flush;
+ ring->irq_get = gen6_render_ring_get_irq;
+ ring->irq_put = gen6_render_ring_put_irq;
++ ring->get_seqno = gen6_ring_get_seqno;
+ } else if (IS_GEN5(dev)) {
+ ring->add_request = pc_render_add_request;
+ ring->get_seqno = pc_render_get_seqno;
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index f7b9268..e334ec3 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -1066,15 +1066,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+
+ /* Set the SDVO control regs. */
+ if (INTEL_INFO(dev)->gen >= 4) {
+- sdvox = 0;
++ /* The real mode polarity is set by the SDVO commands, using
++ * struct intel_sdvo_dtd. */
++ sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
+ if (intel_sdvo->is_hdmi)
+ sdvox |= intel_sdvo->color_range;
+ if (INTEL_INFO(dev)->gen < 5)
+ sdvox |= SDVO_BORDER_ENABLE;
+- if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+- sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
+- if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+- sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
+ } else {
+ sdvox = I915_READ(intel_sdvo->sdvo_reg);
+ switch (intel_sdvo->sdvo_reg) {
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index 6fb335a..a71557c 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -549,8 +549,8 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
+ return false;
+ }
+
+-static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
+- struct drm_connector *connector)
++int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
++ struct drm_connector *connector)
+ {
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+@@ -558,7 +558,7 @@ static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
+ int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+
+ if (!ASIC_IS_DCE4(rdev))
+- return;
++ return panel_mode;
+
+ if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+ ENCODER_OBJECT_ID_NUTMEG)
+@@ -572,14 +572,7 @@ static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
+ panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+ }
+
+- atombios_dig_encoder_setup(encoder,
+- ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
+- panel_mode);
+-
+- if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
+- (panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
+- radeon_write_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
+- }
++ return panel_mode;
+ }
+
+ void radeon_dp_set_link_config(struct drm_connector *connector,
+@@ -717,6 +710,8 @@ static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
+
+ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
+ {
++ struct radeon_encoder *radeon_encoder = to_radeon_encoder(dp_info->encoder);
++ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u8 tmp;
+
+ /* power up the sink */
+@@ -732,7 +727,10 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
+ radeon_write_dpcd_reg(dp_info->radeon_connector,
+ DP_DOWNSPREAD_CTRL, 0);
+
+- radeon_dp_set_panel_mode(dp_info->encoder, dp_info->connector);
++ if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
++ (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
++ radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
++ }
+
+ /* set the lane count on the sink */
+ tmp = dp_info->dp_lane_count;
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 39c04c1..0f8eb48 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1352,7 +1352,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ /* some early dce3.2 boards have a bug in their transmitter control table */
+- if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
++ if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) ||
++ ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ else
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+@@ -1362,8 +1363,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ radeon_dig_connector->edp_on = true;
+ }
+- if (ASIC_IS_DCE4(rdev))
+- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+ radeon_dp_link_train(encoder, connector);
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
+@@ -1374,7 +1373,10 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
++ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
++ else
++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+@@ -1821,7 +1823,21 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+- if (ASIC_IS_DCE4(rdev)) {
++ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
++ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
++ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
++
++ if (!connector)
++ dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
++ else
++ dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
++
++ /* setup and enable the encoder */
++ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
++ atombios_dig_encoder_setup(encoder,
++ ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
++ dig->panel_mode);
++ } else if (ASIC_IS_DCE4(rdev)) {
+ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ /* setup and enable the encoder */
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index 8f86aeb..e7ddb49 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -134,6 +134,12 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
+ /* Dell RS690 only seems to work with MSIs. */
+ if ((rdev->pdev->device == 0x791f) &&
+ (rdev->pdev->subsystem_vendor == 0x1028) &&
++ (rdev->pdev->subsystem_device == 0x01fc))
++ return true;
++
++ /* Dell RS690 only seems to work with MSIs. */
++ if ((rdev->pdev->device == 0x791f) &&
++ (rdev->pdev->subsystem_vendor == 0x1028) &&
+ (rdev->pdev->subsystem_device == 0x01fd))
+ return true;
+
+diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
+index 2c2e75e..8254d5a 100644
+--- a/drivers/gpu/drm/radeon/radeon_mode.h
++++ b/drivers/gpu/drm/radeon/radeon_mode.h
+@@ -362,6 +362,7 @@ struct radeon_encoder_atom_dig {
+ struct backlight_device *bl_dev;
+ int dpms_mode;
+ uint8_t backlight_level;
++ int panel_mode;
+ };
+
+ struct radeon_encoder_atom_dac {
+@@ -482,6 +483,8 @@ extern void radeon_dp_link_train(struct drm_encoder *encoder,
+ extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
+ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
+ extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
++extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
++ struct drm_connector *connector);
+ extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
+ extern void radeon_atom_encoder_init(struct radeon_device *rdev);
+ extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index f94b33a..7c88f1f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -378,7 +378,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
+ unsigned int *handle)
+ {
+ if (handle)
+- handle = 0;
++ *handle = 0;
+
+ return 0;
+ }
+diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
+index 92f9497..6dbfd3e 100644
+--- a/drivers/hwmon/f71805f.c
++++ b/drivers/hwmon/f71805f.c
+@@ -283,11 +283,11 @@ static inline long temp_from_reg(u8 reg)
+
+ static inline u8 temp_to_reg(long val)
+ {
+- if (val < 0)
+- val = 0;
+- else if (val > 1000 * 0xff)
+- val = 0xff;
+- return ((val + 500) / 1000);
++ if (val <= 0)
++ return 0;
++ if (val >= 1000 * 0xff)
++ return 0xff;
++ return (val + 500) / 1000;
+ }
+
+ /*
+diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
+index fe4104c..5357925 100644
+--- a/drivers/hwmon/sht15.c
++++ b/drivers/hwmon/sht15.c
+@@ -883,7 +883,7 @@ static int sht15_invalidate_voltage(struct notifier_block *nb,
+
+ static int __devinit sht15_probe(struct platform_device *pdev)
+ {
+- int ret = 0;
++ int ret;
+ struct sht15_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
+ u8 status = 0;
+
+@@ -901,6 +901,7 @@ static int __devinit sht15_probe(struct platform_device *pdev)
+ init_waitqueue_head(&data->wait_queue);
+
+ if (pdev->dev.platform_data == NULL) {
++ ret = -EINVAL;
+ dev_err(&pdev->dev, "no platform data supplied\n");
+ goto err_free_data;
+ }
+diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
+index 93f5fc7..4b57ab6 100644
+--- a/drivers/hwmon/w83627ehf.c
++++ b/drivers/hwmon/w83627ehf.c
+@@ -1319,6 +1319,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
+ {
+ struct w83627ehf_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
++ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+ int nr = sensor_attr->index;
+ unsigned long val;
+ int err;
+@@ -1330,6 +1331,11 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
+
+ if (val > 1)
+ return -EINVAL;
++
++ /* On NCT67766F, DC mode is only supported for pwm1 */
++ if (sio_data->kind == nct6776 && nr && val != 1)
++ return -EINVAL;
++
+ mutex_lock(&data->update_lock);
+ reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
+ data->pwm_mode[nr] = val;
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index 106b88a..30431d8 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -871,16 +871,12 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+ }
+ }
+
+-/* hw is a boolean parameter that determines whether we should try and
+- * set the hw address of the device as well as the hw address of the
+- * net_device
+- */
+-static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
++static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
+ {
+ struct net_device *dev = slave->dev;
+ struct sockaddr s_addr;
+
+- if (!hw) {
++ if (slave->bond->params.mode == BOND_MODE_TLB) {
+ memcpy(dev->dev_addr, addr, dev->addr_len);
+ return 0;
+ }
+@@ -910,8 +906,8 @@ static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct
+ u8 tmp_mac_addr[ETH_ALEN];
+
+ memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
+- alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled);
+- alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled);
++ alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
++ alb_set_slave_mac_addr(slave2, tmp_mac_addr);
+
+ }
+
+@@ -1058,8 +1054,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
+
+ /* Try setting slave mac to bond address and fall-through
+ to code handling that situation below... */
+- alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
+- bond->alb_info.rlb_enabled);
++ alb_set_slave_mac_addr(slave, bond->dev->dev_addr);
+ }
+
+ /* The slave's address is equal to the address of the bond.
+@@ -1095,8 +1090,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
+ }
+
+ if (free_mac_slave) {
+- alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
+- bond->alb_info.rlb_enabled);
++ alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
+
+ pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
+ bond->dev->name, slave->dev->name,
+@@ -1451,8 +1445,7 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
+ {
+ int res;
+
+- res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr,
+- bond->alb_info.rlb_enabled);
++ res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
+ if (res) {
+ return res;
+ }
+@@ -1603,8 +1596,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
+ alb_swap_mac_addr(bond, swap_slave, new_slave);
+ } else {
+ /* set the new_slave to the bond mac address */
+- alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
+- bond->alb_info.rlb_enabled);
++ alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
+ }
+
+ if (swap_slave) {
+@@ -1664,8 +1656,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
+ alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
+ alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
+ } else {
+- alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
+- bond->alb_info.rlb_enabled);
++ alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
+
+ read_lock(&bond->lock);
+ alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 7413497..959d448 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -172,6 +172,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
+ skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
+ if (!skb)
+ return RX_HANDLER_CONSUMED;
++ eth = eth_hdr(skb);
+ src = macvlan_hash_lookup(port, eth->h_source);
+ if (!src)
+ /* frame comes from an external address */
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
+index 510e9bb..453f58e 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
+@@ -8217,13 +8217,21 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
+
+ void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
+ {
++ int timeout = 20;
++
+ /* flush packet queue when requested */
+ if (drop)
+ brcmu_pktq_flush(&wlc->pkt_queue->q, false, NULL, NULL);
+
+ /* wait for queue and DMA fifos to run dry */
+- while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0)
++ while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0) {
+ brcms_msleep(wlc->wl, 1);
++
++ if (--timeout == 0)
++ break;
++ }
++
++ WARN_ON_ONCE(timeout == 0);
+ }
+
+ void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+index 1920237..1daf01e 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+@@ -957,11 +957,11 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
+ }
+ #endif
+
+- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+-
+ /* saved interrupt in inta variable now we can reset trans_pcie->inta */
+ trans_pcie->inta = 0;
+
++ spin_unlock_irqrestore(&trans->shrd->lock, flags);
++
+ /* Now service all interrupt bits discovered above. */
+ if (inta & CSR_INT_BIT_HW_ERR) {
+ IWL_ERR(trans, "Hardware error detected. Restarting.\n");
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
+index 0794c72..b1ddfef 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -4033,7 +4033,8 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+ ioc->reply_free[i] = cpu_to_le32(reply_address);
+
+ /* initialize reply queues */
+- _base_assign_reply_queues(ioc);
++ if (ioc->is_driver_loading)
++ _base_assign_reply_queues(ioc);
+
+ /* initialize Reply Post Free Queue */
+ reply_post_free = (long)ioc->reply_post_free;
+@@ -4081,24 +4082,17 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+
+
+ if (ioc->is_driver_loading) {
+-
+-
+-
+- ioc->wait_for_discovery_to_complete =
+- _base_determine_wait_on_discovery(ioc);
+- return r; /* scan_start and scan_finished support */
+- }
+-
+-
+- if (ioc->wait_for_discovery_to_complete && ioc->is_warpdrive) {
+- if (ioc->manu_pg10.OEMIdentifier == 0x80) {
++ if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
++ == 0x80) {
+ hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 &
+ MFG_PAGE10_HIDE_SSDS_MASK);
+ if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
+ ioc->mfg_pg10_hide_flag = hide_flag;
+ }
++ ioc->wait_for_discovery_to_complete =
++ _base_determine_wait_on_discovery(ioc);
++ return r; /* scan_start and scan_finished support */
+ }
+-
+ r = _base_send_port_enable(ioc, sleep_flag);
+ if (r)
+ return r;
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index 9bc6fb2..2824a90 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -8001,7 +8001,6 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ goto out_attach_fail;
+ }
+
+- scsi_scan_host(shost);
+ if (ioc->is_warpdrive) {
+ if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
+ ioc->hide_drives = 0;
+@@ -8015,8 +8014,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ }
+ } else
+ ioc->hide_drives = 0;
++ scsi_scan_host(shost);
+
+- _scsih_probe_devices(ioc);
+ return 0;
+
+ out_attach_fail:
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 00233af..8e00926 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1740,9 +1740,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
+ {
+ struct uart_amba_port *uap = amba_ports[co->index];
+ unsigned int status, old_cr, new_cr;
++ unsigned long flags;
++ int locked = 1;
+
+ clk_enable(uap->clk);
+
++ local_irq_save(flags);
++ if (uap->port.sysrq)
++ locked = 0;
++ else if (oops_in_progress)
++ locked = spin_trylock(&uap->port.lock);
++ else
++ spin_lock(&uap->port.lock);
++
+ /*
+ * First save the CR then disable the interrupts
+ */
+@@ -1762,6 +1772,10 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
+ } while (status & UART01x_FR_BUSY);
+ writew(old_cr, uap->port.membase + UART011_CR);
+
++ if (locked)
++ spin_unlock(&uap->port.lock);
++ local_irq_restore(flags);
++
+ clk_disable(uap->clk);
+ }
+
+diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
+index 7c867a0..7545fe1 100644
+--- a/drivers/tty/serial/jsm/jsm_driver.c
++++ b/drivers/tty/serial/jsm/jsm_driver.c
+@@ -251,6 +251,7 @@ static void jsm_io_resume(struct pci_dev *pdev)
+ struct jsm_board *brd = pci_get_drvdata(pdev);
+
+ pci_restore_state(pdev);
++ pci_save_state(pdev);
+
+ jsm_uart_port_init(brd);
+ }
+diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
+index ef9dd62..bf6e238 100644
+--- a/drivers/tty/tty_port.c
++++ b/drivers/tty/tty_port.c
+@@ -227,7 +227,6 @@ int tty_port_block_til_ready(struct tty_port *port,
+ int do_clocal = 0, retval;
+ unsigned long flags;
+ DEFINE_WAIT(wait);
+- int cd;
+
+ /* block if port is in the process of being closed */
+ if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
+@@ -284,11 +283,14 @@ int tty_port_block_til_ready(struct tty_port *port,
+ retval = -ERESTARTSYS;
+ break;
+ }
+- /* Probe the carrier. For devices with no carrier detect this
+- will always return true */
+- cd = tty_port_carrier_raised(port);
++ /*
++ * Probe the carrier. For devices with no carrier detect
++ * tty_port_carrier_raised will always return true.
++ * Never ask drivers if CLOCAL is set, this causes troubles
++ * on some hardware.
++ */
+ if (!(port->flags & ASYNC_CLOSING) &&
+- (do_clocal || cd))
++ (do_clocal || tty_port_carrier_raised(port)))
+ break;
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index efe6849..fd4aee1 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -57,6 +57,8 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
+
+ #define WDM_MAX 16
+
++/* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */
++#define WDM_DEFAULT_BUFSIZE 256
+
+ static DEFINE_MUTEX(wdm_mutex);
+
+@@ -88,7 +90,8 @@ struct wdm_device {
+ int count;
+ dma_addr_t shandle;
+ dma_addr_t ihandle;
+- struct mutex lock;
++ struct mutex wlock;
++ struct mutex rlock;
+ wait_queue_head_t wait;
+ struct work_struct rxwork;
+ int werr;
+@@ -323,7 +326,7 @@ static ssize_t wdm_write
+ }
+
+ /* concurrent writes and disconnect */
+- r = mutex_lock_interruptible(&desc->lock);
++ r = mutex_lock_interruptible(&desc->wlock);
+ rv = -ERESTARTSYS;
+ if (r) {
+ kfree(buf);
+@@ -386,7 +389,7 @@ static ssize_t wdm_write
+ out:
+ usb_autopm_put_interface(desc->intf);
+ outnp:
+- mutex_unlock(&desc->lock);
++ mutex_unlock(&desc->wlock);
+ outnl:
+ return rv < 0 ? rv : count;
+ }
+@@ -399,7 +402,7 @@ static ssize_t wdm_read
+ struct wdm_device *desc = file->private_data;
+
+
+- rv = mutex_lock_interruptible(&desc->lock); /*concurrent reads */
++ rv = mutex_lock_interruptible(&desc->rlock); /*concurrent reads */
+ if (rv < 0)
+ return -ERESTARTSYS;
+
+@@ -467,14 +470,16 @@ retry:
+ for (i = 0; i < desc->length - cntr; i++)
+ desc->ubuf[i] = desc->ubuf[i + cntr];
+
++ spin_lock_irq(&desc->iuspin);
+ desc->length -= cntr;
++ spin_unlock_irq(&desc->iuspin);
+ /* in case we had outstanding data */
+ if (!desc->length)
+ clear_bit(WDM_READ, &desc->flags);
+ rv = cntr;
+
+ err:
+- mutex_unlock(&desc->lock);
++ mutex_unlock(&desc->rlock);
+ return rv;
+ }
+
+@@ -540,7 +545,8 @@ static int wdm_open(struct inode *inode, struct file *file)
+ }
+ intf->needs_remote_wakeup = 1;
+
+- mutex_lock(&desc->lock);
++ /* using write lock to protect desc->count */
++ mutex_lock(&desc->wlock);
+ if (!desc->count++) {
+ desc->werr = 0;
+ desc->rerr = 0;
+@@ -553,7 +559,7 @@ static int wdm_open(struct inode *inode, struct file *file)
+ } else {
+ rv = 0;
+ }
+- mutex_unlock(&desc->lock);
++ mutex_unlock(&desc->wlock);
+ usb_autopm_put_interface(desc->intf);
+ out:
+ mutex_unlock(&wdm_mutex);
+@@ -565,9 +571,11 @@ static int wdm_release(struct inode *inode, struct file *file)
+ struct wdm_device *desc = file->private_data;
+
+ mutex_lock(&wdm_mutex);
+- mutex_lock(&desc->lock);
++
++ /* using write lock to protect desc->count */
++ mutex_lock(&desc->wlock);
+ desc->count--;
+- mutex_unlock(&desc->lock);
++ mutex_unlock(&desc->wlock);
+
+ if (!desc->count) {
+ dev_dbg(&desc->intf->dev, "wdm_release: cleanup");
+@@ -630,7 +638,7 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
+ struct usb_cdc_dmm_desc *dmhd;
+ u8 *buffer = intf->altsetting->extra;
+ int buflen = intf->altsetting->extralen;
+- u16 maxcom = 0;
++ u16 maxcom = WDM_DEFAULT_BUFSIZE;
+
+ if (!buffer)
+ goto out;
+@@ -665,7 +673,8 @@ next_desc:
+ desc = kzalloc(sizeof(struct wdm_device), GFP_KERNEL);
+ if (!desc)
+ goto out;
+- mutex_init(&desc->lock);
++ mutex_init(&desc->rlock);
++ mutex_init(&desc->wlock);
+ spin_lock_init(&desc->iuspin);
+ init_waitqueue_head(&desc->wait);
+ desc->wMaxCommand = maxcom;
+@@ -716,7 +725,7 @@ next_desc:
+ goto err;
+
+ desc->inbuf = usb_alloc_coherent(interface_to_usbdev(intf),
+- desc->bMaxPacketSize0,
++ desc->wMaxCommand,
+ GFP_KERNEL,
+ &desc->response->transfer_dma);
+ if (!desc->inbuf)
+@@ -779,11 +788,13 @@ static void wdm_disconnect(struct usb_interface *intf)
+ /* to terminate pending flushes */
+ clear_bit(WDM_IN_USE, &desc->flags);
+ spin_unlock_irqrestore(&desc->iuspin, flags);
+- mutex_lock(&desc->lock);
++ wake_up_all(&desc->wait);
++ mutex_lock(&desc->rlock);
++ mutex_lock(&desc->wlock);
+ kill_urbs(desc);
+ cancel_work_sync(&desc->rxwork);
+- mutex_unlock(&desc->lock);
+- wake_up_all(&desc->wait);
++ mutex_unlock(&desc->wlock);
++ mutex_unlock(&desc->rlock);
+ if (!desc->count)
+ cleanup(desc);
+ mutex_unlock(&wdm_mutex);
+@@ -798,8 +809,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
+ dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor);
+
+ /* if this is an autosuspend the caller does the locking */
+- if (!PMSG_IS_AUTO(message))
+- mutex_lock(&desc->lock);
++ if (!PMSG_IS_AUTO(message)) {
++ mutex_lock(&desc->rlock);
++ mutex_lock(&desc->wlock);
++ }
+ spin_lock_irq(&desc->iuspin);
+
+ if (PMSG_IS_AUTO(message) &&
+@@ -815,8 +828,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
+ kill_urbs(desc);
+ cancel_work_sync(&desc->rxwork);
+ }
+- if (!PMSG_IS_AUTO(message))
+- mutex_unlock(&desc->lock);
++ if (!PMSG_IS_AUTO(message)) {
++ mutex_unlock(&desc->wlock);
++ mutex_unlock(&desc->rlock);
++ }
+
+ return rv;
+ }
+@@ -854,7 +869,8 @@ static int wdm_pre_reset(struct usb_interface *intf)
+ {
+ struct wdm_device *desc = usb_get_intfdata(intf);
+
+- mutex_lock(&desc->lock);
++ mutex_lock(&desc->rlock);
++ mutex_lock(&desc->wlock);
+ kill_urbs(desc);
+
+ /*
+@@ -876,7 +892,8 @@ static int wdm_post_reset(struct usb_interface *intf)
+ int rv;
+
+ rv = recover_from_urb_loss(desc);
+- mutex_unlock(&desc->lock);
++ mutex_unlock(&desc->wlock);
++ mutex_unlock(&desc->rlock);
+ return 0;
+ }
+
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 69a4e43..27bd50a 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -149,20 +149,14 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
+
+ direction = !!(dep->flags & DWC3_EP0_DIR_IN);
+
+- if (dwc->ep0state == EP0_STATUS_PHASE) {
+- type = dwc->three_stage_setup
+- ? DWC3_TRBCTL_CONTROL_STATUS3
+- : DWC3_TRBCTL_CONTROL_STATUS2;
+- } else if (dwc->ep0state == EP0_DATA_PHASE) {
+- type = DWC3_TRBCTL_CONTROL_DATA;
+- } else {
+- /* should never happen */
+- WARN_ON(1);
++ if (dwc->ep0state != EP0_DATA_PHASE) {
++ dev_WARN(dwc->dev, "Unexpected pending request\n");
+ return 0;
+ }
+
+ ret = dwc3_ep0_start_trans(dwc, direction,
+- req->request.dma, req->request.length, type);
++ req->request.dma, req->request.length,
++ DWC3_TRBCTL_CONTROL_DATA);
+ dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
+ DWC3_EP0_DIR_IN);
+ }
+diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
+index c9fa3bf..6ad0ad6 100644
+--- a/drivers/usb/gadget/langwell_udc.c
++++ b/drivers/usb/gadget/langwell_udc.c
+@@ -1522,8 +1522,7 @@ static void langwell_udc_stop(struct langwell_udc *dev)
+
+
+ /* stop all USB activities */
+-static void stop_activity(struct langwell_udc *dev,
+- struct usb_gadget_driver *driver)
++static void stop_activity(struct langwell_udc *dev)
+ {
+ struct langwell_ep *ep;
+ dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+@@ -1535,9 +1534,9 @@ static void stop_activity(struct langwell_udc *dev,
+ }
+
+ /* report disconnect; the driver is already quiesced */
+- if (driver) {
++ if (dev->driver) {
+ spin_unlock(&dev->lock);
+- driver->disconnect(&dev->gadget);
++ dev->driver->disconnect(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
+
+@@ -1925,11 +1924,10 @@ static int langwell_stop(struct usb_gadget *g,
+
+ /* stop all usb activities */
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+- stop_activity(dev, driver);
+- spin_unlock_irqrestore(&dev->lock, flags);
+-
+ dev->gadget.dev.driver = NULL;
+ dev->driver = NULL;
++ stop_activity(dev);
++ spin_unlock_irqrestore(&dev->lock, flags);
+
+ device_remove_file(&dev->pdev->dev, &dev_attr_function);
+
+@@ -2733,7 +2731,7 @@ static void handle_usb_reset(struct langwell_udc *dev)
+ dev->bus_reset = 1;
+
+ /* reset all the queues, stop all USB activities */
+- stop_activity(dev, dev->driver);
++ stop_activity(dev);
+ dev->usb_state = USB_STATE_DEFAULT;
+ } else {
+ dev_vdbg(&dev->pdev->dev, "device controller reset\n");
+@@ -2741,7 +2739,7 @@ static void handle_usb_reset(struct langwell_udc *dev)
+ langwell_udc_reset(dev);
+
+ /* reset all the queues, stop all USB activities */
+- stop_activity(dev, dev->driver);
++ stop_activity(dev);
+
+ /* reset ep0 dQH and endptctrl */
+ ep0_reset(dev);
+@@ -3367,7 +3365,7 @@ static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
+
+ spin_lock_irq(&dev->lock);
+ /* stop all usb activities */
+- stop_activity(dev, dev->driver);
++ stop_activity(dev);
+ spin_unlock_irq(&dev->lock);
+
+ /* free dTD dma_pool and dQH */
+diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
+index c7f291a..85ea14e 100644
+--- a/drivers/usb/gadget/storage_common.c
++++ b/drivers/usb/gadget/storage_common.c
+@@ -598,16 +598,16 @@ static __maybe_unused struct usb_ss_cap_descriptor fsg_ss_cap_desc = {
+ | USB_5GBPS_OPERATION),
+ .bFunctionalitySupport = USB_LOW_SPEED_OPERATION,
+ .bU1devExitLat = USB_DEFAULT_U1_DEV_EXIT_LAT,
+- .bU2DevExitLat = USB_DEFAULT_U2_DEV_EXIT_LAT,
++ .bU2DevExitLat = cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT),
+ };
+
+ static __maybe_unused struct usb_bos_descriptor fsg_bos_desc = {
+ .bLength = USB_DT_BOS_SIZE,
+ .bDescriptorType = USB_DT_BOS,
+
+- .wTotalLength = USB_DT_BOS_SIZE
++ .wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE
+ + USB_DT_USB_EXT_CAP_SIZE
+- + USB_DT_USB_SS_CAP_SIZE,
++ + USB_DT_USB_SS_CAP_SIZE),
+
+ .bNumDeviceCaps = 2,
+ };
+diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
+index e90344a..b556a72 100644
+--- a/drivers/usb/host/ehci-fsl.c
++++ b/drivers/usb/host/ehci-fsl.c
+@@ -125,7 +125,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
+ */
+ if (pdata->init && pdata->init(pdev)) {
+ retval = -ENODEV;
+- goto err3;
++ goto err4;
+ }
+
+ /* Enable USB controller, 83xx or 8536 */
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index d28c586..ae92dc4 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1215,6 +1215,7 @@ static void handle_vendor_event(struct xhci_hcd *xhci,
+ *
+ * Returns a zero-based port number, which is suitable for indexing into each of
+ * the split roothubs' port arrays and bus state arrays.
++ * Add one to it in order to call xhci_find_slot_id_by_port.
+ */
+ static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
+ struct xhci_hcd *xhci, u32 port_id)
+@@ -1335,7 +1336,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ xhci_set_link_state(xhci, port_array, faked_port_index,
+ XDEV_U0);
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+- faked_port_index);
++ faked_port_index + 1);
+ if (!slot_id) {
+ xhci_dbg(xhci, "slot_id is zero\n");
+ goto cleanup;
+@@ -3372,7 +3373,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ /* Check TD length */
+ if (running_total != td_len) {
+ xhci_err(xhci, "ISOC TD length unmatch\n");
+- return -EINVAL;
++ ret = -EINVAL;
++ goto cleanup;
+ }
+ }
+
+diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
+index 417b8f2..59689fa 100644
+--- a/drivers/usb/misc/usbsevseg.c
++++ b/drivers/usb/misc/usbsevseg.c
+@@ -24,7 +24,7 @@
+
+ #define VENDOR_ID 0x0fc5
+ #define PRODUCT_ID 0x1227
+-#define MAXLEN 6
++#define MAXLEN 8
+
+ /* table of devices that work with this driver */
+ static const struct usb_device_id id_table[] = {
+diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
+index f9a3f62..7c569f5 100644
+--- a/drivers/usb/musb/davinci.c
++++ b/drivers/usb/musb/davinci.c
+@@ -33,9 +33,6 @@
+ #include <linux/platform_device.h>
+ #include <linux/dma-mapping.h>
+
+-#include <mach/hardware.h>
+-#include <mach/memory.h>
+-#include <asm/gpio.h>
+ #include <mach/cputype.h>
+
+ #include <asm/mach-types.h>
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index a1a324b..a515237 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -39,6 +39,8 @@ static void cp210x_get_termios(struct tty_struct *,
+ struct usb_serial_port *port);
+ static void cp210x_get_termios_port(struct usb_serial_port *port,
+ unsigned int *cflagp, unsigned int *baudp);
++static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *,
++ struct ktermios *);
+ static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
+ struct ktermios*);
+ static int cp210x_tiocmget(struct tty_struct *);
+@@ -138,6 +140,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
++ { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+ { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
+ { } /* Terminating Entry */
+ };
+@@ -201,6 +204,8 @@ static struct usb_serial_driver cp210x_device = {
+ #define CP210X_EMBED_EVENTS 0x15
+ #define CP210X_GET_EVENTSTATE 0x16
+ #define CP210X_SET_CHARS 0x19
++#define CP210X_GET_BAUDRATE 0x1D
++#define CP210X_SET_BAUDRATE 0x1E
+
+ /* CP210X_IFC_ENABLE */
+ #define UART_ENABLE 0x0001
+@@ -354,8 +359,8 @@ static inline int cp210x_set_config_single(struct usb_serial_port *port,
+ * Quantises the baud rate as per AN205 Table 1
+ */
+ static unsigned int cp210x_quantise_baudrate(unsigned int baud) {
+- if (baud <= 56) baud = 0;
+- else if (baud <= 300) baud = 300;
++ if (baud <= 300)
++ baud = 300;
+ else if (baud <= 600) baud = 600;
+ else if (baud <= 1200) baud = 1200;
+ else if (baud <= 1800) baud = 1800;
+@@ -383,17 +388,15 @@ static unsigned int cp210x_quantise_baudrate(unsigned int baud) {
+ else if (baud <= 491520) baud = 460800;
+ else if (baud <= 567138) baud = 500000;
+ else if (baud <= 670254) baud = 576000;
+- else if (baud <= 1053257) baud = 921600;
+- else if (baud <= 1474560) baud = 1228800;
+- else if (baud <= 2457600) baud = 1843200;
+- else baud = 3686400;
++ else if (baud < 1000000)
++ baud = 921600;
++ else if (baud > 2000000)
++ baud = 2000000;
+ return baud;
+ }
+
+ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
+ {
+- int result;
+-
+ dbg("%s - port %d", __func__, port->number);
+
+ if (cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_ENABLE)) {
+@@ -402,13 +405,14 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
+ return -EPROTO;
+ }
+
+- result = usb_serial_generic_open(tty, port);
+- if (result)
+- return result;
+-
+ /* Configure the termios structure */
+ cp210x_get_termios(tty, port);
+- return 0;
++
++ /* The baud rate must be initialised on cp2104 */
++ if (tty)
++ cp210x_change_speed(tty, port, NULL);
++
++ return usb_serial_generic_open(tty, port);
+ }
+
+ static void cp210x_close(struct usb_serial_port *port)
+@@ -460,10 +464,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
+
+ dbg("%s - port %d", __func__, port->number);
+
+- cp210x_get_config(port, CP210X_GET_BAUDDIV, &baud, 2);
+- /* Convert to baudrate */
+- if (baud)
+- baud = cp210x_quantise_baudrate((BAUD_RATE_GEN_FREQ + baud/2)/ baud);
++ cp210x_get_config(port, CP210X_GET_BAUDRATE, &baud, 4);
+
+ dbg("%s - baud rate = %d", __func__, baud);
+ *baudp = baud;
+@@ -577,11 +578,64 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
+ *cflagp = cflag;
+ }
+
++/*
++ * CP2101 supports the following baud rates:
++ *
++ * 300, 600, 1200, 1800, 2400, 4800, 7200, 9600, 14400, 19200, 28800,
++ * 38400, 56000, 57600, 115200, 128000, 230400, 460800, 921600
++ *
++ * CP2102 and CP2103 support the following additional rates:
++ *
++ * 4000, 16000, 51200, 64000, 76800, 153600, 250000, 256000, 500000,
++ * 576000
++ *
++ * The device will map a requested rate to a supported one, but the result
++ * of requests for rates greater than 1053257 is undefined (see AN205).
++ *
++ * CP2104, CP2105 and CP2110 support most rates up to 2M, 921k and 1M baud,
++ * respectively, with an error less than 1%. The actual rates are determined
++ * by
++ *
++ * div = round(freq / (2 x prescale x request))
++ * actual = freq / (2 x prescale x div)
++ *
++ * For CP2104 and CP2105 freq is 48Mhz and prescale is 4 for request <= 365bps
++ * or 1 otherwise.
++ * For CP2110 freq is 24Mhz and prescale is 4 for request <= 300bps or 1
++ * otherwise.
++ */
++static void cp210x_change_speed(struct tty_struct *tty,
++ struct usb_serial_port *port, struct ktermios *old_termios)
++{
++ u32 baud;
++
++ baud = tty->termios->c_ospeed;
++
++ /* This maps the requested rate to a rate valid on cp2102 or cp2103,
++ * or to an arbitrary rate in [1M,2M].
++ *
++ * NOTE: B0 is not implemented.
++ */
++ baud = cp210x_quantise_baudrate(baud);
++
++ dbg("%s - setting baud rate to %u", __func__, baud);
++ if (cp210x_set_config(port, CP210X_SET_BAUDRATE, &baud,
++ sizeof(baud))) {
++ dev_warn(&port->dev, "failed to set baud rate to %u\n", baud);
++ if (old_termios)
++ baud = old_termios->c_ospeed;
++ else
++ baud = 9600;
++ }
++
++ tty_encode_baud_rate(tty, baud, baud);
++}
++
+ static void cp210x_set_termios(struct tty_struct *tty,
+ struct usb_serial_port *port, struct ktermios *old_termios)
+ {
+ unsigned int cflag, old_cflag;
+- unsigned int baud = 0, bits;
++ unsigned int bits;
+ unsigned int modem_ctl[4];
+
+ dbg("%s - port %d", __func__, port->number);
+@@ -592,20 +646,9 @@ static void cp210x_set_termios(struct tty_struct *tty,
+ tty->termios->c_cflag &= ~CMSPAR;
+ cflag = tty->termios->c_cflag;
+ old_cflag = old_termios->c_cflag;
+- baud = cp210x_quantise_baudrate(tty_get_baud_rate(tty));
+-
+- /* If the baud rate is to be updated*/
+- if (baud != tty_termios_baud_rate(old_termios) && baud != 0) {
+- dbg("%s - Setting baud rate to %d baud", __func__,
+- baud);
+- if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV,
+- ((BAUD_RATE_GEN_FREQ + baud/2) / baud))) {
+- dbg("Baud rate requested not supported by device");
+- baud = tty_termios_baud_rate(old_termios);
+- }
+- }
+- /* Report back the resulting baud rate */
+- tty_encode_baud_rate(tty, baud, baud);
++
++ if (tty->termios->c_ospeed != old_termios->c_ospeed)
++ cp210x_change_speed(tty, port, old_termios);
+
+ /* If the number of data bits is to be updated */
+ if ((cflag & CSIZE) != (old_cflag & CSIZE)) {
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index ff3db5d..058b92c 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -797,6 +797,7 @@ static struct usb_device_id id_table_combined [] = {
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++ { USB_DEVICE(HORNBY_VID, HORNBY_ELITE_PID) },
+ { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
+ { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+@@ -805,6 +806,8 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
+ { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++ { USB_DEVICE(FTDI_VID, TI_XDS100V2_PID),
++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
+ { USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) },
+ { USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) },
+@@ -841,6 +844,7 @@ static struct usb_device_id id_table_combined [] = {
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
++ { USB_DEVICE(FTDI_VID, FTDI_RF_R106) },
+ { }, /* Optional parameter entry */
+ { } /* Terminating entry */
+ };
+@@ -1333,8 +1337,7 @@ static int set_serial_info(struct tty_struct *tty,
+ goto check_and_exit;
+ }
+
+- if ((new_serial.baud_base != priv->baud_base) &&
+- (new_serial.baud_base < 9600)) {
++ if (new_serial.baud_base != priv->baud_base) {
+ mutex_unlock(&priv->cfg_lock);
+ return -EINVAL;
+ }
+@@ -1824,6 +1827,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
+
+ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
+ {
++ struct ktermios dummy;
+ struct usb_device *dev = port->serial->dev;
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ int result;
+@@ -1842,8 +1846,10 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
+ This is same behaviour as serial.c/rs_open() - Kuba */
+
+ /* ftdi_set_termios will send usb control messages */
+- if (tty)
+- ftdi_set_termios(tty, port, tty->termios);
++ if (tty) {
++ memset(&dummy, 0, sizeof(dummy));
++ ftdi_set_termios(tty, port, &dummy);
++ }
+
+ /* Start reading from the device */
+ result = usb_serial_generic_open(tty, port);
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 055b64e..76d4f31 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -39,6 +39,13 @@
+ /* www.candapter.com Ewert Energy Systems CANdapter device */
+ #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
+
++/*
++ * Texas Instruments XDS100v2 JTAG / BeagleBone A3
++ * http://processors.wiki.ti.com/index.php/XDS100
++ * http://beagleboard.org/bone
++ */
++#define TI_XDS100V2_PID 0xa6d0
++
+ #define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
+
+ /* US Interface Navigator (http://www.usinterface.com/) */
+@@ -525,6 +532,12 @@
+ #define ADI_GNICEPLUS_PID 0xF001
+
+ /*
++ * Hornby Elite
++ */
++#define HORNBY_VID 0x04D8
++#define HORNBY_ELITE_PID 0x000A
++
++/*
+ * RATOC REX-USB60F
+ */
+ #define RATOC_VENDOR_ID 0x0584
+@@ -1168,3 +1181,9 @@
+ */
+ /* TagTracer MIFARE*/
+ #define FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID 0xF7C0
++
++/*
++ * Rainforest Automation
++ */
++/* ZigBee controller */
++#define FTDI_RF_R106 0x8A28
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index 0aac00a..8a90d58 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -2677,15 +2677,7 @@ cleanup:
+
+ static void edge_disconnect(struct usb_serial *serial)
+ {
+- int i;
+- struct edgeport_port *edge_port;
+-
+ dbg("%s", __func__);
+-
+- for (i = 0; i < serial->num_ports; ++i) {
+- edge_port = usb_get_serial_port_data(serial->port[i]);
+- edge_remove_sysfs_attrs(edge_port->port);
+- }
+ }
+
+ static void edge_release(struct usb_serial *serial)
+@@ -2764,6 +2756,7 @@ static struct usb_serial_driver edgeport_1port_device = {
+ .disconnect = edge_disconnect,
+ .release = edge_release,
+ .port_probe = edge_create_sysfs_attrs,
++ .port_remove = edge_remove_sysfs_attrs,
+ .ioctl = edge_ioctl,
+ .set_termios = edge_set_termios,
+ .tiocmget = edge_tiocmget,
+@@ -2795,6 +2788,7 @@ static struct usb_serial_driver edgeport_2port_device = {
+ .disconnect = edge_disconnect,
+ .release = edge_release,
+ .port_probe = edge_create_sysfs_attrs,
++ .port_remove = edge_remove_sysfs_attrs,
+ .ioctl = edge_ioctl,
+ .set_termios = edge_set_termios,
+ .tiocmget = edge_tiocmget,
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c96b6b6..2a9ed6e 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -480,6 +480,10 @@ static void option_instat_callback(struct urb *urb);
+ #define ZD_VENDOR_ID 0x0685
+ #define ZD_PRODUCT_7000 0x7000
+
++/* LG products */
++#define LG_VENDOR_ID 0x1004
++#define LG_PRODUCT_L02C 0x618f
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+@@ -1183,6 +1187,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
+ { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
++ { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
+index 30b73e6..a348198 100644
+--- a/drivers/usb/serial/qcaux.c
++++ b/drivers/usb/serial/qcaux.c
+@@ -36,6 +36,7 @@
+ #define UTSTARCOM_PRODUCT_UM175_V1 0x3712
+ #define UTSTARCOM_PRODUCT_UM175_V2 0x3714
+ #define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715
++#define PANTECH_PRODUCT_UML190_VZW 0x3716
+ #define PANTECH_PRODUCT_UML290_VZW 0x3718
+
+ /* CMOTECH devices */
+@@ -67,7 +68,11 @@ static struct usb_device_id id_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
+- { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xfe, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfd, 0xff) }, /* NMEA */
++ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfe, 0xff) }, /* WMC */
++ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) }, /* DIAG */
+ { },
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
+index 0ce5f79..32c93d7 100644
+--- a/drivers/usb/storage/realtek_cr.c
++++ b/drivers/usb/storage/realtek_cr.c
+@@ -791,7 +791,7 @@ static void rts51x_suspend_timer_fn(unsigned long data)
+ rts51x_set_stat(chip, RTS51X_STAT_SS);
+ /* ignore mass storage interface's children */
+ pm_suspend_ignore_children(&us->pusb_intf->dev, true);
+- usb_autopm_put_interface(us->pusb_intf);
++ usb_autopm_put_interface_async(us->pusb_intf);
+ US_DEBUGP("%s: RTS51X_STAT_SS 01,"
+ "intf->pm_usage_cnt:%d, power.usage:%d\n",
+ __func__,
+diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
+index 2a83425..68b19ab 100644
+--- a/fs/ecryptfs/crypto.c
++++ b/fs/ecryptfs/crypto.c
+@@ -417,17 +417,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
+ (unsigned long long)(extent_base + extent_offset), rc);
+ goto out;
+ }
+- if (unlikely(ecryptfs_verbosity > 0)) {
+- ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
+- "with iv:\n");
+- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
+- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
+- "encryption:\n");
+- ecryptfs_dump_hex((char *)
+- (page_address(page)
+- + (extent_offset * crypt_stat->extent_size)),
+- 8);
+- }
+ rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
+ page, (extent_offset
+ * crypt_stat->extent_size),
+@@ -440,14 +429,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
+ goto out;
+ }
+ rc = 0;
+- if (unlikely(ecryptfs_verbosity > 0)) {
+- ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16llx]; "
+- "rc = [%d]\n",
+- (unsigned long long)(extent_base + extent_offset), rc);
+- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
+- "encryption:\n");
+- ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
+- }
+ out:
+ return rc;
+ }
+@@ -543,17 +524,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
+ (unsigned long long)(extent_base + extent_offset), rc);
+ goto out;
+ }
+- if (unlikely(ecryptfs_verbosity > 0)) {
+- ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
+- "with iv:\n");
+- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
+- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
+- "decryption:\n");
+- ecryptfs_dump_hex((char *)
+- (page_address(enc_extent_page)
+- + (extent_offset * crypt_stat->extent_size)),
+- 8);
+- }
+ rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
+ (extent_offset
+ * crypt_stat->extent_size),
+@@ -567,16 +537,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
+ goto out;
+ }
+ rc = 0;
+- if (unlikely(ecryptfs_verbosity > 0)) {
+- ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16llx]; "
+- "rc = [%d]\n",
+- (unsigned long long)(extent_base + extent_offset), rc);
+- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
+- "decryption:\n");
+- ecryptfs_dump_hex((char *)(page_address(page)
+- + (extent_offset
+- * crypt_stat->extent_size)), 8);
+- }
+ out:
+ return rc;
+ }
+@@ -1620,7 +1580,8 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
+ rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode);
+ if (rc) {
+ printk(KERN_DEBUG "Valid eCryptfs headers not found in "
+- "file header region or xattr region\n");
++ "file header region or xattr region, inode %lu\n",
++ ecryptfs_inode->i_ino);
+ rc = -EINVAL;
+ goto out;
+ }
+@@ -1629,7 +1590,8 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
+ ECRYPTFS_DONT_VALIDATE_HEADER_SIZE);
+ if (rc) {
+ printk(KERN_DEBUG "Valid eCryptfs headers not found in "
+- "file xattr region either\n");
++ "file xattr region either, inode %lu\n",
++ ecryptfs_inode->i_ino);
+ rc = -EINVAL;
+ }
+ if (crypt_stat->mount_crypt_stat->flags
+@@ -1640,7 +1602,8 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
+ "crypto metadata only in the extended attribute "
+ "region, but eCryptfs was mounted without "
+ "xattr support enabled. eCryptfs will not treat "
+- "this like an encrypted file.\n");
++ "this like an encrypted file, inode %lu\n",
++ ecryptfs_inode->i_ino);
+ rc = -EINVAL;
+ }
+ }
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index 32f90a3..d2039ca 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -841,18 +841,6 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
+ size_t num_zeros = (PAGE_CACHE_SIZE
+ - (ia->ia_size & ~PAGE_CACHE_MASK));
+
+-
+- /*
+- * XXX(truncate) this should really happen at the begginning
+- * of ->setattr. But the code is too messy to that as part
+- * of a larger patch. ecryptfs is also totally missing out
+- * on the inode_change_ok check at the beginning of
+- * ->setattr while would include this.
+- */
+- rc = inode_newsize_ok(inode, ia->ia_size);
+- if (rc)
+- goto out;
+-
+ if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
+ truncate_setsize(inode, ia->ia_size);
+ lower_ia->ia_size = ia->ia_size;
+@@ -902,6 +890,28 @@ out:
+ return rc;
+ }
+
++static int ecryptfs_inode_newsize_ok(struct inode *inode, loff_t offset)
++{
++ struct ecryptfs_crypt_stat *crypt_stat;
++ loff_t lower_oldsize, lower_newsize;
++
++ crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
++ lower_oldsize = upper_size_to_lower_size(crypt_stat,
++ i_size_read(inode));
++ lower_newsize = upper_size_to_lower_size(crypt_stat, offset);
++ if (lower_newsize > lower_oldsize) {
++ /*
++ * The eCryptfs inode and the new *lower* size are mixed here
++ * because we may not have the lower i_mutex held and/or it may
++ * not be appropriate to call inode_newsize_ok() with inodes
++ * from other filesystems.
++ */
++ return inode_newsize_ok(inode, lower_newsize);
++ }
++
++ return 0;
++}
++
+ /**
+ * ecryptfs_truncate
+ * @dentry: The ecryptfs layer dentry
+@@ -918,6 +928,10 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
+ struct iattr lower_ia = { .ia_valid = 0 };
+ int rc;
+
++ rc = ecryptfs_inode_newsize_ok(dentry->d_inode, new_length);
++ if (rc)
++ return rc;
++
+ rc = truncate_upper(dentry, &ia, &lower_ia);
+ if (!rc && lower_ia.ia_valid & ATTR_SIZE) {
+ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+@@ -997,6 +1011,16 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
+ }
+ }
+ mutex_unlock(&crypt_stat->cs_mutex);
++
++ rc = inode_change_ok(inode, ia);
++ if (rc)
++ goto out;
++ if (ia->ia_valid & ATTR_SIZE) {
++ rc = ecryptfs_inode_newsize_ok(inode, ia->ia_size);
++ if (rc)
++ goto out;
++ }
++
+ if (S_ISREG(inode->i_mode)) {
+ rc = filemap_write_and_wait(inode->i_mapping);
+ if (rc)
+diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
+index 940a82e..0dc5a3d 100644
+--- a/fs/ecryptfs/miscdev.c
++++ b/fs/ecryptfs/miscdev.c
+@@ -409,11 +409,47 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
+ ssize_t sz = 0;
+ char *data;
+ uid_t euid = current_euid();
++ unsigned char packet_size_peek[3];
+ int rc;
+
+- if (count == 0)
++ if (count == 0) {
+ goto out;
++ } else if (count == (1 + 4)) {
++ /* Likely a harmless MSG_HELO or MSG_QUIT - no packet length */
++ goto memdup;
++ } else if (count < (1 + 4 + 1)
++ || count > (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
++ + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES)) {
++ printk(KERN_WARNING "%s: Acceptable packet size range is "
++ "[%d-%lu], but amount of data written is [%zu].",
++ __func__, (1 + 4 + 1),
++ (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
++ + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES), count);
++ return -EINVAL;
++ }
++
++ if (copy_from_user(packet_size_peek, (buf + 1 + 4),
++ sizeof(packet_size_peek))) {
++ printk(KERN_WARNING "%s: Error while inspecting packet size\n",
++ __func__);
++ return -EFAULT;
++ }
++
++ rc = ecryptfs_parse_packet_length(packet_size_peek, &packet_size,
++ &packet_size_length);
++ if (rc) {
++ printk(KERN_WARNING "%s: Error parsing packet length; "
++ "rc = [%d]\n", __func__, rc);
++ return rc;
++ }
++
++ if ((1 + 4 + packet_size_length + packet_size) != count) {
++ printk(KERN_WARNING "%s: Invalid packet size [%zu]\n", __func__,
++ packet_size);
++ return -EINVAL;
++ }
+
++memdup:
+ data = memdup_user(buf, count);
+ if (IS_ERR(data)) {
+ printk(KERN_ERR "%s: memdup_user returned error [%ld]\n",
+@@ -435,23 +471,7 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
+ }
+ memcpy(&counter_nbo, &data[i], 4);
+ seq = be32_to_cpu(counter_nbo);
+- i += 4;
+- rc = ecryptfs_parse_packet_length(&data[i], &packet_size,
+- &packet_size_length);
+- if (rc) {
+- printk(KERN_WARNING "%s: Error parsing packet length; "
+- "rc = [%d]\n", __func__, rc);
+- goto out_free;
+- }
+- i += packet_size_length;
+- if ((1 + 4 + packet_size_length + packet_size) != count) {
+- printk(KERN_WARNING "%s: (1 + packet_size_length([%zd])"
+- " + packet_size([%zd]))([%zd]) != "
+- "count([%zd]). Invalid packet format.\n",
+- __func__, packet_size_length, packet_size,
+- (1 + packet_size_length + packet_size), count);
+- goto out_free;
+- }
++ i += 4 + packet_size_length;
+ rc = ecryptfs_miscdev_response(&data[i], packet_size,
+ euid, current_user_ns(),
+ task_pid(current), seq);
+diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
+index 3745f7c..54eb14c 100644
+--- a/fs/ecryptfs/read_write.c
++++ b/fs/ecryptfs/read_write.c
+@@ -132,6 +132,11 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
+ size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
+ size_t total_remaining_bytes = ((offset + size) - pos);
+
++ if (fatal_signal_pending(current)) {
++ rc = -EINTR;
++ break;
++ }
++
+ if (num_bytes > total_remaining_bytes)
+ num_bytes = total_remaining_bytes;
+ if (pos < offset) {
+@@ -193,15 +198,19 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
+ }
+ pos += num_bytes;
+ }
+- if ((offset + size) > ecryptfs_file_size) {
+- i_size_write(ecryptfs_inode, (offset + size));
++ if (pos > ecryptfs_file_size) {
++ i_size_write(ecryptfs_inode, pos);
+ if (crypt_stat->flags & ECRYPTFS_ENCRYPTED) {
+- rc = ecryptfs_write_inode_size_to_metadata(
++ int rc2;
++
++ rc2 = ecryptfs_write_inode_size_to_metadata(
+ ecryptfs_inode);
+- if (rc) {
++ if (rc2) {
+ printk(KERN_ERR "Problem with "
+ "ecryptfs_write_inode_size_to_metadata; "
+- "rc = [%d]\n", rc);
++ "rc = [%d]\n", rc2);
++ if (!rc)
++ rc = rc2;
+ goto out;
+ }
+ }
+diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
+index f94fc48..5c93ffc 100644
+--- a/fs/jbd/checkpoint.c
++++ b/fs/jbd/checkpoint.c
+@@ -453,8 +453,6 @@ out:
+ *
+ * Return <0 on error, 0 on success, 1 if there was nothing to clean up.
+ *
+- * Called with the journal lock held.
+- *
+ * This is the only part of the journaling code which really needs to be
+ * aware of transaction aborts. Checkpointing involves writing to the
+ * main filesystem area rather than to the journal, so it can proceed
+@@ -472,13 +470,14 @@ int cleanup_journal_tail(journal_t *journal)
+ if (is_journal_aborted(journal))
+ return 1;
+
+- /* OK, work out the oldest transaction remaining in the log, and
++ /*
++ * OK, work out the oldest transaction remaining in the log, and
+ * the log block it starts at.
+ *
+ * If the log is now empty, we need to work out which is the
+ * next transaction ID we will write, and where it will
+- * start. */
+-
++ * start.
++ */
+ spin_lock(&journal->j_state_lock);
+ spin_lock(&journal->j_list_lock);
+ transaction = journal->j_checkpoint_transactions;
+@@ -504,7 +503,25 @@ int cleanup_journal_tail(journal_t *journal)
+ spin_unlock(&journal->j_state_lock);
+ return 1;
+ }
++ spin_unlock(&journal->j_state_lock);
++
++ /*
++ * We need to make sure that any blocks that were recently written out
++ * --- perhaps by log_do_checkpoint() --- are flushed out before we
++ * drop the transactions from the journal. It's unlikely this will be
++ * necessary, especially with an appropriately sized journal, but we
++ * need this to guarantee correctness. Fortunately
++ * cleanup_journal_tail() doesn't get called all that often.
++ */
++ if (journal->j_flags & JFS_BARRIER)
++ blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+
++ spin_lock(&journal->j_state_lock);
++ if (!tid_gt(first_tid, journal->j_tail_sequence)) {
++ spin_unlock(&journal->j_state_lock);
++ /* Someone else cleaned up journal so return 0 */
++ return 0;
++ }
+ /* OK, update the superblock to recover the freed space.
+ * Physical blocks come first: have we wrapped beyond the end of
+ * the log? */
+diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
+index 5b43e96..008bf06 100644
+--- a/fs/jbd/recovery.c
++++ b/fs/jbd/recovery.c
+@@ -20,6 +20,7 @@
+ #include <linux/fs.h>
+ #include <linux/jbd.h>
+ #include <linux/errno.h>
++#include <linux/blkdev.h>
+ #endif
+
+ /*
+@@ -263,6 +264,9 @@ int journal_recover(journal_t *journal)
+ err2 = sync_blockdev(journal->j_fs_dev);
+ if (!err)
+ err = err2;
++ /* Flush disk caches to get replayed data on the permanent storage */
++ if (journal->j_flags & JFS_BARRIER)
++ blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+
+ return err;
+ }
+diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
+index d4e6080b..779789a 100644
+--- a/fs/sysfs/file.c
++++ b/fs/sysfs/file.c
+@@ -493,6 +493,12 @@ int sysfs_attr_ns(struct kobject *kobj, const struct attribute *attr,
+ const void *ns = NULL;
+ int err;
+
++ if (!dir_sd) {
++ WARN(1, KERN_ERR "sysfs: kobject %s without dirent\n",
++ kobject_name(kobj));
++ return -ENOENT;
++ }
++
+ err = 0;
+ if (!sysfs_ns_type(dir_sd))
+ goto out;
+diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
+index c81b22f..deb804b 100644
+--- a/fs/sysfs/inode.c
++++ b/fs/sysfs/inode.c
+@@ -318,8 +318,11 @@ int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const void *ns, const cha
+ struct sysfs_addrm_cxt acxt;
+ struct sysfs_dirent *sd;
+
+- if (!dir_sd)
++ if (!dir_sd) {
++ WARN(1, KERN_WARNING "sysfs: can not remove '%s', no directory\n",
++ name);
+ return -ENOENT;
++ }
+
+ sysfs_addrm_start(&acxt, dir_sd);
+
+diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
+index ce9268a..ee98d0b 100644
+--- a/fs/xfs/xfs_vnodeops.c
++++ b/fs/xfs/xfs_vnodeops.c
+@@ -131,7 +131,8 @@ xfs_readlink(
+ __func__, (unsigned long long) ip->i_ino,
+ (long long) pathlen);
+ ASSERT(0);
+- return XFS_ERROR(EFSCORRUPTED);
++ error = XFS_ERROR(EFSCORRUPTED);
++ goto out;
+ }
+
+
+diff --git a/include/drm/drmP.h b/include/drm/drmP.h
+index 1f9e951..bf4b2dc 100644
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -1328,6 +1328,7 @@ extern int drm_getmagic(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+ extern int drm_authmagic(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
++extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
+
+ /* Cache management (drm_cache.c) */
+ void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h
+index 3419bf5..d55f434 100644
+--- a/include/net/netns/generic.h
++++ b/include/net/netns/generic.h
+@@ -41,6 +41,7 @@ static inline void *net_generic(const struct net *net, int id)
+ ptr = ng->ptr[id - 1];
+ rcu_read_unlock();
+
++ BUG_ON(!ptr);
+ return ptr;
+ }
+ #endif
+diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
+index f1fa1f6..68223e4 100644
+--- a/net/caif/caif_dev.c
++++ b/net/caif/caif_dev.c
+@@ -53,7 +53,6 @@ struct cfcnfg *get_cfcnfg(struct net *net)
+ struct caif_net *caifn;
+ BUG_ON(!net);
+ caifn = net_generic(net, caif_net_id);
+- BUG_ON(!caifn);
+ return caifn->cfg;
+ }
+ EXPORT_SYMBOL(get_cfcnfg);
+@@ -63,7 +62,6 @@ static struct caif_device_entry_list *caif_device_list(struct net *net)
+ struct caif_net *caifn;
+ BUG_ON(!net);
+ caifn = net_generic(net, caif_net_id);
+- BUG_ON(!caifn);
+ return &caifn->caifdevs;
+ }
+
+@@ -92,7 +90,6 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
+ struct caif_device_entry *caifd;
+
+ caifdevs = caif_device_list(dev_net(dev));
+- BUG_ON(!caifdevs);
+
+ caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
+ if (!caifd)
+@@ -112,7 +109,7 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
+ struct caif_device_entry_list *caifdevs =
+ caif_device_list(dev_net(dev));
+ struct caif_device_entry *caifd;
+- BUG_ON(!caifdevs);
++
+ list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
+ if (caifd->netdev == dev)
+ return caifd;
+@@ -353,7 +350,7 @@ static struct notifier_block caif_device_notifier = {
+ static int caif_init_net(struct net *net)
+ {
+ struct caif_net *caifn = net_generic(net, caif_net_id);
+- BUG_ON(!caifn);
++
+ INIT_LIST_HEAD(&caifn->caifdevs.list);
+ mutex_init(&caifn->caifdevs.lock);
+
+@@ -418,7 +415,7 @@ static int __init caif_device_init(void)
+ {
+ int result;
+
+- result = register_pernet_device(&caif_net_ops);
++ result = register_pernet_subsys(&caif_net_ops);
+
+ if (result)
+ return result;
+@@ -431,7 +428,7 @@ static int __init caif_device_init(void)
+
+ static void __exit caif_device_exit(void)
+ {
+- unregister_pernet_device(&caif_net_ops);
++ unregister_pernet_subsys(&caif_net_ops);
+ unregister_netdevice_notifier(&caif_device_notifier);
+ dev_remove_pack(&caif_packet_type);
+ }
+diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
+index 00523ec..86ff37c 100644
+--- a/net/caif/cfcnfg.c
++++ b/net/caif/cfcnfg.c
+@@ -309,7 +309,6 @@ int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
+ int err;
+ struct cfctrl_link_param param;
+ struct cfcnfg *cfg = get_cfcnfg(net);
+- caif_assert(cfg != NULL);
+
+ rcu_read_lock();
+ err = caif_connect_req_to_link_param(cfg, conn_req, &param);
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index 385aefe..0329404 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -990,9 +990,9 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
+ nonempty = 1;
+ }
+
+- if (nonempty)
+- RCU_INIT_POINTER(dev->xps_maps, new_dev_maps);
+- else {
++ if (nonempty) {
++ rcu_assign_pointer(dev->xps_maps, new_dev_maps);
++ } else {
+ kfree(new_dev_maps);
+ RCU_INIT_POINTER(dev->xps_maps, NULL);
+ }
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index aefcd7a..0e950fd 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -30,6 +30,20 @@ EXPORT_SYMBOL(init_net);
+
+ #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
+
++static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
++
++static struct net_generic *net_alloc_generic(void)
++{
++ struct net_generic *ng;
++ size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
++
++ ng = kzalloc(generic_size, GFP_KERNEL);
++ if (ng)
++ ng->len = max_gen_ptrs;
++
++ return ng;
++}
++
+ static int net_assign_generic(struct net *net, int id, void *data)
+ {
+ struct net_generic *ng, *old_ng;
+@@ -43,8 +57,7 @@ static int net_assign_generic(struct net *net, int id, void *data)
+ if (old_ng->len >= id)
+ goto assign;
+
+- ng = kzalloc(sizeof(struct net_generic) +
+- id * sizeof(void *), GFP_KERNEL);
++ ng = net_alloc_generic();
+ if (ng == NULL)
+ return -ENOMEM;
+
+@@ -59,7 +72,6 @@ static int net_assign_generic(struct net *net, int id, void *data)
+ * the old copy for kfree after a grace period.
+ */
+
+- ng->len = id;
+ memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
+
+ rcu_assign_pointer(net->gen, ng);
+@@ -161,18 +173,6 @@ out_undo:
+ goto out;
+ }
+
+-static struct net_generic *net_alloc_generic(void)
+-{
+- struct net_generic *ng;
+- size_t generic_size = sizeof(struct net_generic) +
+- INITIAL_NET_GEN_PTRS * sizeof(void *);
+-
+- ng = kzalloc(generic_size, GFP_KERNEL);
+- if (ng)
+- ng->len = INITIAL_NET_GEN_PTRS;
+-
+- return ng;
+-}
+
+ #ifdef CONFIG_NET_NS
+ static struct kmem_cache *net_cachep;
+@@ -483,6 +483,7 @@ again:
+ }
+ return error;
+ }
++ max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
+ }
+ error = __register_pernet_operations(list, ops);
+ if (error) {
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index cf64c1f..5d4d896 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -763,7 +763,7 @@ int __netpoll_setup(struct netpoll *np)
+ }
+
+ /* last thing to do is link it to the net device structure */
+- RCU_INIT_POINTER(ndev->npinfo, npinfo);
++ rcu_assign_pointer(ndev->npinfo, npinfo);
+
+ return 0;
+
+diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
+index 2ab16e1..74d321a 100644
+--- a/net/decnet/dn_dev.c
++++ b/net/decnet/dn_dev.c
+@@ -388,7 +388,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
+ }
+
+ ifa->ifa_next = dn_db->ifa_list;
+- RCU_INIT_POINTER(dn_db->ifa_list, ifa);
++ rcu_assign_pointer(dn_db->ifa_list, ifa);
+
+ dn_ifaddr_notify(RTM_NEWADDR, ifa);
+ blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
+@@ -1093,7 +1093,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
+
+ memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
+
+- RCU_INIT_POINTER(dev->dn_ptr, dn_db);
++ rcu_assign_pointer(dev->dn_ptr, dn_db);
+ dn_db->dev = dev;
+ init_timer(&dn_db->timer);
+
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 65f01dc..e41c40f 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -258,7 +258,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
+ ip_mc_up(in_dev);
+
+ /* we can receive as soon as ip_ptr is set -- do this last */
+- RCU_INIT_POINTER(dev->ip_ptr, in_dev);
++ rcu_assign_pointer(dev->ip_ptr, in_dev);
+ out:
+ return in_dev;
+ out_kfree:
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index 37b6711..3ce23f9 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -205,7 +205,7 @@ static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node)
+ return (struct tnode *)(parent & ~NODE_TYPE_MASK);
+ }
+
+-/* Same as RCU_INIT_POINTER
++/* Same as rcu_assign_pointer
+ * but that macro() assumes that value is a pointer.
+ */
+ static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
+@@ -529,7 +529,7 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *
+ if (n)
+ node_set_parent(n, tn);
+
+- RCU_INIT_POINTER(tn->child[i], n);
++ rcu_assign_pointer(tn->child[i], n);
+ }
+
+ #define MAX_WORK 10
+@@ -1015,7 +1015,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
+
+ tp = node_parent((struct rt_trie_node *) tn);
+ if (!tp)
+- RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
++ rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
+
+ tnode_free_flush();
+ if (!tp)
+@@ -1027,7 +1027,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
+ if (IS_TNODE(tn))
+ tn = (struct tnode *)resize(t, (struct tnode *)tn);
+
+- RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
++ rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
+ tnode_free_flush();
+ }
+
+@@ -1164,7 +1164,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
+ put_child(t, (struct tnode *)tp, cindex,
+ (struct rt_trie_node *)tn);
+ } else {
+- RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
++ rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
+ tp = tn;
+ }
+ }
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index c3cc64c..c8989a7 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1244,7 +1244,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
+
+ im->next_rcu = in_dev->mc_list;
+ in_dev->mc_count++;
+- RCU_INIT_POINTER(in_dev->mc_list, im);
++ rcu_assign_pointer(in_dev->mc_list, im);
+
+ #ifdef CONFIG_IP_MULTICAST
+ igmpv3_del_delrec(in_dev, im->multiaddr);
+@@ -1816,7 +1816,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
+ iml->next_rcu = inet->mc_list;
+ iml->sflist = NULL;
+ iml->sfmode = MCAST_EXCLUDE;
+- RCU_INIT_POINTER(inet->mc_list, iml);
++ rcu_assign_pointer(inet->mc_list, iml);
+ ip_mc_inc_group(in_dev, addr);
+ err = 0;
+ done:
+@@ -2003,7 +2003,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
+ atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
+ kfree_rcu(psl, rcu);
+ }
+- RCU_INIT_POINTER(pmc->sflist, newpsl);
++ rcu_assign_pointer(pmc->sflist, newpsl);
+ psl = newpsl;
+ }
+ rv = 1; /* > 0 for insert logic below if sl_count is 0 */
+@@ -2106,7 +2106,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
+ } else
+ (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
+ 0, NULL, 0);
+- RCU_INIT_POINTER(pmc->sflist, newpsl);
++ rcu_assign_pointer(pmc->sflist, newpsl);
+ pmc->sfmode = msf->imsf_fmode;
+ err = 0;
+ done:
+diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
+index 0b2e732..17ad951 100644
+--- a/net/ipv4/ipip.c
++++ b/net/ipv4/ipip.c
+@@ -231,7 +231,7 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+ if (t == iter) {
+- RCU_INIT_POINTER(*tp, t->next);
++ rcu_assign_pointer(*tp, t->next);
+ break;
+ }
+ }
+@@ -241,8 +241,8 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
+ {
+ struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);
+
+- RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
+- RCU_INIT_POINTER(*tp, t);
++ rcu_assign_pointer(t->next, rtnl_dereference(*tp));
++ rcu_assign_pointer(*tp, t);
+ }
+
+ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
+@@ -792,7 +792,7 @@ static int __net_init ipip_fb_tunnel_init(struct net_device *dev)
+ return -ENOMEM;
+
+ dev_hold(dev);
+- RCU_INIT_POINTER(ipn->tunnels_wc[0], tunnel);
++ rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
+ return 0;
+ }
+
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 76a7f07..d2aae27 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -1225,7 +1225,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
+
+ ret = ip_ra_control(sk, 1, mrtsock_destruct);
+ if (ret == 0) {
+- RCU_INIT_POINTER(mrt->mroute_sk, sk);
++ rcu_assign_pointer(mrt->mroute_sk, sk);
+ IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
+ }
+ rtnl_unlock();
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index a9db4b1..c89e354 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -630,7 +630,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
+ arg.iov[0].iov_len = sizeof(rep.th);
+
+ #ifdef CONFIG_TCP_MD5SIG
+- key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
++ key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL;
+ if (key) {
+ rep.opt[0] = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 63170e2..097e0c7 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1138,11 +1138,9 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
+ sk_mem_uncharge(sk, len);
+ sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+
+- /* Any change of skb->len requires recalculation of tso
+- * factor and mss.
+- */
++ /* Any change of skb->len requires recalculation of tso factor. */
+ if (tcp_skb_pcount(skb) > 1)
+- tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));
++ tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
+
+ return 0;
+ }
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 36806de..836c4ea 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -429,7 +429,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
+ ndev->tstamp = jiffies;
+ addrconf_sysctl_register(ndev);
+ /* protected by rtnl_lock */
+- RCU_INIT_POINTER(dev->ip6_ptr, ndev);
++ rcu_assign_pointer(dev->ip6_ptr, ndev);
+
+ /* Join all-node multicast group */
+ ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 4e2e9ff..d19f499 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -218,8 +218,8 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
+ {
+ struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
+
+- RCU_INIT_POINTER(t->next , rtnl_dereference(*tp));
+- RCU_INIT_POINTER(*tp, t);
++ rcu_assign_pointer(t->next , rtnl_dereference(*tp));
++ rcu_assign_pointer(*tp, t);
+ }
+
+ /**
+@@ -237,7 +237,7 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+ if (t == iter) {
+- RCU_INIT_POINTER(*tp, t->next);
++ rcu_assign_pointer(*tp, t->next);
+ break;
+ }
+ }
+@@ -1450,7 +1450,7 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
+
+ t->parms.proto = IPPROTO_IPV6;
+ dev_hold(dev);
+- RCU_INIT_POINTER(ip6n->tnls_wc[0], t);
++ rcu_assign_pointer(ip6n->tnls_wc[0], t);
+ return 0;
+ }
+
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 331af3b..361ebf3 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -131,7 +131,7 @@ static mh_filter_t __rcu *mh_filter __read_mostly;
+
+ int rawv6_mh_filter_register(mh_filter_t filter)
+ {
+- RCU_INIT_POINTER(mh_filter, filter);
++ rcu_assign_pointer(mh_filter, filter);
+ return 0;
+ }
+ EXPORT_SYMBOL(rawv6_mh_filter_register);
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 96f3623..72a939d 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -182,7 +182,7 @@ static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+ if (t == iter) {
+- RCU_INIT_POINTER(*tp, t->next);
++ rcu_assign_pointer(*tp, t->next);
+ break;
+ }
+ }
+@@ -192,8 +192,8 @@ static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
+ {
+ struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);
+
+- RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
+- RCU_INIT_POINTER(*tp, t);
++ rcu_assign_pointer(t->next, rtnl_dereference(*tp));
++ rcu_assign_pointer(*tp, t);
+ }
+
+ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
+@@ -393,7 +393,7 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
+ p->addr = a->addr;
+ p->flags = a->flags;
+ t->prl_count++;
+- RCU_INIT_POINTER(t->prl, p);
++ rcu_assign_pointer(t->prl, p);
+ out:
+ return err;
+ }
+@@ -1177,7 +1177,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
+ if (!dev->tstats)
+ return -ENOMEM;
+ dev_hold(dev);
+- RCU_INIT_POINTER(sitn->tunnels_wc[0], tunnel);
++ rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
+ return 0;
+ }
+
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 2dea4bb..b859e4a 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1084,7 +1084,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
+
+ #ifdef CONFIG_TCP_MD5SIG
+ if (sk)
+- key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
++ key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr);
+ #endif
+
+ if (th->ack)
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index d21e7eb..55670ec 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -393,11 +393,6 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
+ {
+ int rc;
+
+- if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
+- goto drop;
+-
+- nf_reset(skb);
+-
+ /* Charge it to the socket, dropping if the queue is full. */
+ rc = sock_queue_rcv_skb(sk, skb);
+ if (rc < 0)
+diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
+index 93b2434..41c2310 100644
+--- a/net/mac80211/agg-rx.c
++++ b/net/mac80211/agg-rx.c
+@@ -326,7 +326,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
+ status = WLAN_STATUS_SUCCESS;
+
+ /* activate it for RX */
+- RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
++ rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
+
+ if (timeout)
+ mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index d06c65f..11cee76 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -575,7 +575,7 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
+
+ sdata->vif.bss_conf.dtim_period = new->dtim_period;
+
+- RCU_INIT_POINTER(sdata->u.ap.beacon, new);
++ rcu_assign_pointer(sdata->u.ap.beacon, new);
+
+ synchronize_rcu();
+
+@@ -922,7 +922,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
+ return -EBUSY;
+ }
+
+- RCU_INIT_POINTER(vlansdata->u.vlan.sta, sta);
++ rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
+ }
+
+ sta->sdata = vlansdata;
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index ede9a8b..3ece106 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -184,7 +184,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
+ *pos++ = 0; /* U-APSD no in use */
+ }
+
+- RCU_INIT_POINTER(ifibss->presp, skb);
++ rcu_assign_pointer(ifibss->presp, skb);
+
+ sdata->vif.bss_conf.beacon_int = beacon_int;
+ sdata->vif.bss_conf.basic_rates = basic_rates;
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index b1b1bb3..9da8626 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -2719,7 +2719,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
+ {
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+- struct ieee80211_work *wk;
+ u8 bssid[ETH_ALEN];
+ bool assoc_bss = false;
+
+@@ -2732,30 +2731,47 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
+ assoc_bss = true;
+ } else {
+ bool not_auth_yet = false;
++ struct ieee80211_work *tmp, *wk = NULL;
+
+ mutex_unlock(&ifmgd->mtx);
+
+ mutex_lock(&local->mtx);
+- list_for_each_entry(wk, &local->work_list, list) {
+- if (wk->sdata != sdata)
++ list_for_each_entry(tmp, &local->work_list, list) {
++ if (tmp->sdata != sdata)
+ continue;
+
+- if (wk->type != IEEE80211_WORK_DIRECT_PROBE &&
+- wk->type != IEEE80211_WORK_AUTH &&
+- wk->type != IEEE80211_WORK_ASSOC &&
+- wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
++ if (tmp->type != IEEE80211_WORK_DIRECT_PROBE &&
++ tmp->type != IEEE80211_WORK_AUTH &&
++ tmp->type != IEEE80211_WORK_ASSOC &&
++ tmp->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
+ continue;
+
+- if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN))
++ if (memcmp(req->bss->bssid, tmp->filter_ta, ETH_ALEN))
+ continue;
+
+- not_auth_yet = wk->type == IEEE80211_WORK_DIRECT_PROBE;
+- list_del_rcu(&wk->list);
+- free_work(wk);
++ not_auth_yet = tmp->type == IEEE80211_WORK_DIRECT_PROBE;
++ list_del_rcu(&tmp->list);
++ synchronize_rcu();
++ wk = tmp;
+ break;
+ }
+ mutex_unlock(&local->mtx);
+
++ if (wk && wk->type == IEEE80211_WORK_ASSOC) {
++ /* clean up dummy sta & TX sync */
++ sta_info_destroy_addr(wk->sdata, wk->filter_ta);
++ if (wk->assoc.synced)
++ drv_finish_tx_sync(local, wk->sdata,
++ wk->filter_ta,
++ IEEE80211_TX_SYNC_ASSOC);
++ } else if (wk && wk->type == IEEE80211_WORK_AUTH) {
++ if (wk->probe_auth.synced)
++ drv_finish_tx_sync(local, wk->sdata,
++ wk->filter_ta,
++ IEEE80211_TX_SYNC_AUTH);
++ }
++ kfree(wk);
++
+ /*
+ * If somebody requests authentication and we haven't
+ * sent out an auth frame yet there's no need to send
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 8eaa746..1fdd8ff 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -73,7 +73,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
+ if (!s)
+ return -ENOENT;
+ if (s == sta) {
+- RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)],
++ rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)],
+ s->hnext);
+ return 0;
+ }
+@@ -83,7 +83,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
+ s = rcu_dereference_protected(s->hnext,
+ lockdep_is_held(&local->sta_lock));
+ if (rcu_access_pointer(s->hnext)) {
+- RCU_INIT_POINTER(s->hnext, sta->hnext);
++ rcu_assign_pointer(s->hnext, sta->hnext);
+ return 0;
+ }
+
+@@ -232,7 +232,7 @@ static void sta_info_hash_add(struct ieee80211_local *local,
+ struct sta_info *sta)
+ {
+ sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)];
+- RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
++ rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
+ }
+
+ static void sta_unblock(struct work_struct *wk)
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 7202b06..1d15193 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -776,7 +776,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
+ if (exp->helper) {
+ help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
+ if (help)
+- RCU_INIT_POINTER(help->helper, exp->helper);
++ rcu_assign_pointer(help->helper, exp->helper);
+ }
+
+ #ifdef CONFIG_NF_CONNTRACK_MARK
+diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
+index b62c414..14af632 100644
+--- a/net/netfilter/nf_conntrack_ecache.c
++++ b/net/netfilter/nf_conntrack_ecache.c
+@@ -91,7 +91,7 @@ int nf_conntrack_register_notifier(struct net *net,
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+- RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new);
++ rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
+ mutex_unlock(&nf_ct_ecache_mutex);
+ return ret;
+
+@@ -128,7 +128,7 @@ int nf_ct_expect_register_notifier(struct net *net,
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+- RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new);
++ rcu_assign_pointer(net->ct.nf_expect_event_cb, new);
+ mutex_unlock(&nf_ct_ecache_mutex);
+ return ret;
+
+diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
+index 4605c94..641ff5f 100644
+--- a/net/netfilter/nf_conntrack_extend.c
++++ b/net/netfilter/nf_conntrack_extend.c
+@@ -169,7 +169,7 @@ int nf_ct_extend_register(struct nf_ct_ext_type *type)
+ before updating alloc_size */
+ type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
+ + type->len;
+- RCU_INIT_POINTER(nf_ct_ext_types[type->id], type);
++ rcu_assign_pointer(nf_ct_ext_types[type->id], type);
+ update_alloc_size(type);
+ out:
+ mutex_unlock(&nf_ct_ext_type_mutex);
+diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
+index 93c4bdb..bbe23ba 100644
+--- a/net/netfilter/nf_conntrack_helper.c
++++ b/net/netfilter/nf_conntrack_helper.c
+@@ -145,7 +145,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
+ memset(&help->help, 0, sizeof(help->help));
+ }
+
+- RCU_INIT_POINTER(help->helper, helper);
++ rcu_assign_pointer(help->helper, helper);
+ out:
+ return ret;
+ }
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 257e772..782cdcd 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -1163,7 +1163,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
+ return -EOPNOTSUPP;
+ }
+
+- RCU_INIT_POINTER(help->helper, helper);
++ rcu_assign_pointer(help->helper, helper);
+
+ return 0;
+ }
+diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
+index ce0c406..957374a 100644
+--- a/net/netfilter/nf_log.c
++++ b/net/netfilter/nf_log.c
+@@ -55,7 +55,7 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
+ llog = rcu_dereference_protected(nf_loggers[pf],
+ lockdep_is_held(&nf_log_mutex));
+ if (llog == NULL)
+- RCU_INIT_POINTER(nf_loggers[pf], logger);
++ rcu_assign_pointer(nf_loggers[pf], logger);
+ }
+
+ mutex_unlock(&nf_log_mutex);
+@@ -92,7 +92,7 @@ int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
+ mutex_unlock(&nf_log_mutex);
+ return -ENOENT;
+ }
+- RCU_INIT_POINTER(nf_loggers[pf], logger);
++ rcu_assign_pointer(nf_loggers[pf], logger);
+ mutex_unlock(&nf_log_mutex);
+ return 0;
+ }
+@@ -250,7 +250,7 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
+ mutex_unlock(&nf_log_mutex);
+ return -ENOENT;
+ }
+- RCU_INIT_POINTER(nf_loggers[tindex], logger);
++ rcu_assign_pointer(nf_loggers[tindex], logger);
+ mutex_unlock(&nf_log_mutex);
+ } else {
+ mutex_lock(&nf_log_mutex);
+diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
+index 99ffd28..b3a7db6 100644
+--- a/net/netfilter/nf_queue.c
++++ b/net/netfilter/nf_queue.c
+@@ -40,7 +40,7 @@ int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
+ else if (old)
+ ret = -EBUSY;
+ else {
+- RCU_INIT_POINTER(queue_handler[pf], qh);
++ rcu_assign_pointer(queue_handler[pf], qh);
+ ret = 0;
+ }
+ mutex_unlock(&queue_handler_mutex);
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index c879c1a..b4f8d84 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -59,7 +59,7 @@ int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
+ nfnl_unlock();
+ return -EBUSY;
+ }
+- RCU_INIT_POINTER(subsys_table[n->subsys_id], n);
++ rcu_assign_pointer(subsys_table[n->subsys_id], n);
+ nfnl_unlock();
+
+ return 0;
+@@ -210,7 +210,7 @@ static int __net_init nfnetlink_net_init(struct net *net)
+ if (!nfnl)
+ return -ENOMEM;
+ net->nfnl_stash = nfnl;
+- RCU_INIT_POINTER(net->nfnl, nfnl);
++ rcu_assign_pointer(net->nfnl, nfnl);
+ return 0;
+ }
+
+diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
+index 3f905e5..e5330ed 100644
+--- a/net/netlabel/netlabel_domainhash.c
++++ b/net/netlabel/netlabel_domainhash.c
+@@ -282,7 +282,7 @@ int __init netlbl_domhsh_init(u32 size)
+ INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
+
+ spin_lock(&netlbl_domhsh_lock);
+- RCU_INIT_POINTER(netlbl_domhsh, hsh_tbl);
++ rcu_assign_pointer(netlbl_domhsh, hsh_tbl);
+ spin_unlock(&netlbl_domhsh_lock);
+
+ return 0;
+@@ -330,7 +330,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
+ &rcu_dereference(netlbl_domhsh)->tbl[bkt]);
+ } else {
+ INIT_LIST_HEAD(&entry->list);
+- RCU_INIT_POINTER(netlbl_domhsh_def, entry);
++ rcu_assign_pointer(netlbl_domhsh_def, entry);
+ }
+
+ if (entry->type == NETLBL_NLTYPE_ADDRSELECT) {
+diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
+index e251c2c..d463f5a 100644
+--- a/net/netlabel/netlabel_unlabeled.c
++++ b/net/netlabel/netlabel_unlabeled.c
+@@ -354,7 +354,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
+ INIT_LIST_HEAD(&iface->list);
+ if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL)
+ goto add_iface_failure;
+- RCU_INIT_POINTER(netlbl_unlhsh_def, iface);
++ rcu_assign_pointer(netlbl_unlhsh_def, iface);
+ }
+ spin_unlock(&netlbl_unlhsh_lock);
+
+@@ -1447,11 +1447,9 @@ int __init netlbl_unlabel_init(u32 size)
+ for (iter = 0; iter < hsh_tbl->size; iter++)
+ INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
+
+- rcu_read_lock();
+ spin_lock(&netlbl_unlhsh_lock);
+- RCU_INIT_POINTER(netlbl_unlhsh, hsh_tbl);
++ rcu_assign_pointer(netlbl_unlhsh, hsh_tbl);
+ spin_unlock(&netlbl_unlhsh_lock);
+- rcu_read_unlock();
+
+ register_netdevice_notifier(&netlbl_unlhsh_netdev_notifier);
+
+diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
+index bf10ea8..d65f699 100644
+--- a/net/phonet/af_phonet.c
++++ b/net/phonet/af_phonet.c
+@@ -480,7 +480,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
+ if (proto_tab[protocol])
+ err = -EBUSY;
+ else
+- RCU_INIT_POINTER(proto_tab[protocol], pp);
++ rcu_assign_pointer(proto_tab[protocol], pp);
+ mutex_unlock(&proto_tab_lock);
+
+ return err;
+diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
+index c582761..9b9a85e 100644
+--- a/net/phonet/pn_dev.c
++++ b/net/phonet/pn_dev.c
+@@ -390,7 +390,7 @@ int phonet_route_add(struct net_device *dev, u8 daddr)
+ daddr = daddr >> 2;
+ mutex_lock(&routes->lock);
+ if (routes->table[daddr] == NULL) {
+- RCU_INIT_POINTER(routes->table[daddr], dev);
++ rcu_assign_pointer(routes->table[daddr], dev);
+ dev_hold(dev);
+ err = 0;
+ }
+diff --git a/net/phonet/socket.c b/net/phonet/socket.c
+index 3f8d0b1..4c7eff3 100644
+--- a/net/phonet/socket.c
++++ b/net/phonet/socket.c
+@@ -680,7 +680,7 @@ int pn_sock_bind_res(struct sock *sk, u8 res)
+ mutex_lock(&resource_mutex);
+ if (pnres.sk[res] == NULL) {
+ sock_hold(sk);
+- RCU_INIT_POINTER(pnres.sk[res], sk);
++ rcu_assign_pointer(pnres.sk[res], sk);
+ ret = 0;
+ }
+ mutex_unlock(&resource_mutex);
+diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
+index bb6ad81..424ff62 100644
+--- a/net/rds/af_rds.c
++++ b/net/rds/af_rds.c
+@@ -68,7 +68,6 @@ static int rds_release(struct socket *sock)
+ {
+ struct sock *sk = sock->sk;
+ struct rds_sock *rs;
+- unsigned long flags;
+
+ if (!sk)
+ goto out;
+@@ -94,10 +93,10 @@ static int rds_release(struct socket *sock)
+ rds_rdma_drop_keys(rs);
+ rds_notify_queue_get(rs, NULL);
+
+- spin_lock_irqsave(&rds_sock_lock, flags);
++ spin_lock_bh(&rds_sock_lock);
+ list_del_init(&rs->rs_item);
+ rds_sock_count--;
+- spin_unlock_irqrestore(&rds_sock_lock, flags);
++ spin_unlock_bh(&rds_sock_lock);
+
+ rds_trans_put(rs->rs_transport);
+
+@@ -409,7 +408,6 @@ static const struct proto_ops rds_proto_ops = {
+
+ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
+ {
+- unsigned long flags;
+ struct rds_sock *rs;
+
+ sock_init_data(sock, sk);
+@@ -426,10 +424,10 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
+ spin_lock_init(&rs->rs_rdma_lock);
+ rs->rs_rdma_keys = RB_ROOT;
+
+- spin_lock_irqsave(&rds_sock_lock, flags);
++ spin_lock_bh(&rds_sock_lock);
+ list_add_tail(&rs->rs_item, &rds_sock_list);
+ rds_sock_count++;
+- spin_unlock_irqrestore(&rds_sock_lock, flags);
++ spin_unlock_bh(&rds_sock_lock);
+
+ return 0;
+ }
+@@ -471,12 +469,11 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
+ {
+ struct rds_sock *rs;
+ struct rds_incoming *inc;
+- unsigned long flags;
+ unsigned int total = 0;
+
+ len /= sizeof(struct rds_info_message);
+
+- spin_lock_irqsave(&rds_sock_lock, flags);
++ spin_lock_bh(&rds_sock_lock);
+
+ list_for_each_entry(rs, &rds_sock_list, rs_item) {
+ read_lock(&rs->rs_recv_lock);
+@@ -492,7 +489,7 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
+ read_unlock(&rs->rs_recv_lock);
+ }
+
+- spin_unlock_irqrestore(&rds_sock_lock, flags);
++ spin_unlock_bh(&rds_sock_lock);
+
+ lens->nr = total;
+ lens->each = sizeof(struct rds_info_message);
+@@ -504,11 +501,10 @@ static void rds_sock_info(struct socket *sock, unsigned int len,
+ {
+ struct rds_info_socket sinfo;
+ struct rds_sock *rs;
+- unsigned long flags;
+
+ len /= sizeof(struct rds_info_socket);
+
+- spin_lock_irqsave(&rds_sock_lock, flags);
++ spin_lock_bh(&rds_sock_lock);
+
+ if (len < rds_sock_count)
+ goto out;
+@@ -529,7 +525,7 @@ out:
+ lens->nr = rds_sock_count;
+ lens->each = sizeof(struct rds_info_socket);
+
+- spin_unlock_irqrestore(&rds_sock_lock, flags);
++ spin_unlock_bh(&rds_sock_lock);
+ }
+
+ static void rds_exit(void)
+diff --git a/net/socket.c b/net/socket.c
+index 2877647..2dce67a 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -2472,7 +2472,7 @@ int sock_register(const struct net_proto_family *ops)
+ lockdep_is_held(&net_family_lock)))
+ err = -EEXIST;
+ else {
+- RCU_INIT_POINTER(net_families[ops->family], ops);
++ rcu_assign_pointer(net_families[ops->family], ops);
+ err = 0;
+ }
+ spin_unlock(&net_family_lock);
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index afb5655..db0efde 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -122,7 +122,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
+ if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
+ return;
+ gss_get_ctx(ctx);
+- RCU_INIT_POINTER(gss_cred->gc_ctx, ctx);
++ rcu_assign_pointer(gss_cred->gc_ctx, ctx);
+ set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
+ smp_mb__before_clear_bit();
+ clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index b595a3d..d99678a 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1915,7 +1915,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct sk_buff *skb;
+
+ unix_state_lock(sk);
+- skb = skb_dequeue(&sk->sk_receive_queue);
++ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb == NULL) {
+ unix_sk(sk)->recursion_level = 0;
+ if (copied >= target)
+@@ -1955,11 +1955,8 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (check_creds) {
+ /* Never glue messages from different writers */
+ if ((UNIXCB(skb).pid != siocb->scm->pid) ||
+- (UNIXCB(skb).cred != siocb->scm->cred)) {
+- skb_queue_head(&sk->sk_receive_queue, skb);
+- sk->sk_data_ready(sk, skb->len);
++ (UNIXCB(skb).cred != siocb->scm->cred))
+ break;
+- }
+ } else {
+ /* Copy credentials */
+ scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
+@@ -1974,8 +1971,6 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ chunk = min_t(unsigned int, skb->len, size);
+ if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
+- skb_queue_head(&sk->sk_receive_queue, skb);
+- sk->sk_data_ready(sk, skb->len);
+ if (copied == 0)
+ copied = -EFAULT;
+ break;
+@@ -1990,13 +1985,10 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (UNIXCB(skb).fp)
+ unix_detach_fds(siocb->scm, skb);
+
+- /* put the skb back if we didn't use it up.. */
+- if (skb->len) {
+- skb_queue_head(&sk->sk_receive_queue, skb);
+- sk->sk_data_ready(sk, skb->len);
++ if (skb->len)
+ break;
+- }
+
++ skb_unlink(skb, &sk->sk_receive_queue);
+ consume_skb(skb);
+
+ if (siocb->scm->fp)
+@@ -2007,9 +1999,6 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (UNIXCB(skb).fp)
+ siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+
+- /* put message back and return */
+- skb_queue_head(&sk->sk_receive_queue, skb);
+- sk->sk_data_ready(sk, skb->len);
+ break;
+ }
+ } while (size);
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index d0a42df..7cae73e 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -2927,7 +2927,7 @@ static int __net_init xfrm_user_net_init(struct net *net)
+ if (nlsk == NULL)
+ return -ENOMEM;
+ net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
+- RCU_INIT_POINTER(net->xfrm.nlsk, nlsk);
++ rcu_assign_pointer(net->xfrm.nlsk, nlsk);
+ return 0;
+ }
+
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 5b2b75b..192e6c0 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -461,6 +461,7 @@ struct azx {
+ unsigned int irq_pending_warned :1;
+ unsigned int probing :1; /* codec probing phase */
+ unsigned int snoop:1;
++ unsigned int align_buffer_size:1;
+
+ /* for debugging */
+ unsigned int last_cmd[AZX_MAX_CODECS];
+@@ -1697,7 +1698,7 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
+ runtime->hw.rates = hinfo->rates;
+ snd_pcm_limit_hw_rates(runtime);
+ snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+- if (align_buffer_size)
++ if (chip->align_buffer_size)
+ /* constrain buffer sizes to be multiple of 128
+ bytes. This is more efficient in terms of memory
+ access but isn't required by the HDA spec and
+@@ -2753,8 +2754,9 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
+ }
+
+ /* disable buffer size rounding to 128-byte multiples if supported */
++ chip->align_buffer_size = align_buffer_size;
+ if (chip->driver_caps & AZX_DCAPS_BUFSIZE)
+- align_buffer_size = 0;
++ chip->align_buffer_size = 0;
+
+ /* allow 64bit DMA address if supported by H/W */
+ if ((gcap & ICH6_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1d07e8f..5f03c40 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5223,6 +5223,7 @@ static const struct hda_amp_list alc861_loopbacks[] = {
+ /* Pin config fixes */
+ enum {
+ PINFIX_FSC_AMILO_PI1505,
++ PINFIX_ASUS_A6RP,
+ };
+
+ static const struct alc_fixup alc861_fixups[] = {
+@@ -5234,9 +5235,19 @@ static const struct alc_fixup alc861_fixups[] = {
+ { }
+ }
+ },
++ [PINFIX_ASUS_A6RP] = {
++ .type = ALC_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ /* node 0x0f VREF seems controlling the master output */
++ { 0x0f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF50 },
++ { }
++ },
++ },
+ };
+
+ static const struct snd_pci_quirk alc861_fixup_tbl[] = {
++ SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", PINFIX_ASUS_A6RP),
++ SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", PINFIX_ASUS_A6RP),
+ SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", PINFIX_FSC_AMILO_PI1505),
+ {}
+ };
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index f3c73a9..ccdac27 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -4253,13 +4253,15 @@ static int enable_pin_detect(struct hda_codec *codec, hda_nid_t nid,
+ return 1;
+ }
+
+-static int is_nid_hp_pin(struct auto_pin_cfg *cfg, hda_nid_t nid)
++static int is_nid_out_jack_pin(struct auto_pin_cfg *cfg, hda_nid_t nid)
+ {
+ int i;
+ for (i = 0; i < cfg->hp_outs; i++)
+ if (cfg->hp_pins[i] == nid)
+ return 1; /* nid is a HP-Out */
+-
++ for (i = 0; i < cfg->line_outs; i++)
++ if (cfg->line_out_pins[i] == nid)
++ return 1; /* nid is a line-Out */
+ return 0; /* nid is not a HP-Out */
+ };
+
+@@ -4465,7 +4467,7 @@ static int stac92xx_init(struct hda_codec *codec)
+ continue;
+ }
+
+- if (is_nid_hp_pin(cfg, nid))
++ if (is_nid_out_jack_pin(cfg, nid))
+ continue; /* already has an unsol event */
+
+ pinctl = snd_hda_codec_read(codec, nid, 0,
+@@ -4950,7 +4952,14 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
+ /* BIOS bug: unfilled OEM string */
+ if (strstr(dev->name, "HP_Mute_LED_P_G")) {
+ set_hp_led_gpio(codec);
+- spec->gpio_led_polarity = 1;
++ switch (codec->subsystem_id) {
++ case 0x103c148a:
++ spec->gpio_led_polarity = 0;
++ break;
++ default:
++ spec->gpio_led_polarity = 1;
++ break;
++ }
+ return 1;
+ }
+ }
+diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
+index 42d9039..d0beeec 100644
+--- a/sound/soc/codecs/wm5100.c
++++ b/sound/soc/codecs/wm5100.c
+@@ -1379,6 +1379,7 @@ static int wm5100_set_bias_level(struct snd_soc_codec *codec,
+
+ switch (wm5100->rev) {
+ case 0:
++ regcache_cache_bypass(wm5100->regmap, true);
+ snd_soc_write(codec, 0x11, 0x3);
+ snd_soc_write(codec, 0x203, 0xc);
+ snd_soc_write(codec, 0x206, 0);
+@@ -1394,6 +1395,7 @@ static int wm5100_set_bias_level(struct snd_soc_codec *codec,
+ snd_soc_write(codec,
+ wm5100_reva_patches[i].reg,
+ wm5100_reva_patches[i].val);
++ regcache_cache_bypass(wm5100->regmap, false);
+ break;
+ default:
+ break;
+@@ -1404,6 +1406,7 @@ static int wm5100_set_bias_level(struct snd_soc_codec *codec,
+ break;
+
+ case SND_SOC_BIAS_OFF:
++ regcache_cache_only(wm5100->regmap, true);
+ if (wm5100->pdata.ldo_ena)
+ gpio_set_value_cansleep(wm5100->pdata.ldo_ena, 0);
+ regulator_bulk_disable(ARRAY_SIZE(wm5100->core_supplies),
+diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
+index a33b04d..6d98a57 100644
+--- a/sound/soc/codecs/wm8996.c
++++ b/sound/soc/codecs/wm8996.c
+@@ -1049,7 +1049,8 @@ SND_SOC_DAPM_SUPPLY_S("SYSCLK", 1, WM8996_AIF_CLOCKING_1, 0, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("SYSDSPCLK", 2, WM8996_CLOCKING_1, 1, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("AIFCLK", 2, WM8996_CLOCKING_1, 2, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("Charge Pump", 2, WM8996_CHARGE_PUMP_1, 15, 0, cp_event,
+- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
++ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
++ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("Bandgap", SND_SOC_NOPM, 0, 0, bg_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("LDO2", WM8996_POWER_MANAGEMENT_2, 1, 0, NULL, 0),
+@@ -1932,6 +1933,7 @@ static int wm8996_set_sysclk(struct snd_soc_dai *dai,
+ struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec);
+ int lfclk = 0;
+ int ratediv = 0;
++ int sync = WM8996_REG_SYNC;
+ int src;
+ int old;
+
+@@ -1976,6 +1978,7 @@ static int wm8996_set_sysclk(struct snd_soc_dai *dai,
+ case 32000:
+ case 32768:
+ lfclk = WM8996_LFCLK_ENA;
++ sync = 0;
+ break;
+ default:
+ dev_warn(codec->dev, "Unsupported clock rate %dHz\n",
+@@ -1989,6 +1992,8 @@ static int wm8996_set_sysclk(struct snd_soc_dai *dai,
+ WM8996_SYSCLK_SRC_MASK | WM8996_SYSCLK_DIV_MASK,
+ src << WM8996_SYSCLK_SRC_SHIFT | ratediv);
+ snd_soc_update_bits(codec, WM8996_CLOCKING_1, WM8996_LFCLK_ENA, lfclk);
++ snd_soc_update_bits(codec, WM8996_CONTROL_INTERFACE_1,
++ WM8996_REG_SYNC, sync);
+ snd_soc_update_bits(codec, WM8996_AIF_CLOCKING_1,
+ WM8996_SYSCLK_ENA, old);
+
+diff --git a/sound/soc/codecs/wm8996.h b/sound/soc/codecs/wm8996.h
+index 0fde643..de9ac3e 100644
+--- a/sound/soc/codecs/wm8996.h
++++ b/sound/soc/codecs/wm8996.h
+@@ -1567,6 +1567,10 @@ int wm8996_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
+ /*
+ * R257 (0x101) - Control Interface (1)
+ */
++#define WM8996_REG_SYNC 0x8000 /* REG_SYNC */
++#define WM8996_REG_SYNC_MASK 0x8000 /* REG_SYNC */
++#define WM8996_REG_SYNC_SHIFT 15 /* REG_SYNC */
++#define WM8996_REG_SYNC_WIDTH 1 /* REG_SYNC */
+ #define WM8996_AUTO_INC 0x0004 /* AUTO_INC */
+ #define WM8996_AUTO_INC_MASK 0x0004 /* AUTO_INC */
+ #define WM8996_AUTO_INC_SHIFT 2 /* AUTO_INC */
diff --git a/1003_linux-3.2.4.patch b/1003_linux-3.2.4.patch
new file mode 100644
index 00000000..e49de55e
--- /dev/null
+++ b/1003_linux-3.2.4.patch
@@ -0,0 +1,40 @@
+diff --git a/Makefile b/Makefile
+index d45e887..c8e187e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
+index d0beeec..42d9039 100644
+--- a/sound/soc/codecs/wm5100.c
++++ b/sound/soc/codecs/wm5100.c
+@@ -1379,7 +1379,6 @@ static int wm5100_set_bias_level(struct snd_soc_codec *codec,
+
+ switch (wm5100->rev) {
+ case 0:
+- regcache_cache_bypass(wm5100->regmap, true);
+ snd_soc_write(codec, 0x11, 0x3);
+ snd_soc_write(codec, 0x203, 0xc);
+ snd_soc_write(codec, 0x206, 0);
+@@ -1395,7 +1394,6 @@ static int wm5100_set_bias_level(struct snd_soc_codec *codec,
+ snd_soc_write(codec,
+ wm5100_reva_patches[i].reg,
+ wm5100_reva_patches[i].val);
+- regcache_cache_bypass(wm5100->regmap, false);
+ break;
+ default:
+ break;
+@@ -1406,7 +1404,6 @@ static int wm5100_set_bias_level(struct snd_soc_codec *codec,
+ break;
+
+ case SND_SOC_BIAS_OFF:
+- regcache_cache_only(wm5100->regmap, true);
+ if (wm5100->pdata.ldo_ena)
+ gpio_set_value_cansleep(wm5100->pdata.ldo_ena, 0);
+ regulator_bulk_disable(ARRAY_SIZE(wm5100->core_supplies),
diff --git a/1004_linux-3.2.5.patch b/1004_linux-3.2.5.patch
new file mode 100644
index 00000000..41f87f9e
--- /dev/null
+++ b/1004_linux-3.2.5.patch
@@ -0,0 +1,205 @@
+diff --git a/Makefile b/Makefile
+index c8e187e..e9dd0ff 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
+index 2672c79..7aff631 100644
+--- a/drivers/acpi/pci_root.c
++++ b/drivers/acpi/pci_root.c
+@@ -596,6 +596,13 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
+ if (ACPI_SUCCESS(status)) {
+ dev_info(root->bus->bridge,
+ "ACPI _OSC control (0x%02x) granted\n", flags);
++ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
++ /*
++ * We have ASPM control, but the FADT indicates
++ * that it's unsupported. Clear it.
++ */
++ pcie_clear_aspm(root->bus);
++ }
+ } else {
+ dev_info(root->bus->bridge,
+ "ACPI _OSC request failed (%s), "
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index 4ecb640..c8e7585 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -395,7 +395,6 @@ static int __init acpi_pci_init(void)
+
+ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
+ printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
+- pcie_clear_aspm();
+ pcie_no_aspm();
+ }
+
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index cbfbab1..1cfbf22 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -68,7 +68,7 @@ struct pcie_link_state {
+ struct aspm_latency acceptable[8];
+ };
+
+-static int aspm_disabled, aspm_force, aspm_clear_state;
++static int aspm_disabled, aspm_force;
+ static bool aspm_support_enabled = true;
+ static DEFINE_MUTEX(aspm_lock);
+ static LIST_HEAD(link_list);
+@@ -500,9 +500,6 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
+ int pos;
+ u32 reg32;
+
+- if (aspm_clear_state)
+- return -EINVAL;
+-
+ /*
+ * Some functions in a slot might not all be PCIe functions,
+ * very strange. Disable ASPM for the whole slot
+@@ -574,9 +571,6 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
+ pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+
+- if (aspm_disabled && !aspm_clear_state)
+- return;
+-
+ /* VIA has a strange chipset, root port is under a bridge */
+ if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
+ pdev->bus->self)
+@@ -608,7 +602,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
+ * the BIOS's expectation, we'll do so once pci_enable_device() is
+ * called.
+ */
+- if (aspm_policy != POLICY_POWERSAVE || aspm_clear_state) {
++ if (aspm_policy != POLICY_POWERSAVE) {
+ pcie_config_aspm_path(link);
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
+ }
+@@ -649,8 +643,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
+ struct pci_dev *parent = pdev->bus->self;
+ struct pcie_link_state *link, *root, *parent_link;
+
+- if ((aspm_disabled && !aspm_clear_state) || !pci_is_pcie(pdev) ||
+- !parent || !parent->link_state)
++ if (!pci_is_pcie(pdev) || !parent || !parent->link_state)
+ return;
+ if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
+ (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
+@@ -734,13 +727,18 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
+ * pci_disable_link_state - disable pci device's link state, so the link will
+ * never enter specific states
+ */
+-static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
++static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem,
++ bool force)
+ {
+ struct pci_dev *parent = pdev->bus->self;
+ struct pcie_link_state *link;
+
+- if (aspm_disabled || !pci_is_pcie(pdev))
++ if (aspm_disabled && !force)
++ return;
++
++ if (!pci_is_pcie(pdev))
+ return;
++
+ if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
+ pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
+ parent = pdev;
+@@ -768,16 +766,31 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+
+ void pci_disable_link_state_locked(struct pci_dev *pdev, int state)
+ {
+- __pci_disable_link_state(pdev, state, false);
++ __pci_disable_link_state(pdev, state, false, false);
+ }
+ EXPORT_SYMBOL(pci_disable_link_state_locked);
+
+ void pci_disable_link_state(struct pci_dev *pdev, int state)
+ {
+- __pci_disable_link_state(pdev, state, true);
++ __pci_disable_link_state(pdev, state, true, false);
+ }
+ EXPORT_SYMBOL(pci_disable_link_state);
+
++void pcie_clear_aspm(struct pci_bus *bus)
++{
++ struct pci_dev *child;
++
++ /*
++ * Clear any ASPM setup that the firmware has carried out on this bus
++ */
++ list_for_each_entry(child, &bus->devices, bus_list) {
++ __pci_disable_link_state(child, PCIE_LINK_STATE_L0S |
++ PCIE_LINK_STATE_L1 |
++ PCIE_LINK_STATE_CLKPM,
++ false, true);
++ }
++}
++
+ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
+ {
+ int i;
+@@ -935,6 +948,7 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
+ static int __init pcie_aspm_disable(char *str)
+ {
+ if (!strcmp(str, "off")) {
++ aspm_policy = POLICY_DEFAULT;
+ aspm_disabled = 1;
+ aspm_support_enabled = false;
+ printk(KERN_INFO "PCIe ASPM is disabled\n");
+@@ -947,16 +961,18 @@ static int __init pcie_aspm_disable(char *str)
+
+ __setup("pcie_aspm=", pcie_aspm_disable);
+
+-void pcie_clear_aspm(void)
+-{
+- if (!aspm_force)
+- aspm_clear_state = 1;
+-}
+-
+ void pcie_no_aspm(void)
+ {
+- if (!aspm_force)
++ /*
++ * Disabling ASPM is intended to prevent the kernel from modifying
++ * existing hardware state, not to clear existing state. To that end:
++ * (a) set policy to POLICY_DEFAULT in order to avoid changing state
++ * (b) prevent userspace from changing policy
++ */
++ if (!aspm_force) {
++ aspm_policy = POLICY_DEFAULT;
+ aspm_disabled = 1;
++ }
+ }
+
+ /**
+diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h
+index 7cea7b6..c832014 100644
+--- a/include/linux/pci-aspm.h
++++ b/include/linux/pci-aspm.h
+@@ -29,7 +29,7 @@ extern void pcie_aspm_pm_state_change(struct pci_dev *pdev);
+ extern void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
+ extern void pci_disable_link_state(struct pci_dev *pdev, int state);
+ extern void pci_disable_link_state_locked(struct pci_dev *pdev, int state);
+-extern void pcie_clear_aspm(void);
++extern void pcie_clear_aspm(struct pci_bus *bus);
+ extern void pcie_no_aspm(void);
+ #else
+ static inline void pcie_aspm_init_link_state(struct pci_dev *pdev)
+@@ -47,7 +47,7 @@ static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
+ static inline void pci_disable_link_state(struct pci_dev *pdev, int state)
+ {
+ }
+-static inline void pcie_clear_aspm(void)
++static inline void pcie_clear_aspm(struct pci_bus *bus)
+ {
+ }
+ static inline void pcie_no_aspm(void)
diff --git a/1005_linux-3.2.6.patch b/1005_linux-3.2.6.patch
new file mode 100644
index 00000000..892dc282
--- /dev/null
+++ b/1005_linux-3.2.6.patch
@@ -0,0 +1,2789 @@
+diff --git a/Makefile b/Makefile
+index e9dd0ff..47fe496 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index 483727a..90fa8b3 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -699,10 +699,13 @@ static int vfp_set(struct task_struct *target,
+ {
+ int ret;
+ struct thread_info *thread = task_thread_info(target);
+- struct vfp_hard_struct new_vfp = thread->vfpstate.hard;
++ struct vfp_hard_struct new_vfp;
+ const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
+ const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
+
++ vfp_sync_hwstate(thread);
++ new_vfp = thread->vfpstate.hard;
++
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &new_vfp.fpregs,
+ user_fpregs_offset,
+@@ -723,9 +726,8 @@ static int vfp_set(struct task_struct *target,
+ if (ret)
+ return ret;
+
+- vfp_sync_hwstate(thread);
+- thread->vfpstate.hard = new_vfp;
+ vfp_flush_hwstate(thread);
++ thread->vfpstate.hard = new_vfp;
+
+ return 0;
+ }
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index 0340224..9e617bd 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -227,6 +227,8 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
+ if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+ return -EINVAL;
+
++ vfp_flush_hwstate(thread);
++
+ /*
+ * Copy the floating point registers. There can be unused
+ * registers see asm/hwcap.h for details.
+@@ -251,9 +253,6 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
+ __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
+ __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+
+- if (!err)
+- vfp_flush_hwstate(thread);
+-
+ return err ? -EFAULT : 0;
+ }
+
+diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
+index 130034b..dfffbbf 100644
+--- a/arch/arm/mach-omap2/gpmc.c
++++ b/arch/arm/mach-omap2/gpmc.c
+@@ -528,7 +528,13 @@ int gpmc_cs_configure(int cs, int cmd, int wval)
+
+ case GPMC_CONFIG_DEV_SIZE:
+ regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
++
++ /* clear 2 target bits */
++ regval &= ~GPMC_CONFIG1_DEVICESIZE(3);
++
++ /* set the proper value */
+ regval |= GPMC_CONFIG1_DEVICESIZE(wval);
++
+ gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
+ break;
+
+diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
+index bce576d..ad683ec 100644
+--- a/drivers/cpufreq/powernow-k8.c
++++ b/drivers/cpufreq/powernow-k8.c
+@@ -54,6 +54,9 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
+
+ static int cpu_family = CPU_OPTERON;
+
++/* array to map SW pstate number to acpi state */
++static u32 ps_to_as[8];
++
+ /* core performance boost */
+ static bool cpb_capable, cpb_enabled;
+ static struct msr __percpu *msrs;
+@@ -80,9 +83,9 @@ static u32 find_khz_freq_from_fid(u32 fid)
+ }
+
+ static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
+- u32 pstate)
++ u32 pstate)
+ {
+- return data[pstate].frequency;
++ return data[ps_to_as[pstate]].frequency;
+ }
+
+ /* Return the vco fid for an input fid
+@@ -926,23 +929,27 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
+ invalidate_entry(powernow_table, i);
+ continue;
+ }
+- rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
+- if (!(hi & HW_PSTATE_VALID_MASK)) {
+- pr_debug("invalid pstate %d, ignoring\n", index);
+- invalidate_entry(powernow_table, i);
+- continue;
+- }
+
+- powernow_table[i].index = index;
++ ps_to_as[index] = i;
+
+ /* Frequency may be rounded for these */
+ if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
+ || boot_cpu_data.x86 == 0x11) {
++
++ rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
++ if (!(hi & HW_PSTATE_VALID_MASK)) {
++ pr_debug("invalid pstate %d, ignoring\n", index);
++ invalidate_entry(powernow_table, i);
++ continue;
++ }
++
+ powernow_table[i].frequency =
+ freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
+ } else
+ powernow_table[i].frequency =
+ data->acpi_data.states[i].core_frequency * 1000;
++
++ powernow_table[i].index = index;
+ }
+ return 0;
+ }
+@@ -1189,7 +1196,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
+ powernow_k8_acpi_pst_values(data, newstate);
+
+ if (cpu_family == CPU_HW_PSTATE)
+- ret = transition_frequency_pstate(data, newstate);
++ ret = transition_frequency_pstate(data,
++ data->powernow_table[newstate].index);
+ else
+ ret = transition_frequency_fidvid(data, newstate);
+ if (ret) {
+@@ -1202,7 +1210,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
+
+ if (cpu_family == CPU_HW_PSTATE)
+ pol->cur = find_khz_freq_from_pstate(data->powernow_table,
+- newstate);
++ data->powernow_table[newstate].index);
+ else
+ pol->cur = find_khz_freq_from_fid(data->currfid);
+ ret = 0;
+diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
+index fcfa0a8..a60adbf 100644
+--- a/drivers/dma/at_hdmac.c
++++ b/drivers/dma/at_hdmac.c
+@@ -1286,7 +1286,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
+
+ tasklet_init(&atchan->tasklet, atc_tasklet,
+ (unsigned long)atchan);
+- atc_enable_irq(atchan);
++ atc_enable_chan_irq(atdma, i);
+ }
+
+ /* set base routines */
+@@ -1353,7 +1353,7 @@ static int __exit at_dma_remove(struct platform_device *pdev)
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+ /* Disable interrupts */
+- atc_disable_irq(atchan);
++ atc_disable_chan_irq(atdma, chan->chan_id);
+ tasklet_disable(&atchan->tasklet);
+
+ tasklet_kill(&atchan->tasklet);
+diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
+index aa4c9ae..5aa82b4 100644
+--- a/drivers/dma/at_hdmac_regs.h
++++ b/drivers/dma/at_hdmac_regs.h
+@@ -326,28 +326,27 @@ static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
+ }
+
+
+-static void atc_setup_irq(struct at_dma_chan *atchan, int on)
++static void atc_setup_irq(struct at_dma *atdma, int chan_id, int on)
+ {
+- struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
+- u32 ebci;
++ u32 ebci;
+
+ /* enable interrupts on buffer transfer completion & error */
+- ebci = AT_DMA_BTC(atchan->chan_common.chan_id)
+- | AT_DMA_ERR(atchan->chan_common.chan_id);
++ ebci = AT_DMA_BTC(chan_id)
++ | AT_DMA_ERR(chan_id);
+ if (on)
+ dma_writel(atdma, EBCIER, ebci);
+ else
+ dma_writel(atdma, EBCIDR, ebci);
+ }
+
+-static inline void atc_enable_irq(struct at_dma_chan *atchan)
++static void atc_enable_chan_irq(struct at_dma *atdma, int chan_id)
+ {
+- atc_setup_irq(atchan, 1);
++ atc_setup_irq(atdma, chan_id, 1);
+ }
+
+-static inline void atc_disable_irq(struct at_dma_chan *atchan)
++static void atc_disable_chan_irq(struct at_dma *atdma, int chan_id)
+ {
+- atc_setup_irq(atchan, 0);
++ atc_setup_irq(atdma, chan_id, 0);
+ }
+
+
+diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
+index 6628fea..7f5f0da 100644
+--- a/drivers/firewire/ohci.c
++++ b/drivers/firewire/ohci.c
+@@ -263,6 +263,7 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
+ static char ohci_driver_name[] = KBUILD_MODNAME;
+
+ #define PCI_DEVICE_ID_AGERE_FW643 0x5901
++#define PCI_DEVICE_ID_CREATIVE_SB1394 0x4001
+ #define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
+ #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
+ #define PCI_DEVICE_ID_TI_TSB12LV26 0x8020
+@@ -289,6 +290,9 @@ static const struct {
+ {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
+ QUIRK_NO_MSI},
+
++ {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
++ QUIRK_RESET_PACKET},
++
+ {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
+ QUIRK_NO_MSI},
+
+@@ -299,7 +303,7 @@ static const struct {
+ QUIRK_NO_MSI},
+
+ {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
+- QUIRK_CYCLE_TIMER},
++ QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
+
+ {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
+ QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index 004b048..b2e3c97 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -1314,9 +1314,13 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
++ unsigned forcewake_count;
+
+- seq_printf(m, "forcewake count = %d\n",
+- atomic_read(&dev_priv->forcewake_count));
++ spin_lock_irq(&dev_priv->gt_lock);
++ forcewake_count = dev_priv->forcewake_count;
++ spin_unlock_irq(&dev_priv->gt_lock);
++
++ seq_printf(m, "forcewake count = %u\n", forcewake_count);
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index a9ae374..c4da951 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -2042,6 +2042,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ if (!IS_I945G(dev) && !IS_I945GM(dev))
+ pci_enable_msi(dev->pdev);
+
++ spin_lock_init(&dev_priv->gt_lock);
+ spin_lock_init(&dev_priv->irq_lock);
+ spin_lock_init(&dev_priv->error_lock);
+ spin_lock_init(&dev_priv->rps_lock);
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index a1103fc..e2d85a9 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -368,11 +368,12 @@ void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+ */
+ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+ {
+- WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
++ unsigned long irqflags;
+
+- /* Forcewake is atomic in case we get in here without the lock */
+- if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
++ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
++ if (dev_priv->forcewake_count++ == 0)
+ dev_priv->display.force_wake_get(dev_priv);
++ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+ }
+
+ void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+@@ -392,10 +393,12 @@ void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+ */
+ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+ {
+- WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
++ unsigned long irqflags;
+
+- if (atomic_dec_and_test(&dev_priv->forcewake_count))
++ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
++ if (--dev_priv->forcewake_count == 0)
+ dev_priv->display.force_wake_put(dev_priv);
++ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+ }
+
+ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+@@ -626,6 +629,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
+ * need to
+ */
+ bool need_display = true;
++ unsigned long irqflags;
+ int ret;
+
+ if (!i915_try_reset)
+@@ -644,8 +648,10 @@ int i915_reset(struct drm_device *dev, u8 flags)
+ case 6:
+ ret = gen6_do_reset(dev, flags);
+ /* If reset with a user forcewake, try to restore */
+- if (atomic_read(&dev_priv->forcewake_count))
+- __gen6_gt_force_wake_get(dev_priv);
++ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
++ if (dev_priv->forcewake_count)
++ dev_priv->display.force_wake_get(dev_priv);
++ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+ break;
+ case 5:
+ ret = ironlake_do_reset(dev, flags);
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 554bef7..ae294a0 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -286,7 +286,13 @@ typedef struct drm_i915_private {
+ int relative_constants_mode;
+
+ void __iomem *regs;
+- u32 gt_fifo_count;
++ /** gt_fifo_count and the subsequent register write are synchronized
++ * with dev->struct_mutex. */
++ unsigned gt_fifo_count;
++ /** forcewake_count is protected by gt_lock */
++ unsigned forcewake_count;
++ /** gt_lock is also taken in irq contexts. */
++ struct spinlock gt_lock;
+
+ struct intel_gmbus {
+ struct i2c_adapter adapter;
+@@ -337,6 +343,8 @@ typedef struct drm_i915_private {
+ struct timer_list hangcheck_timer;
+ int hangcheck_count;
+ uint32_t last_acthd;
++ uint32_t last_acthd_bsd;
++ uint32_t last_acthd_blt;
+ uint32_t last_instdone;
+ uint32_t last_instdone1;
+
+@@ -736,8 +744,6 @@ typedef struct drm_i915_private {
+
+ struct drm_property *broadcast_rgb_property;
+ struct drm_property *force_audio_property;
+-
+- atomic_t forcewake_count;
+ } drm_i915_private_t;
+
+ enum i915_cache_level {
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index b40004b..d47a53b 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -1669,7 +1669,7 @@ void i915_hangcheck_elapsed(unsigned long data)
+ {
+ struct drm_device *dev = (struct drm_device *)data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+- uint32_t acthd, instdone, instdone1;
++ uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
+ bool err = false;
+
+ if (!i915_enable_hangcheck)
+@@ -1686,16 +1686,21 @@ void i915_hangcheck_elapsed(unsigned long data)
+ }
+
+ if (INTEL_INFO(dev)->gen < 4) {
+- acthd = I915_READ(ACTHD);
+ instdone = I915_READ(INSTDONE);
+ instdone1 = 0;
+ } else {
+- acthd = I915_READ(ACTHD_I965);
+ instdone = I915_READ(INSTDONE_I965);
+ instdone1 = I915_READ(INSTDONE1);
+ }
++ acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
++ acthd_bsd = HAS_BSD(dev) ?
++ intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
++ acthd_blt = HAS_BLT(dev) ?
++ intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
+
+ if (dev_priv->last_acthd == acthd &&
++ dev_priv->last_acthd_bsd == acthd_bsd &&
++ dev_priv->last_acthd_blt == acthd_blt &&
+ dev_priv->last_instdone == instdone &&
+ dev_priv->last_instdone1 == instdone1) {
+ if (dev_priv->hangcheck_count++ > 1) {
+@@ -1727,6 +1732,8 @@ void i915_hangcheck_elapsed(unsigned long data)
+ dev_priv->hangcheck_count = 0;
+
+ dev_priv->last_acthd = acthd;
++ dev_priv->last_acthd_bsd = acthd_bsd;
++ dev_priv->last_acthd_blt = acthd_blt;
+ dev_priv->last_instdone = instdone;
+ dev_priv->last_instdone1 = instdone1;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
+index 43cbafe..a1eb83d 100644
+--- a/drivers/gpu/drm/i915/i915_suspend.c
++++ b/drivers/gpu/drm/i915/i915_suspend.c
+@@ -34,6 +34,10 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpll_reg;
+
++ /* On IVB, 3rd pipe shares PLL with another one */
++ if (pipe > 1)
++ return false;
++
+ if (HAS_PCH_SPLIT(dev))
+ dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B;
+ else
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 92b041b..db3b461 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -1926,6 +1926,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
+ intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
+ }
+
++ DP &= ~DP_AUDIO_OUTPUT_ENABLE;
+ I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
+ POSTING_READ(intel_dp->output_reg);
+ msleep(intel_dp->panel_power_down_delay);
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index d4f5a0b..64541f7 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -269,6 +269,10 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 temp;
++ u32 enable_bits = SDVO_ENABLE;
++
++ if (intel_hdmi->has_audio)
++ enable_bits |= SDVO_AUDIO_ENABLE;
+
+ temp = I915_READ(intel_hdmi->sdvox_reg);
+
+@@ -281,9 +285,9 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
+ }
+
+ if (mode != DRM_MODE_DPMS_ON) {
+- temp &= ~SDVO_ENABLE;
++ temp &= ~enable_bits;
+ } else {
+- temp |= SDVO_ENABLE;
++ temp |= enable_bits;
+ }
+
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
+index f3c6a9a..2b1fcad 100644
+--- a/drivers/gpu/drm/i915/intel_tv.c
++++ b/drivers/gpu/drm/i915/intel_tv.c
+@@ -417,7 +417,7 @@ static const struct tv_mode tv_modes[] = {
+ {
+ .name = "NTSC-M",
+ .clock = 108000,
+- .refresh = 29970,
++ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
+@@ -460,7 +460,7 @@ static const struct tv_mode tv_modes[] = {
+ {
+ .name = "NTSC-443",
+ .clock = 108000,
+- .refresh = 29970,
++ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+ /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */
+@@ -502,7 +502,7 @@ static const struct tv_mode tv_modes[] = {
+ {
+ .name = "NTSC-J",
+ .clock = 108000,
+- .refresh = 29970,
++ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+
+@@ -545,7 +545,7 @@ static const struct tv_mode tv_modes[] = {
+ {
+ .name = "PAL-M",
+ .clock = 108000,
+- .refresh = 29970,
++ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+
+@@ -589,7 +589,7 @@ static const struct tv_mode tv_modes[] = {
+ /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
+ .name = "PAL-N",
+ .clock = 108000,
+- .refresh = 25000,
++ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+
+@@ -634,7 +634,7 @@ static const struct tv_mode tv_modes[] = {
+ /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
+ .name = "PAL",
+ .clock = 108000,
+- .refresh = 25000,
++ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_8X,
+ .component_only = 0,
+
+@@ -821,7 +821,7 @@ static const struct tv_mode tv_modes[] = {
+ {
+ .name = "1080i@50Hz",
+ .clock = 148800,
+- .refresh = 25000,
++ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+@@ -847,7 +847,7 @@ static const struct tv_mode tv_modes[] = {
+ {
+ .name = "1080i@60Hz",
+ .clock = 148800,
+- .refresh = 30000,
++ .refresh = 60000,
+ .oversample = TV_OVERSAMPLE_2X,
+ .component_only = 1,
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index 5f0bc57..7ce3fde 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -380,6 +380,25 @@ retry:
+ }
+
+ static int
++validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
++{
++ struct nouveau_fence *fence = NULL;
++ int ret = 0;
++
++ spin_lock(&nvbo->bo.bdev->fence_lock);
++ if (nvbo->bo.sync_obj)
++ fence = nouveau_fence_ref(nvbo->bo.sync_obj);
++ spin_unlock(&nvbo->bo.bdev->fence_lock);
++
++ if (fence) {
++ ret = nouveau_fence_sync(fence, chan);
++ nouveau_fence_unref(&fence);
++ }
++
++ return ret;
++}
++
++static int
+ validate_list(struct nouveau_channel *chan, struct list_head *list,
+ struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
+ {
+@@ -393,7 +412,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
+ list_for_each_entry(nvbo, list, entry) {
+ struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
+
+- ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
++ ret = validate_sync(chan, nvbo);
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail pre-validate sync\n");
+ return ret;
+@@ -416,7 +435,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
+ return ret;
+ }
+
+- ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
++ ret = validate_sync(chan, nvbo);
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail post-validate sync\n");
+ return ret;
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 2b97262..b30081f 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -1189,7 +1189,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+ WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+
+ WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+- crtc->mode.vdisplay);
++ target_fb->height);
+ x &= ~3;
+ y &= ~1;
+ WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
+@@ -1358,7 +1358,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
+ WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+
+ WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+- crtc->mode.vdisplay);
++ target_fb->height);
+ x &= ~3;
+ y &= ~1;
+ WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index a71557c..552b436 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -564,9 +564,21 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
+ ENCODER_OBJECT_ID_NUTMEG)
+ panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
+ else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+- ENCODER_OBJECT_ID_TRAVIS)
+- panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+- else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
++ ENCODER_OBJECT_ID_TRAVIS) {
++ u8 id[6];
++ int i;
++ for (i = 0; i < 6; i++)
++ id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i);
++ if (id[0] == 0x73 &&
++ id[1] == 0x69 &&
++ id[2] == 0x76 &&
++ id[3] == 0x61 &&
++ id[4] == 0x72 &&
++ id[5] == 0x54)
++ panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
++ else
++ panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
++ } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+ if (tmp & 1)
+ panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 9b39145..9231564 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -864,6 +864,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
++ drm_kms_helper_poll_disable(dev);
++
+ /* turn off display hw */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+@@ -950,6 +952,8 @@ int radeon_resume_kms(struct drm_device *dev)
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
++
++ drm_kms_helper_poll_enable(dev);
+ return 0;
+ }
+
+diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
+index 4b57ab6..c25387d 100644
+--- a/drivers/hwmon/w83627ehf.c
++++ b/drivers/hwmon/w83627ehf.c
+@@ -1920,9 +1920,26 @@ w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
+ fan4min = 0;
+ fan5pin = 0;
+ } else if (sio_data->kind == nct6776) {
+- fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
+- fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
+- fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
++ bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80;
++
++ superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
++ regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE);
++
++ if (regval & 0x80)
++ fan3pin = gpok;
++ else
++ fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
++
++ if (regval & 0x40)
++ fan4pin = gpok;
++ else
++ fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
++
++ if (regval & 0x20)
++ fan5pin = gpok;
++ else
++ fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
++
+ fan4min = fan4pin;
+ } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
+ fan3pin = 1;
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index e3db8ef..a8445b8 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -1485,6 +1485,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
+ qp->event_handler = attr.event_handler;
+ qp->qp_context = attr.qp_context;
+ qp->qp_type = attr.qp_type;
++ atomic_set(&qp->usecnt, 0);
+ atomic_inc(&pd->usecnt);
+ atomic_inc(&attr.send_cq->usecnt);
+ if (attr.recv_cq)
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index 602b1bd..575b780 100644
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -421,6 +421,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
+ qp->uobject = NULL;
+ qp->qp_type = qp_init_attr->qp_type;
+
++ atomic_set(&qp->usecnt, 0);
+ if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
+ qp->event_handler = __ib_shared_qp_event_handler;
+ qp->qp_context = qp;
+@@ -430,7 +431,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
+ qp->xrcd = qp_init_attr->xrcd;
+ atomic_inc(&qp_init_attr->xrcd->usecnt);
+ INIT_LIST_HEAD(&qp->open_list);
+- atomic_set(&qp->usecnt, 0);
+
+ real_qp = qp;
+ qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
+diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
+index f36da99..77702c0 100644
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -256,12 +256,9 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ return IB_MAD_RESULT_SUCCESS;
+
+ /*
+- * Don't process SMInfo queries or vendor-specific
+- * MADs -- the SMA can't handle them.
++ * Don't process SMInfo queries -- the SMA can't handle them.
+ */
+- if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
+- ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
+- IB_SMP_ATTR_VENDOR_MASK))
++ if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
+ return IB_MAD_RESULT_SUCCESS;
+ } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
+ in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 ||
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 4ee277a..e0b3e33 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -2479,6 +2479,9 @@ static unsigned device_dma_ops_init(void)
+
+ for_each_pci_dev(pdev) {
+ if (!check_device(&pdev->dev)) {
++
++ iommu_ignore_device(&pdev->dev);
++
+ unhandled += 1;
+ continue;
+ }
+diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
+index 5865dd2..a4d134d 100644
+--- a/drivers/iommu/msm_iommu.c
++++ b/drivers/iommu/msm_iommu.c
+@@ -481,23 +481,19 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+
+ priv = domain->priv;
+
+- if (!priv) {
+- ret = -ENODEV;
++ if (!priv)
+ goto fail;
+- }
+
+ fl_table = priv->pgtable;
+
+ if (len != SZ_16M && len != SZ_1M &&
+ len != SZ_64K && len != SZ_4K) {
+ pr_debug("Bad length: %d\n", len);
+- ret = -EINVAL;
+ goto fail;
+ }
+
+ if (!fl_table) {
+ pr_debug("Null page table\n");
+- ret = -EINVAL;
+ goto fail;
+ }
+
+@@ -506,7 +502,6 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+
+ if (*fl_pte == 0) {
+ pr_debug("First level PTE is 0\n");
+- ret = -ENODEV;
+ goto fail;
+ }
+
+diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
+index 68cd05b..85cc771 100644
+--- a/drivers/misc/cb710/core.c
++++ b/drivers/misc/cb710/core.c
+@@ -245,6 +245,7 @@ static int __devinit cb710_probe(struct pci_dev *pdev,
+ if (err)
+ return err;
+
++ spin_lock_init(&chip->irq_lock);
+ chip->pdev = pdev;
+ chip->iobase = pcim_iomap_table(pdev)[0];
+
+diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
+index 23e5d77..ee6e26e 100644
+--- a/drivers/mtd/nand/atmel_nand.c
++++ b/drivers/mtd/nand/atmel_nand.c
+@@ -161,6 +161,37 @@ static int atmel_nand_device_ready(struct mtd_info *mtd)
+ !!host->board->rdy_pin_active_low;
+ }
+
++/*
++ * Minimal-overhead PIO for data access.
++ */
++static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
++{
++ struct nand_chip *nand_chip = mtd->priv;
++
++ __raw_readsb(nand_chip->IO_ADDR_R, buf, len);
++}
++
++static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
++{
++ struct nand_chip *nand_chip = mtd->priv;
++
++ __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
++}
++
++static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
++{
++ struct nand_chip *nand_chip = mtd->priv;
++
++ __raw_writesb(nand_chip->IO_ADDR_W, buf, len);
++}
++
++static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
++{
++ struct nand_chip *nand_chip = mtd->priv;
++
++ __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2);
++}
++
+ static void dma_complete_func(void *completion)
+ {
+ complete(completion);
+@@ -235,27 +266,33 @@ err_buf:
+ static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+ {
+ struct nand_chip *chip = mtd->priv;
++ struct atmel_nand_host *host = chip->priv;
+
+ if (use_dma && len > mtd->oobsize)
+ /* only use DMA for bigger than oob size: better performances */
+ if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
+ return;
+
+- /* if no DMA operation possible, use PIO */
+- memcpy_fromio(buf, chip->IO_ADDR_R, len);
++ if (host->board->bus_width_16)
++ atmel_read_buf16(mtd, buf, len);
++ else
++ atmel_read_buf8(mtd, buf, len);
+ }
+
+ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+ {
+ struct nand_chip *chip = mtd->priv;
++ struct atmel_nand_host *host = chip->priv;
+
+ if (use_dma && len > mtd->oobsize)
+ /* only use DMA for bigger than oob size: better performances */
+ if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
+ return;
+
+- /* if no DMA operation possible, use PIO */
+- memcpy_toio(chip->IO_ADDR_W, buf, len);
++ if (host->board->bus_width_16)
++ atmel_write_buf16(mtd, buf, len);
++ else
++ atmel_write_buf8(mtd, buf, len);
+ }
+
+ /*
+diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+index de4db76..bb2fe60 100644
+--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
++++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+@@ -69,17 +69,19 @@ static int clear_poll_bit(void __iomem *addr, u32 mask)
+ * [1] enable the module.
+ * [2] reset the module.
+ *
+- * In most of the cases, it's ok. But there is a hardware bug in the BCH block.
++ * In most of the cases, it's ok.
++ * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
+ * If you try to soft reset the BCH block, it becomes unusable until
+ * the next hard reset. This case occurs in the NAND boot mode. When the board
+ * boots by NAND, the ROM of the chip will initialize the BCH blocks itself.
+ * So If the driver tries to reset the BCH again, the BCH will not work anymore.
+- * You will see a DMA timeout in this case.
++ * You will see a DMA timeout in this case. The bug has been fixed
++ * in the following chips, such as MX28.
+ *
+ * To avoid this bug, just add a new parameter `just_enable` for
+ * the mxs_reset_block(), and rewrite it here.
+ */
+-int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
++static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
+ {
+ int ret;
+ int timeout = 0x400;
+@@ -206,7 +208,15 @@ int bch_set_geometry(struct gpmi_nand_data *this)
+ if (ret)
+ goto err_out;
+
+- ret = gpmi_reset_block(r->bch_regs, true);
++ /*
++ * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
++ * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
++ * On the other hand, the MX28 needs the reset, because one case has been
++ * seen where the BCH produced ECC errors constantly after 10000
++ * consecutive reboots. The latter case has not been seen on the MX23 yet,
++ * still we don't know if it could happen there as well.
++ */
++ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+ if (ret)
+ goto err_out;
+
+diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
+index ee5da92..aba4f67 100644
+--- a/drivers/net/ethernet/realtek/8139cp.c
++++ b/drivers/net/ethernet/realtek/8139cp.c
+@@ -563,6 +563,7 @@ rx_next:
+ if (cpr16(IntrStatus) & cp_rx_intr_mask)
+ goto rx_status_loop;
+
++ napi_gro_flush(napi);
+ spin_lock_irqsave(&cp->lock, flags);
+ __napi_complete(napi);
+ cpw16_f(IntrMask, cp_intr_mask);
+diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
+index 749c2a1..1932029 100644
+--- a/drivers/pcmcia/ds.c
++++ b/drivers/pcmcia/ds.c
+@@ -1269,10 +1269,8 @@ static int pcmcia_bus_add(struct pcmcia_socket *skt)
+
+ static int pcmcia_bus_early_resume(struct pcmcia_socket *skt)
+ {
+- if (!verify_cis_cache(skt)) {
+- pcmcia_put_socket(skt);
++ if (!verify_cis_cache(skt))
+ return 0;
+- }
+
+ dev_dbg(&skt->dev, "cis mismatch - different card\n");
+
+diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
+index 7bb7da7..63bafbb 100644
+--- a/drivers/staging/asus_oled/asus_oled.c
++++ b/drivers/staging/asus_oled/asus_oled.c
+@@ -355,7 +355,14 @@ static void send_data(struct asus_oled_dev *odev)
+
+ static int append_values(struct asus_oled_dev *odev, uint8_t val, size_t count)
+ {
+- while (count-- > 0 && val) {
++ odev->last_val = val;
++
++ if (val == 0) {
++ odev->buf_offs += count;
++ return 0;
++ }
++
++ while (count-- > 0) {
+ size_t x = odev->buf_offs % odev->width;
+ size_t y = odev->buf_offs / odev->width;
+ size_t i;
+@@ -406,7 +413,6 @@ static int append_values(struct asus_oled_dev *odev, uint8_t val, size_t count)
+ ;
+ }
+
+- odev->last_val = val;
+ odev->buf_offs++;
+ }
+
+@@ -805,10 +811,9 @@ error:
+
+ static void __exit asus_oled_exit(void)
+ {
++ usb_deregister(&oled_driver);
+ class_remove_file(oled_class, &class_attr_version.attr);
+ class_destroy(oled_class);
+-
+- usb_deregister(&oled_driver);
+ }
+
+ module_init(asus_oled_init);
+diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
+index 9b5d771..ed85b44 100644
+--- a/drivers/staging/rtl8712/drv_types.h
++++ b/drivers/staging/rtl8712/drv_types.h
+@@ -37,6 +37,8 @@ struct _adapter;
+ #include "wlan_bssdef.h"
+ #include "rtl8712_spec.h"
+ #include "rtl8712_hal.h"
++#include <linux/mutex.h>
++#include <linux/completion.h>
+
+ enum _NIC_VERSION {
+ RTL8711_NIC,
+@@ -168,6 +170,7 @@ struct _adapter {
+ s32 bSurpriseRemoved;
+ u32 IsrContent;
+ u32 ImrContent;
++ bool fw_found;
+ u8 EepromAddressSize;
+ u8 hw_init_completed;
+ struct task_struct *cmdThread;
+@@ -184,6 +187,10 @@ struct _adapter {
+ _workitem wkFilterRxFF0;
+ u8 blnEnableRxFF0Filter;
+ spinlock_t lockRxFF0Filter;
++ const struct firmware *fw;
++ struct usb_interface *pusb_intf;
++ struct mutex mutex_start;
++ struct completion rtl8712_fw_ready;
+ };
+
+ static inline u8 *myid(struct eeprom_priv *peepriv)
+diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
+index d0029aa..cc893c0 100644
+--- a/drivers/staging/rtl8712/hal_init.c
++++ b/drivers/staging/rtl8712/hal_init.c
+@@ -42,29 +42,56 @@
+ #define FWBUFF_ALIGN_SZ 512
+ #define MAX_DUMP_FWSZ 49152 /*default = 49152 (48k)*/
+
+-static u32 rtl871x_open_fw(struct _adapter *padapter, void **pphfwfile_hdl,
+- const u8 **ppmappedfw)
++static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context)
+ {
++ struct _adapter *padapter = context;
++
++ complete(&padapter->rtl8712_fw_ready);
++ if (!firmware) {
++ struct usb_device *udev = padapter->dvobjpriv.pusbdev;
++ struct usb_interface *pusb_intf = padapter->pusb_intf;
++ printk(KERN_ERR "r8712u: Firmware request failed\n");
++ padapter->fw_found = false;
++ usb_put_dev(udev);
++ usb_set_intfdata(pusb_intf, NULL);
++ return;
++ }
++ padapter->fw = firmware;
++ padapter->fw_found = true;
++ /* firmware available - start netdev */
++ register_netdev(padapter->pnetdev);
++}
++
++static const char firmware_file[] = "rtlwifi/rtl8712u.bin";
++
++int rtl871x_load_fw(struct _adapter *padapter)
++{
++ struct device *dev = &padapter->dvobjpriv.pusbdev->dev;
+ int rc;
+- const char firmware_file[] = "rtlwifi/rtl8712u.bin";
+- const struct firmware **praw = (const struct firmware **)
+- (pphfwfile_hdl);
+- struct dvobj_priv *pdvobjpriv = (struct dvobj_priv *)
+- (&padapter->dvobjpriv);
+- struct usb_device *pusbdev = pdvobjpriv->pusbdev;
+
++ init_completion(&padapter->rtl8712_fw_ready);
+ printk(KERN_INFO "r8712u: Loading firmware from \"%s\"\n",
+ firmware_file);
+- rc = request_firmware(praw, firmware_file, &pusbdev->dev);
+- if (rc < 0) {
+- printk(KERN_ERR "r8712u: Unable to load firmware\n");
+- printk(KERN_ERR "r8712u: Install latest linux-firmware\n");
++ rc = request_firmware_nowait(THIS_MODULE, 1, firmware_file, dev,
++ GFP_KERNEL, padapter, rtl871x_load_fw_cb);
++ if (rc)
++ printk(KERN_ERR "r8712u: Firmware request error %d\n", rc);
++ return rc;
++}
++MODULE_FIRMWARE("rtlwifi/rtl8712u.bin");
++
++static u32 rtl871x_open_fw(struct _adapter *padapter, const u8 **ppmappedfw)
++{
++ const struct firmware **praw = &padapter->fw;
++
++ if (padapter->fw->size > 200000) {
++ printk(KERN_ERR "r8172u: Badfw->size of %d\n",
++ (int)padapter->fw->size);
+ return 0;
+ }
+ *ppmappedfw = (u8 *)((*praw)->data);
+ return (*praw)->size;
+ }
+-MODULE_FIRMWARE("rtlwifi/rtl8712u.bin");
+
+ static void fill_fwpriv(struct _adapter *padapter, struct fw_priv *pfwpriv)
+ {
+@@ -142,18 +169,17 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
+ uint dump_imem_sz, imem_sz, dump_emem_sz, emem_sz; /* max = 49152; */
+ struct fw_hdr fwhdr;
+ u32 ulfilelength; /* FW file size */
+- void *phfwfile_hdl = NULL;
+ const u8 *pmappedfw = NULL;
+ u8 *ptmpchar = NULL, *ppayload, *ptr;
+ struct tx_desc *ptx_desc;
+ u32 txdscp_sz = sizeof(struct tx_desc);
+ u8 ret = _FAIL;
+
+- ulfilelength = rtl871x_open_fw(padapter, &phfwfile_hdl, &pmappedfw);
++ ulfilelength = rtl871x_open_fw(padapter, &pmappedfw);
+ if (pmappedfw && (ulfilelength > 0)) {
+ update_fwhdr(&fwhdr, pmappedfw);
+ if (chk_fwhdr(&fwhdr, ulfilelength) == _FAIL)
+- goto firmware_rel;
++ return ret;
+ fill_fwpriv(padapter, &fwhdr.fwpriv);
+ /* firmware check ok */
+ maxlen = (fwhdr.img_IMEM_size > fwhdr.img_SRAM_size) ?
+@@ -161,7 +187,7 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
+ maxlen += txdscp_sz;
+ ptmpchar = _malloc(maxlen + FWBUFF_ALIGN_SZ);
+ if (ptmpchar == NULL)
+- goto firmware_rel;
++ return ret;
+
+ ptx_desc = (struct tx_desc *)(ptmpchar + FWBUFF_ALIGN_SZ -
+ ((addr_t)(ptmpchar) & (FWBUFF_ALIGN_SZ - 1)));
+@@ -297,8 +323,6 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
+
+ exit_fail:
+ kfree(ptmpchar);
+-firmware_rel:
+- release_firmware((struct firmware *)phfwfile_hdl);
+ return ret;
+ }
+
+diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
+index 9a75c6d..98a3d68 100644
+--- a/drivers/staging/rtl8712/os_intfs.c
++++ b/drivers/staging/rtl8712/os_intfs.c
+@@ -31,6 +31,7 @@
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/kthread.h>
++#include <linux/firmware.h>
+ #include "osdep_service.h"
+ #include "drv_types.h"
+ #include "xmit_osdep.h"
+@@ -264,12 +265,12 @@ static void start_drv_timers(struct _adapter *padapter)
+ void r8712_stop_drv_timers(struct _adapter *padapter)
+ {
+ _cancel_timer_ex(&padapter->mlmepriv.assoc_timer);
+- _cancel_timer_ex(&padapter->mlmepriv.sitesurveyctrl.
+- sitesurvey_ctrl_timer);
+ _cancel_timer_ex(&padapter->securitypriv.tkip_timer);
+ _cancel_timer_ex(&padapter->mlmepriv.scan_to_timer);
+ _cancel_timer_ex(&padapter->mlmepriv.dhcp_timer);
+ _cancel_timer_ex(&padapter->mlmepriv.wdg_timer);
++ _cancel_timer_ex(&padapter->mlmepriv.sitesurveyctrl.
++ sitesurvey_ctrl_timer);
+ }
+
+ static u8 init_default_value(struct _adapter *padapter)
+@@ -347,7 +348,8 @@ u8 r8712_free_drv_sw(struct _adapter *padapter)
+ r8712_free_mlme_priv(&padapter->mlmepriv);
+ r8712_free_io_queue(padapter);
+ _free_xmit_priv(&padapter->xmitpriv);
+- _r8712_free_sta_priv(&padapter->stapriv);
++ if (padapter->fw_found)
++ _r8712_free_sta_priv(&padapter->stapriv);
+ _r8712_free_recv_priv(&padapter->recvpriv);
+ mp871xdeinit(padapter);
+ if (pnetdev)
+@@ -388,6 +390,7 @@ static int netdev_open(struct net_device *pnetdev)
+ {
+ struct _adapter *padapter = (struct _adapter *)netdev_priv(pnetdev);
+
++ mutex_lock(&padapter->mutex_start);
+ if (padapter->bup == false) {
+ padapter->bDriverStopped = false;
+ padapter->bSurpriseRemoved = false;
+@@ -435,11 +438,13 @@ static int netdev_open(struct net_device *pnetdev)
+ /* start driver mlme relation timer */
+ start_drv_timers(padapter);
+ padapter->ledpriv.LedControlHandler(padapter, LED_CTL_NO_LINK);
++ mutex_unlock(&padapter->mutex_start);
+ return 0;
+ netdev_open_error:
+ padapter->bup = false;
+ netif_carrier_off(pnetdev);
+ netif_stop_queue(pnetdev);
++ mutex_unlock(&padapter->mutex_start);
+ return -1;
+ }
+
+@@ -473,6 +478,9 @@ static int netdev_close(struct net_device *pnetdev)
+ r8712_free_network_queue(padapter);
+ /* The interface is no longer Up: */
+ padapter->bup = false;
++ release_firmware(padapter->fw);
++ /* never exit with a firmware callback pending */
++ wait_for_completion(&padapter->rtl8712_fw_ready);
+ return 0;
+ }
+
+diff --git a/drivers/staging/rtl8712/rtl8712_hal.h b/drivers/staging/rtl8712/rtl8712_hal.h
+index 665e718..d19865a 100644
+--- a/drivers/staging/rtl8712/rtl8712_hal.h
++++ b/drivers/staging/rtl8712/rtl8712_hal.h
+@@ -145,5 +145,6 @@ struct hal_priv {
+ };
+
+ uint rtl8712_hal_init(struct _adapter *padapter);
++int rtl871x_load_fw(struct _adapter *padapter);
+
+ #endif
+diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
+index 5385da2..9bade18 100644
+--- a/drivers/staging/rtl8712/usb_intf.c
++++ b/drivers/staging/rtl8712/usb_intf.c
+@@ -89,6 +89,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
+ {USB_DEVICE(0x0DF6, 0x0045)},
+ {USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
+ {USB_DEVICE(0x0DF6, 0x004B)},
++ {USB_DEVICE(0x0DF6, 0x005B)},
+ {USB_DEVICE(0x0DF6, 0x005D)},
+ {USB_DEVICE(0x0DF6, 0x0063)},
+ /* Sweex */
+@@ -389,6 +390,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
+ pdvobjpriv = &padapter->dvobjpriv;
+ pdvobjpriv->padapter = padapter;
+ padapter->dvobjpriv.pusbdev = udev;
++ padapter->pusb_intf = pusb_intf;
+ usb_set_intfdata(pusb_intf, pnetdev);
+ SET_NETDEV_DEV(pnetdev, &pusb_intf->dev);
+ /* step 2. */
+@@ -595,10 +597,11 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
+ "%pM\n", mac);
+ memcpy(pnetdev->dev_addr, mac, ETH_ALEN);
+ }
+- /* step 6. Tell the network stack we exist */
+- if (register_netdev(pnetdev) != 0)
++ /* step 6. Load the firmware asynchronously */
++ if (rtl871x_load_fw(padapter))
+ goto error;
+ spin_lock_init(&padapter->lockRxFF0Filter);
++ mutex_init(&padapter->mutex_start);
+ return 0;
+ error:
+ usb_put_dev(udev);
+@@ -629,7 +632,8 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
+ flush_scheduled_work();
+ udelay(1);
+ /*Stop driver mlme relation timer */
+- r8712_stop_drv_timers(padapter);
++ if (padapter->fw_found)
++ r8712_stop_drv_timers(padapter);
+ r871x_dev_unload(padapter);
+ r8712_free_drv_sw(padapter);
+ }
+diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
+index 56c1f9c..f5e469d 100644
+--- a/drivers/staging/zcache/zcache-main.c
++++ b/drivers/staging/zcache/zcache-main.c
+@@ -358,8 +358,8 @@ static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id,
+ if (unlikely(zbpg == NULL))
+ goto out;
+ /* ok, have a page, now compress the data before taking locks */
+- spin_lock(&zbpg->lock);
+ spin_lock(&zbud_budlists_spinlock);
++ spin_lock(&zbpg->lock);
+ list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list);
+ zbud_unbuddied[nchunks].count++;
+ zh = &zbpg->buddy[0];
+@@ -389,12 +389,11 @@ init_zh:
+ zh->oid = *oid;
+ zh->pool_id = pool_id;
+ zh->client_id = client_id;
+- /* can wait to copy the data until the list locks are dropped */
+- spin_unlock(&zbud_budlists_spinlock);
+-
+ to = zbud_data(zh, size);
+ memcpy(to, cdata, size);
+ spin_unlock(&zbpg->lock);
++ spin_unlock(&zbud_budlists_spinlock);
++
+ zbud_cumul_chunk_counts[nchunks]++;
+ atomic_inc(&zcache_zbud_curr_zpages);
+ zcache_zbud_cumul_zpages++;
+@@ -1782,9 +1781,9 @@ static int zcache_frontswap_poolid = -1;
+ * Swizzling increases objects per swaptype, increasing tmem concurrency
+ * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
+ * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
+- * frontswap_get_page()
++ * frontswap_get_page(), but has side-effects. Hence using 8.
+ */
+-#define SWIZ_BITS 27
++#define SWIZ_BITS 8
+ #define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
+ #define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
+ #define iswiz(_ind) (_ind >> SWIZ_BITS)
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 8599545..0c1d5c73 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1062,7 +1062,7 @@ attach_cmd:
+ if (ret < 0)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+- 1, 1, buf, cmd);
++ 1, 0, buf, cmd);
+ /*
+ * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
+ * the Immediate Bit is not set, and no Immediate
+@@ -3165,6 +3165,30 @@ static int iscsit_send_task_mgt_rsp(
+ return 0;
+ }
+
++static bool iscsit_check_inaddr_any(struct iscsi_np *np)
++{
++ bool ret = false;
++
++ if (np->np_sockaddr.ss_family == AF_INET6) {
++ const struct sockaddr_in6 sin6 = {
++ .sin6_addr = IN6ADDR_ANY_INIT };
++ struct sockaddr_in6 *sock_in6 =
++ (struct sockaddr_in6 *)&np->np_sockaddr;
++
++ if (!memcmp(sock_in6->sin6_addr.s6_addr,
++ sin6.sin6_addr.s6_addr, 16))
++ ret = true;
++ } else {
++ struct sockaddr_in * sock_in =
++ (struct sockaddr_in *)&np->np_sockaddr;
++
++ if (sock_in->sin_addr.s_addr == INADDR_ANY)
++ ret = true;
++ }
++
++ return ret;
++}
++
+ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+ {
+ char *payload = NULL;
+@@ -3214,12 +3238,17 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+ spin_lock(&tpg->tpg_np_lock);
+ list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
+ tpg_np_list) {
++ struct iscsi_np *np = tpg_np->tpg_np;
++ bool inaddr_any = iscsit_check_inaddr_any(np);
++
+ len = sprintf(buf, "TargetAddress="
+ "%s%s%s:%hu,%hu",
+- (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
+- "[" : "", tpg_np->tpg_np->np_ip,
+- (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
+- "]" : "", tpg_np->tpg_np->np_port,
++ (np->np_sockaddr.ss_family == AF_INET6) ?
++ "[" : "", (inaddr_any == false) ?
++ np->np_ip : conn->local_ip,
++ (np->np_sockaddr.ss_family == AF_INET6) ?
++ "]" : "", (inaddr_any == false) ?
++ np->np_port : conn->local_port,
+ tpg->tpgt);
+ len += 1;
+
+diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
+index f1a02da..7da2d6a 100644
+--- a/drivers/target/iscsi/iscsi_target_core.h
++++ b/drivers/target/iscsi/iscsi_target_core.h
+@@ -508,6 +508,7 @@ struct iscsi_conn {
+ u16 cid;
+ /* Remote TCP Port */
+ u16 login_port;
++ u16 local_port;
+ int net_size;
+ u32 auth_id;
+ #define CONNFLAG_SCTP_STRUCT_FILE 0x01
+@@ -527,6 +528,7 @@ struct iscsi_conn {
+ unsigned char bad_hdr[ISCSI_HDR_LEN];
+ #define IPV6_ADDRESS_SPACE 48
+ unsigned char login_ip[IPV6_ADDRESS_SPACE];
++ unsigned char local_ip[IPV6_ADDRESS_SPACE];
+ int conn_usage_count;
+ int conn_waiting_on_uc;
+ atomic_t check_immediate_queue;
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index d734bde..bd2adec 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -616,8 +616,8 @@ static int iscsi_post_login_handler(
+ }
+
+ pr_debug("iSCSI Login successful on CID: %hu from %s to"
+- " %s:%hu,%hu\n", conn->cid, conn->login_ip, np->np_ip,
+- np->np_port, tpg->tpgt);
++ " %s:%hu,%hu\n", conn->cid, conn->login_ip,
++ conn->local_ip, conn->local_port, tpg->tpgt);
+
+ list_add_tail(&conn->conn_list, &sess->sess_conn_list);
+ atomic_inc(&sess->nconn);
+@@ -659,7 +659,8 @@ static int iscsi_post_login_handler(
+ sess->session_state = TARG_SESS_STATE_LOGGED_IN;
+
+ pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n",
+- conn->cid, conn->login_ip, np->np_ip, np->np_port, tpg->tpgt);
++ conn->cid, conn->login_ip, conn->local_ip, conn->local_port,
++ tpg->tpgt);
+
+ spin_lock_bh(&sess->conn_lock);
+ list_add_tail(&conn->conn_list, &sess->sess_conn_list);
+@@ -1019,6 +1020,18 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
+ &sock_in6.sin6_addr.in6_u);
+ conn->login_port = ntohs(sock_in6.sin6_port);
++
++ if (conn->sock->ops->getname(conn->sock,
++ (struct sockaddr *)&sock_in6, &err, 0) < 0) {
++ pr_err("sock_ops->getname() failed.\n");
++ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
++ ISCSI_LOGIN_STATUS_TARGET_ERROR);
++ goto new_sess_out;
++ }
++ snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
++ &sock_in6.sin6_addr.in6_u);
++ conn->local_port = ntohs(sock_in6.sin6_port);
++
+ } else {
+ memset(&sock_in, 0, sizeof(struct sockaddr_in));
+
+@@ -1031,6 +1044,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ }
+ sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr);
+ conn->login_port = ntohs(sock_in.sin_port);
++
++ if (conn->sock->ops->getname(conn->sock,
++ (struct sockaddr *)&sock_in, &err, 0) < 0) {
++ pr_err("sock_ops->getname() failed.\n");
++ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
++ ISCSI_LOGIN_STATUS_TARGET_ERROR);
++ goto new_sess_out;
++ }
++ sprintf(conn->local_ip, "%pI4", &sock_in.sin_addr.s_addr);
++ conn->local_port = ntohs(sock_in.sin_port);
+ }
+
+ conn->network_transport = np->np_network_transport;
+@@ -1038,7 +1061,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ pr_debug("Received iSCSI login request from %s on %s Network"
+ " Portal %s:%hu\n", conn->login_ip,
+ (conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP",
+- np->np_ip, np->np_port);
++ conn->local_ip, conn->local_port);
+
+ pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
+ conn->conn_state = TARG_CONN_STATE_IN_LOGIN;
+diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
+index 02348f7..99f2af3 100644
+--- a/drivers/target/iscsi/iscsi_target_util.c
++++ b/drivers/target/iscsi/iscsi_target_util.c
+@@ -851,6 +851,17 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd)
+ case ISCSI_OP_SCSI_TMFUNC:
+ transport_generic_free_cmd(&cmd->se_cmd, 1);
+ break;
++ case ISCSI_OP_REJECT:
++ /*
++ * Handle special case for REJECT when iscsi_add_reject*() has
++ * overwritten the original iscsi_opcode assignment, and the
++ * associated cmd->se_cmd needs to be released.
++ */
++ if (cmd->se_cmd.se_tfo != NULL) {
++ transport_generic_free_cmd(&cmd->se_cmd, 1);
++ break;
++ }
++ /* Fall-through */
+ default:
+ iscsit_release_cmd(cmd);
+ break;
+diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
+index 2e8c1be..251e48f 100644
+--- a/drivers/target/target_core_cdb.c
++++ b/drivers/target/target_core_cdb.c
+@@ -701,6 +701,13 @@ int target_emulate_inquiry(struct se_task *task)
+ int p, ret;
+
+ if (!(cdb[1] & 0x1)) {
++ if (cdb[2]) {
++ pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
++ cdb[2]);
++ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
++ return -EINVAL;
++ }
++
+ ret = target_emulate_inquiry_std(cmd);
+ goto out;
+ }
+@@ -732,7 +739,7 @@ int target_emulate_inquiry(struct se_task *task)
+ }
+
+ pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
++ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
+
+ out_unmap:
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index 95dee70..9119d92 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -481,6 +481,7 @@ static int core_scsi3_pr_seq_non_holder(
+ case READ_MEDIA_SERIAL_NUMBER:
+ case REPORT_LUNS:
+ case REQUEST_SENSE:
++ case PERSISTENT_RESERVE_IN:
+ ret = 0; /*/ Allowed CDBs */
+ break;
+ default:
+@@ -3138,7 +3139,7 @@ static int core_scsi3_pro_preempt(
+ if (!calling_it_nexus)
+ core_scsi3_ua_allocate(pr_reg_nacl,
+ pr_res_mapped_lun, 0x2A,
+- ASCQ_2AH_RESERVATIONS_PREEMPTED);
++ ASCQ_2AH_REGISTRATIONS_PREEMPTED);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+@@ -3251,7 +3252,7 @@ static int core_scsi3_pro_preempt(
+ * additional sense code set to REGISTRATIONS PREEMPTED;
+ */
+ core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
+- ASCQ_2AH_RESERVATIONS_PREEMPTED);
++ ASCQ_2AH_REGISTRATIONS_PREEMPTED);
+ }
+ spin_unlock(&pr_tmpl->registration_lock);
+ /*
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index e87d0eb..861628e 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -3701,6 +3701,11 @@ transport_allocate_control_task(struct se_cmd *cmd)
+ struct se_task *task;
+ unsigned long flags;
+
++ /* Workaround for handling zero-length control CDBs */
++ if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
++ !cmd->data_length)
++ return 0;
++
+ task = transport_generic_get_task(cmd, cmd->data_direction);
+ if (!task)
+ return -ENOMEM;
+@@ -3772,6 +3777,14 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
+ else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+ cmd->t_state = TRANSPORT_COMPLETE;
+ atomic_set(&cmd->t_transport_active, 1);
++
++ if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
++ u8 ua_asc = 0, ua_ascq = 0;
++
++ core_scsi3_ua_clear_for_request_sense(cmd,
++ &ua_asc, &ua_ascq);
++ }
++
+ INIT_WORK(&cmd->work, target_complete_ok_work);
+ queue_work(target_completion_wq, &cmd->work);
+ return 0;
+@@ -4403,8 +4416,8 @@ int transport_send_check_condition_and_sense(
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+- /* ABORTED COMMAND */
+- buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
++ /* ILLEGAL REQUEST */
++ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* INVALID FIELD IN CDB */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
+ break;
+@@ -4412,8 +4425,8 @@ int transport_send_check_condition_and_sense(
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+- /* ABORTED COMMAND */
+- buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
++ /* ILLEGAL REQUEST */
++ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* INVALID FIELD IN PARAMETER LIST */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
+ break;
+diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
+index 5e096f4..65447c5 100644
+--- a/drivers/tty/vt/vt_ioctl.c
++++ b/drivers/tty/vt/vt_ioctl.c
+@@ -1463,7 +1463,6 @@ compat_kdfontop_ioctl(struct compat_console_font_op __user *fontop,
+ if (!perm && op->op != KD_FONT_OP_GET)
+ return -EPERM;
+ op->data = compat_ptr(((struct compat_console_font_op *)op)->data);
+- op->flags |= KD_FONT_FLAG_OLD;
+ i = con_font_op(vc, op);
+ if (i)
+ return i;
+diff --git a/drivers/usb/gadget/f_loopback.c b/drivers/usb/gadget/f_loopback.c
+index 6d87f28..2c0cd82 100644
+--- a/drivers/usb/gadget/f_loopback.c
++++ b/drivers/usb/gadget/f_loopback.c
+@@ -418,7 +418,7 @@ int __init loopback_add(struct usb_composite_dev *cdev, bool autoresume)
+
+ /* support autoresume for remote wakeup testing */
+ if (autoresume)
+- sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
++ loopback_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+
+ /* support OTG systems */
+ if (gadget_is_otg(cdev->gadget)) {
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index caf8742..ac53a66 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -867,6 +867,12 @@ hc_init:
+
+ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
+ {
++ /* Skip Netlogic mips SoC's internal PCI USB controller.
++ * This device does not need/support EHCI/OHCI handoff
++ */
++ if (pdev->vendor == 0x184e) /* vendor Netlogic */
++ return;
++
+ if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
+ quirk_usb_handoff_uhci(pdev);
+ else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 058b92c..f030471 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -839,6 +839,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
+ { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 76d4f31..4eb7715 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1187,3 +1187,10 @@
+ */
+ /* ZigBee controller */
+ #define FTDI_RF_R106 0x8A28
++
++/*
++ * Product: HCP HIT GPRS modem
++ * Manufacturer: HCP d.o.o.
++ * ATI command output: Cinterion MC55i
++ */
++#define FTDI_CINTERION_MC55I_PID 0xA951
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 2a9ed6e..338d082 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -855,6 +855,18 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0083, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0087, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0088, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0089, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0090, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0091, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0092, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0093, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0095, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0098, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0099, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
+@@ -883,7 +895,6 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0154, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) },
+@@ -892,6 +903,12 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
+@@ -1066,6 +1083,116 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1403, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1404, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1405, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1406, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1407, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1408, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1409, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1410, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1411, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1412, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1413, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1414, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1415, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1416, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1417, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1418, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1419, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1420, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1421, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1422, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1423, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1427, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1429, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1430, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1431, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1432, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1433, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1434, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1435, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1436, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1437, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1438, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1439, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1440, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1441, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1442, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1443, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1444, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1445, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1446, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1447, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1448, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1449, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1450, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1451, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1452, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1453, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1454, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1455, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1456, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1457, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1458, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1459, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1460, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1461, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1462, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1463, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1464, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1465, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1466, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1467, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1468, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1469, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1470, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1471, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1472, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1473, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1474, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1475, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1476, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1477, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1478, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1479, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1480, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1482, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1483, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1484, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1486, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1487, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1488, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1489, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1490, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1491, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1492, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1493, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1494, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1495, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1496, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1497, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1498, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1499, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1500, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1501, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1502, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1503, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1504, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1505, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1506, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1507, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1508, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1509, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1510, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
+diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
+index 63409c1..e919c70 100644
+--- a/drivers/video/atmel_lcdfb.c
++++ b/drivers/video/atmel_lcdfb.c
+@@ -1089,7 +1089,7 @@ static int atmel_lcdfb_suspend(struct platform_device *pdev, pm_message_t mesg)
+ */
+ lcdc_writel(sinfo, ATMEL_LCDC_IDR, ~0UL);
+
+- sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
++ sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_CTR);
+ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, 0);
+ if (sinfo->atmel_lcdfb_power_control)
+ sinfo->atmel_lcdfb_power_control(0);
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index 4ec3ee9..2504809 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -246,16 +246,15 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
+ /* copy user */
+ /* BB what about null user mounts - check that we do this BB */
+ /* copy user */
+- if (ses->user_name != NULL)
++ if (ses->user_name != NULL) {
+ strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE);
++ bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE);
++ }
+ /* else null user mount */
+-
+- bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE);
+ *bcc_ptr = 0;
+ bcc_ptr++; /* account for null termination */
+
+ /* copy domain */
+-
+ if (ses->domainName != NULL) {
+ strncpy(bcc_ptr, ses->domainName, 256);
+ bcc_ptr += strnlen(ses->domainName, 256);
+diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
+index 54eb14c..608c1c3 100644
+--- a/fs/ecryptfs/read_write.c
++++ b/fs/ecryptfs/read_write.c
+@@ -130,7 +130,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
+ pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
+ size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
+ size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
+- size_t total_remaining_bytes = ((offset + size) - pos);
++ loff_t total_remaining_bytes = ((offset + size) - pos);
+
+ if (fatal_signal_pending(current)) {
+ rc = -EINTR;
+@@ -141,7 +141,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
+ num_bytes = total_remaining_bytes;
+ if (pos < offset) {
+ /* remaining zeros to write, up to destination offset */
+- size_t total_remaining_zeros = (offset - pos);
++ loff_t total_remaining_zeros = (offset - pos);
+
+ if (num_bytes > total_remaining_zeros)
+ num_bytes = total_remaining_zeros;
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 1fc1dca..1ace83d 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -775,6 +775,13 @@ static int mem_open(struct inode* inode, struct file* file)
+ if (IS_ERR(mm))
+ return PTR_ERR(mm);
+
++ if (mm) {
++ /* ensure this mm_struct can't be freed */
++ atomic_inc(&mm->mm_count);
++ /* but do not pin its memory */
++ mmput(mm);
++ }
++
+ /* OK to pass negative loff_t, we can catch out-of-range */
+ file->f_mode |= FMODE_UNSIGNED_OFFSET;
+ file->private_data = mm;
+@@ -782,57 +789,13 @@ static int mem_open(struct inode* inode, struct file* file)
+ return 0;
+ }
+
+-static ssize_t mem_read(struct file * file, char __user * buf,
+- size_t count, loff_t *ppos)
++static ssize_t mem_rw(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos, int write)
+ {
+- int ret;
+- char *page;
+- unsigned long src = *ppos;
+ struct mm_struct *mm = file->private_data;
+-
+- if (!mm)
+- return 0;
+-
+- page = (char *)__get_free_page(GFP_TEMPORARY);
+- if (!page)
+- return -ENOMEM;
+-
+- ret = 0;
+-
+- while (count > 0) {
+- int this_len, retval;
+-
+- this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
+- retval = access_remote_vm(mm, src, page, this_len, 0);
+- if (!retval) {
+- if (!ret)
+- ret = -EIO;
+- break;
+- }
+-
+- if (copy_to_user(buf, page, retval)) {
+- ret = -EFAULT;
+- break;
+- }
+-
+- ret += retval;
+- src += retval;
+- buf += retval;
+- count -= retval;
+- }
+- *ppos = src;
+-
+- free_page((unsigned long) page);
+- return ret;
+-}
+-
+-static ssize_t mem_write(struct file * file, const char __user *buf,
+- size_t count, loff_t *ppos)
+-{
+- int copied;
++ unsigned long addr = *ppos;
++ ssize_t copied;
+ char *page;
+- unsigned long dst = *ppos;
+- struct mm_struct *mm = file->private_data;
+
+ if (!mm)
+ return 0;
+@@ -842,31 +805,54 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
+ return -ENOMEM;
+
+ copied = 0;
++ if (!atomic_inc_not_zero(&mm->mm_users))
++ goto free;
++
+ while (count > 0) {
+- int this_len, retval;
++ int this_len = min_t(int, count, PAGE_SIZE);
+
+- this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
+- if (copy_from_user(page, buf, this_len)) {
++ if (write && copy_from_user(page, buf, this_len)) {
+ copied = -EFAULT;
+ break;
+ }
+- retval = access_remote_vm(mm, dst, page, this_len, 1);
+- if (!retval) {
++
++ this_len = access_remote_vm(mm, addr, page, this_len, write);
++ if (!this_len) {
+ if (!copied)
+ copied = -EIO;
+ break;
+ }
+- copied += retval;
+- buf += retval;
+- dst += retval;
+- count -= retval;
++
++ if (!write && copy_to_user(buf, page, this_len)) {
++ copied = -EFAULT;
++ break;
++ }
++
++ buf += this_len;
++ addr += this_len;
++ copied += this_len;
++ count -= this_len;
+ }
+- *ppos = dst;
++ *ppos = addr;
+
++ mmput(mm);
++free:
+ free_page((unsigned long) page);
+ return copied;
+ }
+
++static ssize_t mem_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ return mem_rw(file, buf, count, ppos, 0);
++}
++
++static ssize_t mem_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ return mem_rw(file, (char __user*)buf, count, ppos, 1);
++}
++
+ loff_t mem_lseek(struct file *file, loff_t offset, int orig)
+ {
+ switch (orig) {
+@@ -886,8 +872,8 @@ loff_t mem_lseek(struct file *file, loff_t offset, int orig)
+ static int mem_release(struct inode *inode, struct file *file)
+ {
+ struct mm_struct *mm = file->private_data;
+-
+- mmput(mm);
++ if (mm)
++ mmdrop(mm);
+ return 0;
+ }
+
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index e185253..87cb24a 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1799,6 +1799,12 @@ static void udf_close_lvid(struct super_block *sb)
+ le16_to_cpu(lvid->descTag.descCRCLength)));
+
+ lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
++ /*
++ * We set buffer uptodate unconditionally here to avoid spurious
++ * warnings from mark_buffer_dirty() when previous EIO has marked
++ * the buffer as !uptodate
++ */
++ set_buffer_uptodate(bh);
+ mark_buffer_dirty(bh);
+ sbi->s_lvid_dirty = 0;
+ mutex_unlock(&sbi->s_alloc_mutex);
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index a5386e3..b5d6b6a 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -51,6 +51,7 @@ extern void refrigerator(void);
+ extern int freeze_processes(void);
+ extern int freeze_kernel_threads(void);
+ extern void thaw_processes(void);
++extern void thaw_kernel_threads(void);
+
+ static inline int try_to_freeze(void)
+ {
+@@ -185,6 +186,7 @@ static inline void refrigerator(void) {}
+ static inline int freeze_processes(void) { return -ENOSYS; }
+ static inline int freeze_kernel_threads(void) { return -ENOSYS; }
+ static inline void thaw_processes(void) {}
++static inline void thaw_kernel_threads(void) {}
+
+ static inline int try_to_freeze(void) { return 0; }
+
+diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
+index 83b0ea3..8a0ede4 100644
+--- a/include/linux/pm_qos.h
++++ b/include/linux/pm_qos.h
+@@ -107,7 +107,19 @@ static inline void pm_qos_remove_request(struct pm_qos_request *req)
+ { return; }
+
+ static inline int pm_qos_request(int pm_qos_class)
+- { return 0; }
++{
++ switch (pm_qos_class) {
++ case PM_QOS_CPU_DMA_LATENCY:
++ return PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
++ case PM_QOS_NETWORK_LATENCY:
++ return PM_QOS_NETWORK_LAT_DEFAULT_VALUE;
++ case PM_QOS_NETWORK_THROUGHPUT:
++ return PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE;
++ default:
++ return PM_QOS_DEFAULT_VALUE;
++ }
++}
++
+ static inline int pm_qos_add_notifier(int pm_qos_class,
+ struct notifier_block *notifier)
+ { return 0; }
+diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
+index 61b2905..3b6f628 100644
+--- a/include/linux/usb/ch9.h
++++ b/include/linux/usb/ch9.h
+@@ -589,7 +589,7 @@ static inline int usb_endpoint_is_isoc_out(
+ */
+ static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
+ {
+- return le16_to_cpu(epd->wMaxPacketSize);
++ return __le16_to_cpu(epd->wMaxPacketSize);
+ }
+
+ /*-------------------------------------------------------------------------*/
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 52fd049..faa39d1 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1673,8 +1673,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
+ ri->rp = rp;
+ ri->task = current;
+
+- if (rp->entry_handler && rp->entry_handler(ri, regs))
++ if (rp->entry_handler && rp->entry_handler(ri, regs)) {
++ raw_spin_lock_irqsave(&rp->lock, flags);
++ hlist_add_head(&ri->hlist, &rp->free_instances);
++ raw_spin_unlock_irqrestore(&rp->lock, flags);
+ return 0;
++ }
+
+ arch_prepare_kretprobe(ri, regs);
+
+diff --git a/kernel/panic.c b/kernel/panic.c
+index b2659360..3458469 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -237,11 +237,20 @@ void add_taint(unsigned flag)
+ * Can't trust the integrity of the kernel anymore.
+ * We don't call directly debug_locks_off() because the issue
+ * is not necessarily serious enough to set oops_in_progress to 1
+- * Also we want to keep up lockdep for staging development and
+- * post-warning case.
++ * Also we want to keep up lockdep for staging/out-of-tree
++ * development and post-warning case.
+ */
+- if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
+- printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
++ switch (flag) {
++ case TAINT_CRAP:
++ case TAINT_OOT_MODULE:
++ case TAINT_WARN:
++ case TAINT_FIRMWARE_WORKAROUND:
++ break;
++
++ default:
++ if (__debug_locks_off())
++ printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
++ }
+
+ set_bit(flag, &tainted_mask);
+ }
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index a6b0503..624538a 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -55,7 +55,7 @@ enum {
+
+ static int hibernation_mode = HIBERNATION_SHUTDOWN;
+
+-static bool freezer_test_done;
++bool freezer_test_done;
+
+ static const struct platform_hibernation_ops *hibernation_ops;
+
+diff --git a/kernel/power/power.h b/kernel/power/power.h
+index 23a2db1..0c4defe 100644
+--- a/kernel/power/power.h
++++ b/kernel/power/power.h
+@@ -50,6 +50,8 @@ static inline char *check_image_kernel(struct swsusp_info *info)
+ #define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT)
+
+ /* kernel/power/hibernate.c */
++extern bool freezer_test_done;
++
+ extern int hibernation_snapshot(int platform_mode);
+ extern int hibernation_restore(int platform_mode);
+ extern int hibernation_platform_enter(void);
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index addbbe5..3d4b954 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -203,3 +203,12 @@ void thaw_processes(void)
+ printk("done.\n");
+ }
+
++void thaw_kernel_threads(void)
++{
++ printk("Restarting kernel threads ... ");
++ thaw_workqueues();
++ thaw_tasks(true);
++ schedule();
++ printk("done.\n");
++}
++
+diff --git a/kernel/power/user.c b/kernel/power/user.c
+index 6d8f535..f08d227 100644
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -283,10 +283,17 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+ }
+ pm_restore_gfp_mask();
+ error = hibernation_snapshot(data->platform_support);
+- if (!error)
++ if (error) {
++ thaw_kernel_threads();
++ } else {
+ error = put_user(in_suspend, (int __user *)arg);
+- if (!error)
+- data->ready = 1;
++ if (!error && !freezer_test_done)
++ data->ready = 1;
++ if (freezer_test_done) {
++ freezer_test_done = false;
++ thaw_kernel_threads();
++ }
++ }
+ break;
+
+ case SNAPSHOT_ATOMIC_RESTORE:
+@@ -303,6 +310,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
+ swsusp_free();
+ memset(&data->handle, 0, sizeof(struct snapshot_handle));
+ data->ready = 0;
++ /*
++ * It is necessary to thaw kernel threads here, because
++ * SNAPSHOT_CREATE_IMAGE may be invoked directly after
++ * SNAPSHOT_FREE. In that case, if kernel threads were not
++ * thawed, the preallocation of memory carried out by
++ * hibernation_snapshot() might run into problems (i.e. it
++ * might fail or even deadlock).
++ */
++ thaw_kernel_threads();
+ break;
+
+ case SNAPSHOT_SET_IMAGE_SIZE:
+diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
+index 583a136..78fcacf 100644
+--- a/kernel/sched_rt.c
++++ b/kernel/sched_rt.c
+@@ -1388,6 +1388,11 @@ static int push_rt_task(struct rq *rq)
+ if (!next_task)
+ return 0;
+
++#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
++ if (unlikely(task_running(rq, next_task)))
++ return 0;
++#endif
++
+ retry:
+ if (unlikely(next_task == rq->curr)) {
+ WARN_ON(1);
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 899d956..8fb8a40 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -313,12 +313,34 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
+ } else if (!locked)
+ spin_lock_irq(&zone->lru_lock);
+
++ /*
++ * migrate_pfn does not necessarily start aligned to a
++ * pageblock. Ensure that pfn_valid is called when moving
++ * into a new MAX_ORDER_NR_PAGES range in case of large
++ * memory holes within the zone
++ */
++ if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
++ if (!pfn_valid(low_pfn)) {
++ low_pfn += MAX_ORDER_NR_PAGES - 1;
++ continue;
++ }
++ }
++
+ if (!pfn_valid_within(low_pfn))
+ continue;
+ nr_scanned++;
+
+- /* Get the page and skip if free */
++ /*
++ * Get the page and ensure the page is within the same zone.
++ * See the comment in isolate_freepages about overlapping
++ * nodes. It is deliberate that the new zone lock is not taken
++ * as memory compaction should not move pages between nodes.
++ */
+ page = pfn_to_page(low_pfn);
++ if (page_zone(page) != zone)
++ continue;
++
++ /* Skip if free */
+ if (PageBuddy(page))
+ continue;
+
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 90286a4..03c5b0e 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1400,15 +1400,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long seg = 0;
+ size_t count;
+ loff_t *ppos = &iocb->ki_pos;
+- struct blk_plug plug;
+
+ count = 0;
+ retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
+ if (retval)
+ return retval;
+
+- blk_start_plug(&plug);
+-
+ /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
+ if (filp->f_flags & O_DIRECT) {
+ loff_t size;
+@@ -1424,8 +1421,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ retval = filemap_write_and_wait_range(mapping, pos,
+ pos + iov_length(iov, nr_segs) - 1);
+ if (!retval) {
++ struct blk_plug plug;
++
++ blk_start_plug(&plug);
+ retval = mapping->a_ops->direct_IO(READ, iocb,
+ iov, pos, nr_segs);
++ blk_finish_plug(&plug);
+ }
+ if (retval > 0) {
+ *ppos = pos + retval;
+@@ -1481,7 +1482,6 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ break;
+ }
+ out:
+- blk_finish_plug(&plug);
+ return retval;
+ }
+ EXPORT_SYMBOL(generic_file_aio_read);
+diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
+index f91b2f6..a4eb311 100644
+--- a/mm/filemap_xip.c
++++ b/mm/filemap_xip.c
+@@ -263,7 +263,12 @@ found:
+ xip_pfn);
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+- BUG_ON(err);
++ /*
++ * err == -EBUSY is fine, we've raced against another thread
++ * that faulted-in the same page
++ */
++ if (err != -EBUSY)
++ BUG_ON(err);
+ return VM_FAULT_NOPAGE;
+ } else {
+ int err, ret = VM_FAULT_OOM;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 36b3d98..33141f5 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2064,7 +2064,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
+ {
+ struct mm_struct *mm = mm_slot->mm;
+
+- VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
++ VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
+
+ if (khugepaged_test_exit(mm)) {
+ /* free mm_slot */
+@@ -2094,7 +2094,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
+ int progress = 0;
+
+ VM_BUG_ON(!pages);
+- VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
++ VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
+
+ if (khugepaged_scan.mm_slot)
+ mm_slot = khugepaged_scan.mm_slot;
+diff --git a/mm/swap.c b/mm/swap.c
+index a91caf7..55b266d 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -667,7 +667,7 @@ void lru_add_page_tail(struct zone* zone,
+ VM_BUG_ON(!PageHead(page));
+ VM_BUG_ON(PageCompound(page_tail));
+ VM_BUG_ON(PageLRU(page_tail));
+- VM_BUG_ON(!spin_is_locked(&zone->lru_lock));
++ VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
+
+ SetPageLRU(page_tail);
+
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 4562e9d..05c8768 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -1446,7 +1446,7 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
+ for (i = 0; i < c->cvt_setups.used; i++) {
+ p = snd_array_elem(&c->cvt_setups, i);
+ if (!p->active && p->stream_tag == stream_tag &&
+- get_wcaps_type(get_wcaps(codec, p->nid)) == type)
++ get_wcaps_type(get_wcaps(c, p->nid)) == type)
+ p->dirty = 1;
+ }
+ }
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index 5b0a9bb..ec0518e 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -976,8 +976,10 @@ static void cs_automic(struct hda_codec *codec)
+ /* specific to CS421x, single ADC */
+ if (spec->vendor_nid == CS421X_VENDOR_NID) {
+ if (present) {
+- spec->last_input = spec->cur_input;
+- spec->cur_input = spec->automic_idx;
++ if (spec->cur_input != spec->automic_idx) {
++ spec->last_input = spec->cur_input;
++ spec->cur_input = spec->automic_idx;
++ }
+ } else {
+ spec->cur_input = spec->last_input;
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 5f03c40..34e5fcc 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -176,6 +176,7 @@ struct alc_spec {
+ unsigned int detect_lo:1; /* Line-out detection enabled */
+ unsigned int automute_speaker_possible:1; /* there are speakers and either LO or HP */
+ unsigned int automute_lo_possible:1; /* there are line outs and HP */
++ unsigned int keep_vref_in_automute:1; /* Don't clear VREF in automute */
+
+ /* other flags */
+ unsigned int no_analog :1; /* digital I/O only */
+@@ -519,13 +520,24 @@ static void do_automute(struct hda_codec *codec, int num_pins, hda_nid_t *pins,
+
+ for (i = 0; i < num_pins; i++) {
+ hda_nid_t nid = pins[i];
++ unsigned int val;
+ if (!nid)
+ break;
+ switch (spec->automute_mode) {
+ case ALC_AUTOMUTE_PIN:
++ /* don't reset VREF value in case it's controlling
++ * the amp (see alc861_fixup_asus_amp_vref_0f())
++ */
++ if (spec->keep_vref_in_automute) {
++ val = snd_hda_codec_read(codec, nid, 0,
++ AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
++ val &= ~PIN_HP;
++ } else
++ val = 0;
++ val |= pin_bits;
+ snd_hda_codec_write(codec, nid, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL,
+- pin_bits);
++ val);
+ break;
+ case ALC_AUTOMUTE_AMP:
+ snd_hda_codec_amp_stereo(codec, nid, HDA_OUTPUT, 0,
+@@ -5011,7 +5023,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x8330, "ASUS Eeepc P703 P900A",
+ ALC269_FIXUP_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1013, "ASUS N61Da", ALC269_FIXUP_AMIC),
+- SND_PCI_QUIRK(0x1043, 0x1113, "ASUS N63Jn", ALC269_FIXUP_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1143, "ASUS B53f", ALC269_FIXUP_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1133, "ASUS UJ20ft", ALC269_FIXUP_AMIC),
+ SND_PCI_QUIRK(0x1043, 0x1183, "ASUS K72DR", ALC269_FIXUP_AMIC),
+@@ -5226,6 +5237,25 @@ enum {
+ PINFIX_ASUS_A6RP,
+ };
+
++/* On some laptops, VREF of pin 0x0f is abused for controlling the main amp */
++static void alc861_fixup_asus_amp_vref_0f(struct hda_codec *codec,
++ const struct alc_fixup *fix, int action)
++{
++ struct alc_spec *spec = codec->spec;
++ unsigned int val;
++
++ if (action != ALC_FIXUP_ACT_INIT)
++ return;
++ val = snd_hda_codec_read(codec, 0x0f, 0,
++ AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
++ if (!(val & (AC_PINCTL_IN_EN | AC_PINCTL_OUT_EN)))
++ val |= AC_PINCTL_IN_EN;
++ val |= AC_PINCTL_VREF_50;
++ snd_hda_codec_write(codec, 0x0f, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, val);
++ spec->keep_vref_in_automute = 1;
++}
++
+ static const struct alc_fixup alc861_fixups[] = {
+ [PINFIX_FSC_AMILO_PI1505] = {
+ .type = ALC_FIXUP_PINS,
+@@ -5236,17 +5266,13 @@ static const struct alc_fixup alc861_fixups[] = {
+ }
+ },
+ [PINFIX_ASUS_A6RP] = {
+- .type = ALC_FIXUP_VERBS,
+- .v.verbs = (const struct hda_verb[]) {
+- /* node 0x0f VREF seems controlling the master output */
+- { 0x0f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF50 },
+- { }
+- },
++ .type = ALC_FIXUP_FUNC,
++ .v.func = alc861_fixup_asus_amp_vref_0f,
+ },
+ };
+
+ static const struct snd_pci_quirk alc861_fixup_tbl[] = {
+- SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", PINFIX_ASUS_A6RP),
++ SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", PINFIX_ASUS_A6RP),
+ SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", PINFIX_ASUS_A6RP),
+ SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", PINFIX_FSC_AMILO_PI1505),
+ {}
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index 8d69e59..a0a3f50 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -198,6 +198,9 @@ struct via_spec {
+ unsigned int no_pin_power_ctl;
+ enum VIA_HDA_CODEC codec_type;
+
++ /* analog low-power control */
++ bool alc_mode;
++
+ /* smart51 setup */
+ unsigned int smart51_nums;
+ hda_nid_t smart51_pins[2];
+@@ -748,6 +751,7 @@ static int via_pin_power_ctl_put(struct snd_kcontrol *kcontrol,
+ return 0;
+ spec->no_pin_power_ctl = val;
+ set_widgets_power_state(codec);
++ analog_low_current_mode(codec);
+ return 1;
+ }
+
+@@ -1035,13 +1039,19 @@ static bool is_aa_path_mute(struct hda_codec *codec)
+ }
+
+ /* enter/exit analog low-current mode */
+-static void analog_low_current_mode(struct hda_codec *codec)
++static void __analog_low_current_mode(struct hda_codec *codec, bool force)
+ {
+ struct via_spec *spec = codec->spec;
+ bool enable;
+ unsigned int verb, parm;
+
+- enable = is_aa_path_mute(codec) && (spec->opened_streams != 0);
++ if (spec->no_pin_power_ctl)
++ enable = false;
++ else
++ enable = is_aa_path_mute(codec) && !spec->opened_streams;
++ if (enable == spec->alc_mode && !force)
++ return;
++ spec->alc_mode = enable;
+
+ /* decide low current mode's verb & parameter */
+ switch (spec->codec_type) {
+@@ -1073,6 +1083,11 @@ static void analog_low_current_mode(struct hda_codec *codec)
+ snd_hda_codec_write(codec, codec->afg, 0, verb, parm);
+ }
+
++static void analog_low_current_mode(struct hda_codec *codec)
++{
++ return __analog_low_current_mode(codec, false);
++}
++
+ /*
+ * generic initialization of ADC, input mixers and output mixers
+ */
+@@ -1445,6 +1460,7 @@ static int via_build_controls(struct hda_codec *codec)
+ struct snd_kcontrol *kctl;
+ int err, i;
+
++ spec->no_pin_power_ctl = 1;
+ if (spec->set_widgets_power_state)
+ if (!via_clone_control(spec, &via_pin_power_ctl_enum))
+ return -ENOMEM;
+@@ -1498,10 +1514,6 @@ static int via_build_controls(struct hda_codec *codec)
+ return err;
+ }
+
+- /* init power states */
+- set_widgets_power_state(codec);
+- analog_low_current_mode(codec);
+-
+ via_free_kctls(codec); /* no longer needed */
+ return 0;
+ }
+@@ -2771,6 +2783,10 @@ static int via_init(struct hda_codec *codec)
+ for (i = 0; i < spec->num_iverbs; i++)
+ snd_hda_sequence_write(codec, spec->init_verbs[i]);
+
++ /* init power states */
++ set_widgets_power_state(codec);
++ __analog_low_current_mode(codec, true);
++
+ via_auto_init_multi_out(codec);
+ via_auto_init_hp_out(codec);
+ via_auto_init_speaker_out(codec);
+diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
+index 26c7e8b..c0dbb52 100644
+--- a/sound/pci/oxygen/oxygen_mixer.c
++++ b/sound/pci/oxygen/oxygen_mixer.c
+@@ -618,9 +618,12 @@ static int ac97_volume_get(struct snd_kcontrol *ctl,
+ mutex_lock(&chip->mutex);
+ reg = oxygen_read_ac97(chip, codec, index);
+ mutex_unlock(&chip->mutex);
+- value->value.integer.value[0] = 31 - (reg & 0x1f);
+- if (stereo)
+- value->value.integer.value[1] = 31 - ((reg >> 8) & 0x1f);
++ if (!stereo) {
++ value->value.integer.value[0] = 31 - (reg & 0x1f);
++ } else {
++ value->value.integer.value[0] = 31 - ((reg >> 8) & 0x1f);
++ value->value.integer.value[1] = 31 - (reg & 0x1f);
++ }
+ return 0;
+ }
+
+@@ -636,14 +639,14 @@ static int ac97_volume_put(struct snd_kcontrol *ctl,
+
+ mutex_lock(&chip->mutex);
+ oldreg = oxygen_read_ac97(chip, codec, index);
+- newreg = oldreg;
+- newreg = (newreg & ~0x1f) |
+- (31 - (value->value.integer.value[0] & 0x1f));
+- if (stereo)
+- newreg = (newreg & ~0x1f00) |
+- ((31 - (value->value.integer.value[1] & 0x1f)) << 8);
+- else
+- newreg = (newreg & ~0x1f00) | ((newreg & 0x1f) << 8);
++ if (!stereo) {
++ newreg = oldreg & ~0x1f;
++ newreg |= 31 - (value->value.integer.value[0] & 0x1f);
++ } else {
++ newreg = oldreg & ~0x1f1f;
++ newreg |= (31 - (value->value.integer.value[0] & 0x1f)) << 8;
++ newreg |= 31 - (value->value.integer.value[1] & 0x1f);
++ }
+ change = newreg != oldreg;
+ if (change)
+ oxygen_write_ac97(chip, codec, index, newreg);
+diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
+index 53edd9a..d795294 100644
+--- a/sound/soc/codecs/wm8962.c
++++ b/sound/soc/codecs/wm8962.c
+@@ -3172,13 +3172,13 @@ static int wm8962_hw_params(struct snd_pcm_substream *substream,
+ case SNDRV_PCM_FORMAT_S16_LE:
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+- aif0 |= 0x40;
++ aif0 |= 0x4;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+- aif0 |= 0x80;
++ aif0 |= 0x8;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+- aif0 |= 0xc0;
++ aif0 |= 0xc;
+ break;
+ default:
+ return -EINVAL;
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index d0c545b..6e502af 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -729,6 +729,8 @@ static void vmid_reference(struct snd_soc_codec *codec)
+ {
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
++ pm_runtime_get_sync(codec->dev);
++
+ wm8994->vmid_refcount++;
+
+ dev_dbg(codec->dev, "Referencing VMID, refcount is now %d\n",
+@@ -742,7 +744,7 @@ static void vmid_reference(struct snd_soc_codec *codec)
+ WM8994_VMID_RAMP_MASK,
+ WM8994_STARTUP_BIAS_ENA |
+ WM8994_VMID_BUF_ENA |
+- (0x11 << WM8994_VMID_RAMP_SHIFT));
++ (0x3 << WM8994_VMID_RAMP_SHIFT));
+
+ /* Main bias enable, VMID=2x40k */
+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
+@@ -796,6 +798,8 @@ static void vmid_dereference(struct snd_soc_codec *codec)
+ WM8994_VMID_BUF_ENA |
+ WM8994_VMID_RAMP_MASK, 0);
+ }
++
++ pm_runtime_put(codec->dev);
+ }
+
+ static int vmid_event(struct snd_soc_dapm_widget *w,
+diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
+index 48e61e9..3642e06 100644
+--- a/sound/soc/codecs/wm_hubs.c
++++ b/sound/soc/codecs/wm_hubs.c
+@@ -587,14 +587,14 @@ SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER1, 0, 1, 0),
+ };
+
+ static const struct snd_kcontrol_new line2_mix[] = {
+-SOC_DAPM_SINGLE("IN2R Switch", WM8993_LINE_MIXER2, 2, 1, 0),
+-SOC_DAPM_SINGLE("IN2L Switch", WM8993_LINE_MIXER2, 1, 1, 0),
++SOC_DAPM_SINGLE("IN1L Switch", WM8993_LINE_MIXER2, 2, 1, 0),
++SOC_DAPM_SINGLE("IN1R Switch", WM8993_LINE_MIXER2, 1, 1, 0),
+ SOC_DAPM_SINGLE("Output Switch", WM8993_LINE_MIXER2, 0, 1, 0),
+ };
+
+ static const struct snd_kcontrol_new line2n_mix[] = {
+-SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER2, 6, 1, 0),
+-SOC_DAPM_SINGLE("Right Output Switch", WM8993_LINE_MIXER2, 5, 1, 0),
++SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER2, 5, 1, 0),
++SOC_DAPM_SINGLE("Right Output Switch", WM8993_LINE_MIXER2, 6, 1, 0),
+ };
+
+ static const struct snd_kcontrol_new line2p_mix[] = {
+@@ -614,6 +614,8 @@ SND_SOC_DAPM_INPUT("IN2RP:VXRP"),
+ SND_SOC_DAPM_MICBIAS("MICBIAS2", WM8993_POWER_MANAGEMENT_1, 5, 0),
+ SND_SOC_DAPM_MICBIAS("MICBIAS1", WM8993_POWER_MANAGEMENT_1, 4, 0),
+
++SND_SOC_DAPM_SUPPLY("LINEOUT_VMID_BUF", WM8993_ANTIPOP1, 7, 0, NULL, 0),
++
+ SND_SOC_DAPM_MIXER("IN1L PGA", WM8993_POWER_MANAGEMENT_2, 6, 0,
+ in1l_pga, ARRAY_SIZE(in1l_pga)),
+ SND_SOC_DAPM_MIXER("IN1R PGA", WM8993_POWER_MANAGEMENT_2, 4, 0,
+@@ -832,9 +834,11 @@ static const struct snd_soc_dapm_route lineout1_diff_routes[] = {
+ };
+
+ static const struct snd_soc_dapm_route lineout1_se_routes[] = {
++ { "LINEOUT1N Mixer", NULL, "LINEOUT_VMID_BUF" },
+ { "LINEOUT1N Mixer", "Left Output Switch", "Left Output PGA" },
+ { "LINEOUT1N Mixer", "Right Output Switch", "Right Output PGA" },
+
++ { "LINEOUT1P Mixer", NULL, "LINEOUT_VMID_BUF" },
+ { "LINEOUT1P Mixer", "Left Output Switch", "Left Output PGA" },
+
+ { "LINEOUT1N Driver", NULL, "LINEOUT1N Mixer" },
+@@ -842,8 +846,8 @@ static const struct snd_soc_dapm_route lineout1_se_routes[] = {
+ };
+
+ static const struct snd_soc_dapm_route lineout2_diff_routes[] = {
+- { "LINEOUT2 Mixer", "IN2L Switch", "IN2L PGA" },
+- { "LINEOUT2 Mixer", "IN2R Switch", "IN2R PGA" },
++ { "LINEOUT2 Mixer", "IN1L Switch", "IN1L PGA" },
++ { "LINEOUT2 Mixer", "IN1R Switch", "IN1R PGA" },
+ { "LINEOUT2 Mixer", "Output Switch", "Right Output PGA" },
+
+ { "LINEOUT2N Driver", NULL, "LINEOUT2 Mixer" },
+@@ -851,9 +855,11 @@ static const struct snd_soc_dapm_route lineout2_diff_routes[] = {
+ };
+
+ static const struct snd_soc_dapm_route lineout2_se_routes[] = {
++ { "LINEOUT2N Mixer", NULL, "LINEOUT_VMID_BUF" },
+ { "LINEOUT2N Mixer", "Left Output Switch", "Left Output PGA" },
+ { "LINEOUT2N Mixer", "Right Output Switch", "Right Output PGA" },
+
++ { "LINEOUT2P Mixer", NULL, "LINEOUT_VMID_BUF" },
+ { "LINEOUT2P Mixer", "Right Output Switch", "Right Output PGA" },
+
+ { "LINEOUT2N Driver", NULL, "LINEOUT2N Mixer" },
diff --git a/1006_linux-3.2.7.patch b/1006_linux-3.2.7.patch
new file mode 100644
index 00000000..08a6ba3e
--- /dev/null
+++ b/1006_linux-3.2.7.patch
@@ -0,0 +1,994 @@
+diff --git a/Makefile b/Makefile
+index 47fe496..d1bdc90 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index 492ade8..d99346e 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -374,7 +374,7 @@ int __init pci_xen_init(void)
+
+ int __init pci_xen_hvm_init(void)
+ {
+- if (!xen_feature(XENFEAT_hvm_pirqs))
++ if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
+ return 0;
+
+ #ifdef CONFIG_ACPI
+diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
+index 88f160b..107f6f7 100644
+--- a/crypto/sha512_generic.c
++++ b/crypto/sha512_generic.c
+@@ -31,11 +31,6 @@ static inline u64 Maj(u64 x, u64 y, u64 z)
+ return (x & y) | (z & (x | y));
+ }
+
+-static inline u64 RORu64(u64 x, u64 y)
+-{
+- return (x >> y) | (x << (64 - y));
+-}
+-
+ static const u64 sha512_K[80] = {
+ 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL,
+ 0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
+@@ -66,10 +61,10 @@ static const u64 sha512_K[80] = {
+ 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL,
+ };
+
+-#define e0(x) (RORu64(x,28) ^ RORu64(x,34) ^ RORu64(x,39))
+-#define e1(x) (RORu64(x,14) ^ RORu64(x,18) ^ RORu64(x,41))
+-#define s0(x) (RORu64(x, 1) ^ RORu64(x, 8) ^ (x >> 7))
+-#define s1(x) (RORu64(x,19) ^ RORu64(x,61) ^ (x >> 6))
++#define e0(x) (ror64(x,28) ^ ror64(x,34) ^ ror64(x,39))
++#define e1(x) (ror64(x,14) ^ ror64(x,18) ^ ror64(x,41))
++#define s0(x) (ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7))
++#define s1(x) (ror64(x,19) ^ ror64(x,61) ^ (x >> 6))
+
+ static inline void LOAD_OP(int I, u64 *W, const u8 *input)
+ {
+@@ -78,7 +73,7 @@ static inline void LOAD_OP(int I, u64 *W, const u8 *input)
+
+ static inline void BLEND_OP(int I, u64 *W)
+ {
+- W[I % 16] += s1(W[(I-2) % 16]) + W[(I-7) % 16] + s0(W[(I-15) % 16]);
++ W[I & 15] += s1(W[(I-2) & 15]) + W[(I-7) & 15] + s0(W[(I-15) & 15]);
+ }
+
+ static void
+@@ -89,46 +84,42 @@ sha512_transform(u64 *state, const u8 *input)
+ int i;
+ u64 W[16];
+
+- /* load the input */
+- for (i = 0; i < 16; i++)
+- LOAD_OP(i, W, input);
+-
+ /* load the state into our registers */
+ a=state[0]; b=state[1]; c=state[2]; d=state[3];
+ e=state[4]; f=state[5]; g=state[6]; h=state[7];
+
+-#define SHA512_0_15(i, a, b, c, d, e, f, g, h) \
+- t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[i]; \
+- t2 = e0(a) + Maj(a, b, c); \
+- d += t1; \
+- h = t1 + t2
+-
+-#define SHA512_16_79(i, a, b, c, d, e, f, g, h) \
+- BLEND_OP(i, W); \
+- t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i)%16]; \
+- t2 = e0(a) + Maj(a, b, c); \
+- d += t1; \
+- h = t1 + t2
+-
+- for (i = 0; i < 16; i += 8) {
+- SHA512_0_15(i, a, b, c, d, e, f, g, h);
+- SHA512_0_15(i + 1, h, a, b, c, d, e, f, g);
+- SHA512_0_15(i + 2, g, h, a, b, c, d, e, f);
+- SHA512_0_15(i + 3, f, g, h, a, b, c, d, e);
+- SHA512_0_15(i + 4, e, f, g, h, a, b, c, d);
+- SHA512_0_15(i + 5, d, e, f, g, h, a, b, c);
+- SHA512_0_15(i + 6, c, d, e, f, g, h, a, b);
+- SHA512_0_15(i + 7, b, c, d, e, f, g, h, a);
+- }
+- for (i = 16; i < 80; i += 8) {
+- SHA512_16_79(i, a, b, c, d, e, f, g, h);
+- SHA512_16_79(i + 1, h, a, b, c, d, e, f, g);
+- SHA512_16_79(i + 2, g, h, a, b, c, d, e, f);
+- SHA512_16_79(i + 3, f, g, h, a, b, c, d, e);
+- SHA512_16_79(i + 4, e, f, g, h, a, b, c, d);
+- SHA512_16_79(i + 5, d, e, f, g, h, a, b, c);
+- SHA512_16_79(i + 6, c, d, e, f, g, h, a, b);
+- SHA512_16_79(i + 7, b, c, d, e, f, g, h, a);
++ /* now iterate */
++ for (i=0; i<80; i+=8) {
++ if (!(i & 8)) {
++ int j;
++
++ if (i < 16) {
++ /* load the input */
++ for (j = 0; j < 16; j++)
++ LOAD_OP(i + j, W, input);
++ } else {
++ for (j = 0; j < 16; j++) {
++ BLEND_OP(i + j, W);
++ }
++ }
++ }
++
++ t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[(i & 15)];
++ t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
++ t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[(i & 15) + 1];
++ t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
++ t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2];
++ t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
++ t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[(i & 15) + 3];
++ t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
++ t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[(i & 15) + 4];
++ t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
++ t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[(i & 15) + 5];
++ t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
++ t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[(i & 15) + 6];
++ t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
++ t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[(i & 15) + 7];
++ t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
+ }
+
+ state[0] += a; state[1] += b; state[2] += c; state[3] += d;
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index db3b461..94f860c 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -208,17 +208,8 @@ intel_dp_link_clock(uint8_t link_bw)
+ */
+
+ static int
+-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
++intel_dp_link_required(int pixel_clock, int bpp)
+ {
+- struct drm_crtc *crtc = intel_dp->base.base.crtc;
+- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+- int bpp = 24;
+-
+- if (check_bpp)
+- bpp = check_bpp;
+- else if (intel_crtc)
+- bpp = intel_crtc->bpp;
+-
+ return (pixel_clock * bpp + 9) / 10;
+ }
+
+@@ -245,12 +236,11 @@ intel_dp_mode_valid(struct drm_connector *connector,
+ return MODE_PANEL;
+ }
+
+- mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
++ mode_rate = intel_dp_link_required(mode->clock, 24);
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+ if (mode_rate > max_rate) {
+- mode_rate = intel_dp_link_required(intel_dp,
+- mode->clock, 18);
++ mode_rate = intel_dp_link_required(mode->clock, 18);
+ if (mode_rate > max_rate)
+ return MODE_CLOCK_HIGH;
+ else
+@@ -683,7 +673,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ int lane_count, clock;
+ int max_lane_count = intel_dp_max_lane_count(intel_dp);
+ int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+- int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
++ int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+ static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+
+ if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
+@@ -701,7 +691,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ for (clock = 0; clock <= max_clock; clock++) {
+ int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
+
+- if (intel_dp_link_required(intel_dp, mode->clock, bpp)
++ if (intel_dp_link_required(mode->clock, bpp)
+ <= link_avail) {
+ intel_dp->link_bw = bws[clock];
+ intel_dp->lane_count = lane_count;
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index e441911..b83f745 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -694,6 +694,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
++ .ident = "AOpen i45GMx-I",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
++ DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
++ },
++ },
++ {
++ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Aopen i945GTt-VFA",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
+diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
+index 95cbfb3..e4ab491 100644
+--- a/drivers/hwmon/f75375s.c
++++ b/drivers/hwmon/f75375s.c
+@@ -159,7 +159,7 @@ static inline void f75375_write8(struct i2c_client *client, u8 reg,
+ static inline void f75375_write16(struct i2c_client *client, u8 reg,
+ u16 value)
+ {
+- int err = i2c_smbus_write_byte_data(client, reg, (value << 8));
++ int err = i2c_smbus_write_byte_data(client, reg, (value >> 8));
+ if (err)
+ return;
+ i2c_smbus_write_byte_data(client, reg + 1, (value & 0xFF));
+@@ -311,7 +311,7 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
+ fanmode |= (3 << FAN_CTRL_MODE(nr));
+ break;
+ case 2: /* AUTOMATIC*/
+- fanmode |= (2 << FAN_CTRL_MODE(nr));
++ fanmode |= (1 << FAN_CTRL_MODE(nr));
+ break;
+ case 3: /* fan speed */
+ break;
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index a7ee502..72bc756 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -965,11 +965,14 @@ static void atmci_start_request(struct atmel_mci *host,
+ host->data_status = 0;
+
+ if (host->need_reset) {
++ iflags = atmci_readl(host, ATMCI_IMR);
++ iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
+ atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
+ atmci_writel(host, ATMCI_MR, host->mode_reg);
+ if (host->caps.has_cfg_reg)
+ atmci_writel(host, ATMCI_CFG, host->cfg_reg);
++ atmci_writel(host, ATMCI_IER, iflags);
+ host->need_reset = false;
+ }
+ atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index 3aaeb08..baf3d42 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -22,7 +22,6 @@
+ #include <linux/ioport.h>
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+-#include <linux/scatterlist.h>
+ #include <linux/seq_file.h>
+ #include <linux/slab.h>
+ #include <linux/stat.h>
+@@ -502,8 +501,14 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
+ host->dir_status = DW_MCI_SEND_STATUS;
+
+ if (dw_mci_submit_data_dma(host, data)) {
++ int flags = SG_MITER_ATOMIC;
++ if (host->data->flags & MMC_DATA_READ)
++ flags |= SG_MITER_TO_SG;
++ else
++ flags |= SG_MITER_FROM_SG;
++
++ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
+ host->sg = data->sg;
+- host->pio_offset = 0;
+ host->part_buf_start = 0;
+ host->part_buf_count = 0;
+
+@@ -953,6 +958,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
+ * generates a block interrupt, hence setting
+ * the scatter-gather pointer to NULL.
+ */
++ sg_miter_stop(&host->sg_miter);
+ host->sg = NULL;
+ ctrl = mci_readl(host, CTRL);
+ ctrl |= SDMMC_CTRL_FIFO_RESET;
+@@ -1286,54 +1292,44 @@ static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
+
+ static void dw_mci_read_data_pio(struct dw_mci *host)
+ {
+- struct scatterlist *sg = host->sg;
+- void *buf = sg_virt(sg);
+- unsigned int offset = host->pio_offset;
++ struct sg_mapping_iter *sg_miter = &host->sg_miter;
++ void *buf;
++ unsigned int offset;
+ struct mmc_data *data = host->data;
+ int shift = host->data_shift;
+ u32 status;
+ unsigned int nbytes = 0, len;
++ unsigned int remain, fcnt;
+
+ do {
+- len = host->part_buf_count +
+- (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
+- if (offset + len <= sg->length) {
++ if (!sg_miter_next(sg_miter))
++ goto done;
++
++ host->sg = sg_miter->__sg;
++ buf = sg_miter->addr;
++ remain = sg_miter->length;
++ offset = 0;
++
++ do {
++ fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
++ << shift) + host->part_buf_count;
++ len = min(remain, fcnt);
++ if (!len)
++ break;
+ dw_mci_pull_data(host, (void *)(buf + offset), len);
+-
+ offset += len;
+ nbytes += len;
+-
+- if (offset == sg->length) {
+- flush_dcache_page(sg_page(sg));
+- host->sg = sg = sg_next(sg);
+- if (!sg)
+- goto done;
+-
+- offset = 0;
+- buf = sg_virt(sg);
+- }
+- } else {
+- unsigned int remaining = sg->length - offset;
+- dw_mci_pull_data(host, (void *)(buf + offset),
+- remaining);
+- nbytes += remaining;
+-
+- flush_dcache_page(sg_page(sg));
+- host->sg = sg = sg_next(sg);
+- if (!sg)
+- goto done;
+-
+- offset = len - remaining;
+- buf = sg_virt(sg);
+- dw_mci_pull_data(host, buf, offset);
+- nbytes += offset;
+- }
++ remain -= len;
++ } while (remain);
++ sg_miter->consumed = offset;
+
+ status = mci_readl(host, MINTSTS);
+ mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
+ if (status & DW_MCI_DATA_ERROR_FLAGS) {
+ host->data_status = status;
+ data->bytes_xfered += nbytes;
++ sg_miter_stop(sg_miter);
++ host->sg = NULL;
+ smp_wmb();
+
+ set_bit(EVENT_DATA_ERROR, &host->pending_events);
+@@ -1342,65 +1338,66 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
+ return;
+ }
+ } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
+- host->pio_offset = offset;
+ data->bytes_xfered += nbytes;
++
++ if (!remain) {
++ if (!sg_miter_next(sg_miter))
++ goto done;
++ sg_miter->consumed = 0;
++ }
++ sg_miter_stop(sg_miter);
+ return;
+
+ done:
+ data->bytes_xfered += nbytes;
++ sg_miter_stop(sg_miter);
++ host->sg = NULL;
+ smp_wmb();
+ set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+ }
+
+ static void dw_mci_write_data_pio(struct dw_mci *host)
+ {
+- struct scatterlist *sg = host->sg;
+- void *buf = sg_virt(sg);
+- unsigned int offset = host->pio_offset;
++ struct sg_mapping_iter *sg_miter = &host->sg_miter;
++ void *buf;
++ unsigned int offset;
+ struct mmc_data *data = host->data;
+ int shift = host->data_shift;
+ u32 status;
+ unsigned int nbytes = 0, len;
++ unsigned int fifo_depth = host->fifo_depth;
++ unsigned int remain, fcnt;
+
+ do {
+- len = ((host->fifo_depth -
+- SDMMC_GET_FCNT(mci_readl(host, STATUS))) << shift)
+- - host->part_buf_count;
+- if (offset + len <= sg->length) {
++ if (!sg_miter_next(sg_miter))
++ goto done;
++
++ host->sg = sg_miter->__sg;
++ buf = sg_miter->addr;
++ remain = sg_miter->length;
++ offset = 0;
++
++ do {
++ fcnt = ((fifo_depth -
++ SDMMC_GET_FCNT(mci_readl(host, STATUS)))
++ << shift) - host->part_buf_count;
++ len = min(remain, fcnt);
++ if (!len)
++ break;
+ host->push_data(host, (void *)(buf + offset), len);
+-
+ offset += len;
+ nbytes += len;
+- if (offset == sg->length) {
+- host->sg = sg = sg_next(sg);
+- if (!sg)
+- goto done;
+-
+- offset = 0;
+- buf = sg_virt(sg);
+- }
+- } else {
+- unsigned int remaining = sg->length - offset;
+-
+- host->push_data(host, (void *)(buf + offset),
+- remaining);
+- nbytes += remaining;
+-
+- host->sg = sg = sg_next(sg);
+- if (!sg)
+- goto done;
+-
+- offset = len - remaining;
+- buf = sg_virt(sg);
+- host->push_data(host, (void *)buf, offset);
+- nbytes += offset;
+- }
++ remain -= len;
++ } while (remain);
++ sg_miter->consumed = offset;
+
+ status = mci_readl(host, MINTSTS);
+ mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
+ if (status & DW_MCI_DATA_ERROR_FLAGS) {
+ host->data_status = status;
+ data->bytes_xfered += nbytes;
++ sg_miter_stop(sg_miter);
++ host->sg = NULL;
+
+ smp_wmb();
+
+@@ -1410,12 +1407,20 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
+ return;
+ }
+ } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
+- host->pio_offset = offset;
+ data->bytes_xfered += nbytes;
++
++ if (!remain) {
++ if (!sg_miter_next(sg_miter))
++ goto done;
++ sg_miter->consumed = 0;
++ }
++ sg_miter_stop(sg_miter);
+ return;
+
+ done:
+ data->bytes_xfered += nbytes;
++ sg_miter_stop(sg_miter);
++ host->sg = NULL;
+ smp_wmb();
+ set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+ }
+@@ -1618,6 +1623,7 @@ static void dw_mci_work_routine_card(struct work_struct *work)
+ * block interrupt, hence setting the
+ * scatter-gather pointer to NULL.
+ */
++ sg_miter_stop(&host->sg_miter);
+ host->sg = NULL;
+
+ ctrl = mci_readl(host, CTRL);
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index ced5444..222954d 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -4965,7 +4965,8 @@ static int igb_find_enabled_vfs(struct igb_adapter *adapter)
+ vf_devfn = pdev->devfn + 0x80;
+ pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
+ while (pvfdev) {
+- if (pvfdev->devfn == vf_devfn)
++ if (pvfdev->devfn == vf_devfn &&
++ (pvfdev->bus->number >= pdev->bus->number))
+ vfs_found++;
+ vf_devfn += vf_stride;
+ pvfdev = pci_get_device(hw->vendor_id,
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+index 00fcd39..e571356 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+@@ -67,7 +67,8 @@ static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter)
+ vf_devfn = pdev->devfn + 0x80;
+ pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
+ while (pvfdev) {
+- if (pvfdev->devfn == vf_devfn)
++ if (pvfdev->devfn == vf_devfn &&
++ (pvfdev->bus->number >= pdev->bus->number))
+ vfs_found++;
+ vf_devfn += 2;
+ pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
+diff --git a/drivers/net/ethernet/toshiba/Kconfig b/drivers/net/ethernet/toshiba/Kconfig
+index 0517647..74acb5c 100644
+--- a/drivers/net/ethernet/toshiba/Kconfig
++++ b/drivers/net/ethernet/toshiba/Kconfig
+@@ -5,7 +5,7 @@
+ config NET_VENDOR_TOSHIBA
+ bool "Toshiba devices"
+ default y
+- depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB) || PPC_PS3
++ depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB || MIPS) || PPC_PS3
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index 8873c6e..8b0c2ca 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -1034,13 +1034,16 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
+
+ /*
+ * Workaround for early ACK timeouts, add an offset to match the
+- * initval's 64us ack timeout value.
++ * initval's 64us ack timeout value. Use 48us for the CTS timeout.
+ * This was initially only meant to work around an issue with delayed
+ * BA frames in some implementations, but it has been found to fix ACK
+ * timeout issues in other cases as well.
+ */
+- if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ)
++ if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ) {
+ acktimeout += 64 - sifstime - ah->slottime;
++ ctstimeout += 48 - sifstime - ah->slottime;
++ }
++
+
+ ath9k_hw_set_sifs_time(ah, sifstime);
+ ath9k_hw_setslottime(ah, slottime);
+diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
+index d4c909f..57622e0 100644
+--- a/drivers/net/wireless/ath/ath9k/init.c
++++ b/drivers/net/wireless/ath/ath9k/init.c
+@@ -775,6 +775,11 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
+ ARRAY_SIZE(ath9k_tpt_blink));
+ #endif
+
++ INIT_WORK(&sc->hw_reset_work, ath_reset_work);
++ INIT_WORK(&sc->hw_check_work, ath_hw_check);
++ INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
++ INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
++
+ /* Register with mac80211 */
+ error = ieee80211_register_hw(hw);
+ if (error)
+@@ -793,10 +798,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
+ goto error_world;
+ }
+
+- INIT_WORK(&sc->hw_reset_work, ath_reset_work);
+- INIT_WORK(&sc->hw_check_work, ath_hw_check);
+- INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
+- INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
+ sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
+
+ ath_init_leds(sc);
+diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
+index 67b862c..2f3aeac 100644
+--- a/drivers/net/wireless/ath/ath9k/recv.c
++++ b/drivers/net/wireless/ath/ath9k/recv.c
+@@ -824,6 +824,14 @@ static bool ath9k_rx_accept(struct ath_common *common,
+ (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_KEYMISS));
+
++ /*
++ * Key miss events are only relevant for pairwise keys where the
++ * descriptor does contain a valid key index. This has been observed
++ * mostly with CCMP encryption.
++ */
++ if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
++ rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
++
+ if (!rx_stats->rs_datalen)
+ return false;
+ /*
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 63e4be4..720edf5 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -756,10 +756,11 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ cifs_dump_mem("Bad SMB: ", buf,
+ min_t(unsigned int, server->total_read, 48));
+
+- if (mid)
+- handle_mid(mid, server, smb_buffer, length);
++ if (!mid)
++ return length;
+
+- return length;
++ handle_mid(mid, server, smb_buffer, length);
++ return 0;
+ }
+
+ static int
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index d7eeb9d..e4c3334 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -492,7 +492,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
+ {
+ int xid;
+ int rc = 0; /* to get around spurious gcc warning, set to zero here */
+- __u32 oplock = 0;
++ __u32 oplock = enable_oplocks ? REQ_OPLOCK : 0;
+ __u16 fileHandle = 0;
+ bool posix_open = false;
+ struct cifs_sb_info *cifs_sb;
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 517f211..54f5786 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -48,14 +48,6 @@ struct wb_writeback_work {
+ };
+
+ /*
+- * Include the creation of the trace points after defining the
+- * wb_writeback_work structure so that the definition remains local to this
+- * file.
+- */
+-#define CREATE_TRACE_POINTS
+-#include <trace/events/writeback.h>
+-
+-/*
+ * We don't actually have pdflush, but this one is exported though /proc...
+ */
+ int nr_pdflush_threads;
+@@ -87,6 +79,14 @@ static inline struct inode *wb_inode(struct list_head *head)
+ return list_entry(head, struct inode, i_wb_list);
+ }
+
++/*
++ * Include the creation of the trace points after defining the
++ * wb_writeback_work structure and inline functions so that the definition
++ * remains local to this file.
++ */
++#define CREATE_TRACE_POINTS
++#include <trace/events/writeback.h>
++
+ /* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
+ static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
+ {
+diff --git a/include/linux/bitops.h b/include/linux/bitops.h
+index a3ef66a..fc8a3ff 100644
+--- a/include/linux/bitops.h
++++ b/include/linux/bitops.h
+@@ -50,6 +50,26 @@ static inline unsigned long hweight_long(unsigned long w)
+ }
+
+ /**
++ * rol64 - rotate a 64-bit value left
++ * @word: value to rotate
++ * @shift: bits to roll
++ */
++static inline __u64 rol64(__u64 word, unsigned int shift)
++{
++ return (word << shift) | (word >> (64 - shift));
++}
++
++/**
++ * ror64 - rotate a 64-bit value right
++ * @word: value to rotate
++ * @shift: bits to roll
++ */
++static inline __u64 ror64(__u64 word, unsigned int shift)
++{
++ return (word >> shift) | (word << (64 - shift));
++}
++
++/**
+ * rol32 - rotate a 32-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
+index 6dc9b80..107fcb3 100644
+--- a/include/linux/mmc/dw_mmc.h
++++ b/include/linux/mmc/dw_mmc.h
+@@ -14,6 +14,8 @@
+ #ifndef LINUX_MMC_DW_MMC_H
+ #define LINUX_MMC_DW_MMC_H
+
++#include <linux/scatterlist.h>
++
+ #define MAX_MCI_SLOTS 2
+
+ enum dw_mci_state {
+@@ -40,7 +42,7 @@ struct mmc_data;
+ * @lock: Spinlock protecting the queue and associated data.
+ * @regs: Pointer to MMIO registers.
+ * @sg: Scatterlist entry currently being processed by PIO code, if any.
+- * @pio_offset: Offset into the current scatterlist entry.
++ * @sg_miter: PIO mapping scatterlist iterator.
+ * @cur_slot: The slot which is currently using the controller.
+ * @mrq: The request currently being processed on @cur_slot,
+ * or NULL if the controller is idle.
+@@ -115,7 +117,7 @@ struct dw_mci {
+ void __iomem *regs;
+
+ struct scatterlist *sg;
+- unsigned int pio_offset;
++ struct sg_mapping_iter sg_miter;
+
+ struct dw_mci_slot *cur_slot;
+ struct mmc_request *mrq;
+diff --git a/include/linux/proportions.h b/include/linux/proportions.h
+index ef35bb7..26a8a4e 100644
+--- a/include/linux/proportions.h
++++ b/include/linux/proportions.h
+@@ -81,7 +81,11 @@ void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
+ * Limit the time part in order to ensure there are some bits left for the
+ * cycle counter and fraction multiply.
+ */
++#if BITS_PER_LONG == 32
+ #define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
++#else
++#define PROP_MAX_SHIFT (BITS_PER_LONG/2)
++#endif
+
+ #define PROP_FRAC_SHIFT (BITS_PER_LONG - PROP_MAX_SHIFT - 1)
+ #define PROP_FRAC_BASE (1UL << PROP_FRAC_SHIFT)
+diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
+index 99d1d0d..1f48f14 100644
+--- a/include/trace/events/writeback.h
++++ b/include/trace/events/writeback.h
+@@ -47,7 +47,10 @@ DECLARE_EVENT_CLASS(writeback_work_class,
+ __field(int, reason)
+ ),
+ TP_fast_assign(
+- strncpy(__entry->name, dev_name(bdi->dev), 32);
++ struct device *dev = bdi->dev;
++ if (!dev)
++ dev = default_backing_dev_info.dev;
++ strncpy(__entry->name, dev_name(dev), 32);
+ __entry->nr_pages = work->nr_pages;
+ __entry->sb_dev = work->sb ? work->sb->s_dev : 0;
+ __entry->sync_mode = work->sync_mode;
+@@ -418,7 +421,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
+
+ TP_fast_assign(
+ strncpy(__entry->name,
+- dev_name(inode->i_mapping->backing_dev_info->dev), 32);
++ dev_name(inode_to_bdi(inode)->dev), 32);
+ __entry->ino = inode->i_ino;
+ __entry->state = inode->i_state;
+ __entry->dirtied_when = inode->dirtied_when;
+diff --git a/kernel/relay.c b/kernel/relay.c
+index 226fade..b6f803a 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -164,10 +164,14 @@ depopulate:
+ */
+ static struct rchan_buf *relay_create_buf(struct rchan *chan)
+ {
+- struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
+- if (!buf)
++ struct rchan_buf *buf;
++
++ if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
+ return NULL;
+
++ buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
++ if (!buf)
++ return NULL;
+ buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
+ if (!buf->padding)
+ goto free_buf;
+@@ -574,6 +578,8 @@ struct rchan *relay_open(const char *base_filename,
+
+ if (!(subbuf_size && n_subbufs))
+ return NULL;
++ if (subbuf_size > UINT_MAX / n_subbufs)
++ return NULL;
+
+ chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
+ if (!chan)
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index 71034f4..2b49dd2 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -318,7 +318,7 @@ static void wakeup_timer_fn(unsigned long data)
+ if (bdi->wb.task) {
+ trace_writeback_wake_thread(bdi);
+ wake_up_process(bdi->wb.task);
+- } else {
++ } else if (bdi->dev) {
+ /*
+ * When bdi tasks are inactive for long time, they are killed.
+ * In this case we have to wake-up the forker thread which
+@@ -584,6 +584,8 @@ EXPORT_SYMBOL(bdi_register_dev);
+ */
+ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
+ {
++ struct task_struct *task;
++
+ if (!bdi_cap_writeback_dirty(bdi))
+ return;
+
+@@ -604,9 +606,14 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
+ * unfreeze of the thread before calling kthread_stop(), otherwise
+ * it would never exet if it is currently stuck in the refrigerator.
+ */
+- if (bdi->wb.task) {
+- thaw_process(bdi->wb.task);
+- kthread_stop(bdi->wb.task);
++ spin_lock_bh(&bdi->wb_lock);
++ task = bdi->wb.task;
++ bdi->wb.task = NULL;
++ spin_unlock_bh(&bdi->wb_lock);
++
++ if (task) {
++ thaw_process(task);
++ kthread_stop(task);
+ }
+ }
+
+@@ -627,7 +634,9 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
+
+ void bdi_unregister(struct backing_dev_info *bdi)
+ {
+- if (bdi->dev) {
++ struct device *dev = bdi->dev;
++
++ if (dev) {
+ bdi_set_min_ratio(bdi, 0);
+ trace_writeback_bdi_unregister(bdi);
+ bdi_prune_sb(bdi);
+@@ -636,8 +645,12 @@ void bdi_unregister(struct backing_dev_info *bdi)
+ if (!bdi_cap_flush_forker(bdi))
+ bdi_wb_shutdown(bdi);
+ bdi_debug_unregister(bdi);
+- device_unregister(bdi->dev);
++
++ spin_lock_bh(&bdi->wb_lock);
+ bdi->dev = NULL;
++ spin_unlock_bh(&bdi->wb_lock);
++
++ device_unregister(dev);
+ }
+ }
+ EXPORT_SYMBOL(bdi_unregister);
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 5c51607..064d20f 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -616,7 +616,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
+ index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+ tid_agg_rx->buf_size;
+ if (!tid_agg_rx->reorder_buf[index] &&
+- tid_agg_rx->stored_mpdu_num > 1) {
++ tid_agg_rx->stored_mpdu_num) {
+ /*
+ * No buffers ready to be released, but check whether any
+ * frames in the reorder buffer have timed out.
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 34e5fcc..9c197d4 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4213,8 +4213,26 @@ enum {
+ PINFIX_PB_M5210,
+ PINFIX_ACER_ASPIRE_7736,
+ PINFIX_ASUS_W90V,
++ ALC889_FIXUP_DAC_ROUTE,
+ };
+
++/* Fix the connection of some pins for ALC889:
++ * At least, Acer Aspire 5935 shows the connections to DAC3/4 don't
++ * work correctly (bko#42740)
++ */
++static void alc889_fixup_dac_route(struct hda_codec *codec,
++ const struct alc_fixup *fix, int action)
++{
++ if (action == ALC_FIXUP_ACT_PRE_PROBE) {
++ hda_nid_t conn1[2] = { 0x0c, 0x0d };
++ hda_nid_t conn2[2] = { 0x0e, 0x0f };
++ snd_hda_override_conn_list(codec, 0x14, 2, conn1);
++ snd_hda_override_conn_list(codec, 0x15, 2, conn1);
++ snd_hda_override_conn_list(codec, 0x18, 2, conn2);
++ snd_hda_override_conn_list(codec, 0x1a, 2, conn2);
++ }
++}
++
+ static const struct alc_fixup alc882_fixups[] = {
+ [PINFIX_ABIT_AW9D_MAX] = {
+ .type = ALC_FIXUP_PINS,
+@@ -4251,10 +4269,15 @@ static const struct alc_fixup alc882_fixups[] = {
+ { }
+ }
+ },
++ [ALC889_FIXUP_DAC_ROUTE] = {
++ .type = ALC_FIXUP_FUNC,
++ .v.func = alc889_fixup_dac_route,
++ },
+ };
+
+ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", PINFIX_PB_M5210),
++ SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE),
+ SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", PINFIX_ASUS_W90V),
+ SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", PINFIX_LENOVO_Y530),
+ SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX),
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index a0a3f50..1fe1308 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -665,6 +665,9 @@ static void via_auto_init_analog_input(struct hda_codec *codec)
+ /* init input-src */
+ for (i = 0; i < spec->num_adc_nids; i++) {
+ int adc_idx = spec->inputs[spec->cur_mux[i]].adc_idx;
++ /* secondary ADCs must have the unique MUX */
++ if (i > 0 && !spec->mux_nids[i])
++ break;
+ if (spec->mux_nids[adc_idx]) {
+ int mux_idx = spec->inputs[spec->cur_mux[i]].mux_idx;
+ snd_hda_codec_write(codec, spec->mux_nids[adc_idx], 0,
+diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
+index 11718b49..55f48fb 100644
+--- a/sound/pci/intel8x0.c
++++ b/sound/pci/intel8x0.c
+@@ -2102,6 +2102,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
+ },
+ {
+ .subvendor = 0x161f,
++ .subdevice = 0x202f,
++ .name = "Gateway M520",
++ .type = AC97_TUNE_INV_EAPD
++ },
++ {
++ .subvendor = 0x161f,
+ .subdevice = 0x203a,
+ .name = "Gateway 4525GZ", /* AD1981B */
+ .type = AC97_TUNE_INV_EAPD
+diff --git a/tools/perf/bench/mem-memcpy-x86-64-asm.S b/tools/perf/bench/mem-memcpy-x86-64-asm.S
+index a57b66e..185a96d 100644
+--- a/tools/perf/bench/mem-memcpy-x86-64-asm.S
++++ b/tools/perf/bench/mem-memcpy-x86-64-asm.S
+@@ -1,2 +1,8 @@
+
+ #include "../../../arch/x86/lib/memcpy_64.S"
++/*
++ * We need to provide note.GNU-stack section, saying that we want
++ * NOT executable stack. Otherwise the final linking will assume that
++ * the ELF stack should not be restricted at all and set it RWX.
++ */
++.section .note.GNU-stack,"",@progbits
+diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
+index d7915d4..efca198 100644
+--- a/tools/perf/util/evsel.c
++++ b/tools/perf/util/evsel.c
+@@ -390,6 +390,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
+
+ data->cpu = data->pid = data->tid = -1;
+ data->stream_id = data->id = data->time = -1ULL;
++ data->period = 1;
+
+ if (event->header.type != PERF_RECORD_SAMPLE) {
+ if (!sample_id_all)
diff --git a/1007_linux-3.2.8.patch b/1007_linux-3.2.8.patch
new file mode 100644
index 00000000..9ca9d538
--- /dev/null
+++ b/1007_linux-3.2.8.patch
@@ -0,0 +1,666 @@
+diff --git a/Makefile b/Makefile
+index d1bdc90..7df8a84 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
+index c9e09ea..a850b4d 100644
+--- a/arch/x86/include/asm/i387.h
++++ b/arch/x86/include/asm/i387.h
+@@ -29,8 +29,8 @@ extern unsigned int sig_xstate_size;
+ extern void fpu_init(void);
+ extern void mxcsr_feature_mask_init(void);
+ extern int init_fpu(struct task_struct *child);
+-extern asmlinkage void math_state_restore(void);
+-extern void __math_state_restore(void);
++extern void __math_state_restore(struct task_struct *);
++extern void math_state_restore(void);
+ extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
+
+ extern user_regset_active_fn fpregs_active, xfpregs_active;
+@@ -212,19 +212,11 @@ static inline void fpu_fxsave(struct fpu *fpu)
+
+ #endif /* CONFIG_X86_64 */
+
+-/* We need a safe address that is cheap to find and that is already
+- in L1 during context switch. The best choices are unfortunately
+- different for UP and SMP */
+-#ifdef CONFIG_SMP
+-#define safe_address (__per_cpu_offset[0])
+-#else
+-#define safe_address (kstat_cpu(0).cpustat.user)
+-#endif
+-
+ /*
+- * These must be called with preempt disabled
++ * These must be called with preempt disabled. Returns
++ * 'true' if the FPU state is still intact.
+ */
+-static inline void fpu_save_init(struct fpu *fpu)
++static inline int fpu_save_init(struct fpu *fpu)
+ {
+ if (use_xsave()) {
+ fpu_xsave(fpu);
+@@ -233,33 +225,33 @@ static inline void fpu_save_init(struct fpu *fpu)
+ * xsave header may indicate the init state of the FP.
+ */
+ if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
+- return;
++ return 1;
+ } else if (use_fxsr()) {
+ fpu_fxsave(fpu);
+ } else {
+ asm volatile("fnsave %[fx]; fwait"
+ : [fx] "=m" (fpu->state->fsave));
+- return;
++ return 0;
+ }
+
+- if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
++ /*
++ * If exceptions are pending, we need to clear them so
++ * that we don't randomly get exceptions later.
++ *
++ * FIXME! Is this perhaps only true for the old-style
++ * irq13 case? Maybe we could leave the x87 state
++ * intact otherwise?
++ */
++ if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
+ asm volatile("fnclex");
+-
+- /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+- is pending. Clear the x87 state here by setting it to fixed
+- values. safe_address is a random variable that should be in L1 */
+- alternative_input(
+- ASM_NOP8 ASM_NOP2,
+- "emms\n\t" /* clear stack tags */
+- "fildl %P[addr]", /* set F?P to defined value */
+- X86_FEATURE_FXSAVE_LEAK,
+- [addr] "m" (safe_address));
++ return 0;
++ }
++ return 1;
+ }
+
+-static inline void __save_init_fpu(struct task_struct *tsk)
++static inline int __save_init_fpu(struct task_struct *tsk)
+ {
+- fpu_save_init(&tsk->thread.fpu);
+- task_thread_info(tsk)->status &= ~TS_USEDFPU;
++ return fpu_save_init(&tsk->thread.fpu);
+ }
+
+ static inline int fpu_fxrstor_checking(struct fpu *fpu)
+@@ -281,39 +273,185 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
+ }
+
+ /*
+- * Signal frame handlers...
++ * Software FPU state helpers. Careful: these need to
++ * be preemption protection *and* they need to be
++ * properly paired with the CR0.TS changes!
+ */
+-extern int save_i387_xstate(void __user *buf);
+-extern int restore_i387_xstate(void __user *buf);
++static inline int __thread_has_fpu(struct task_struct *tsk)
++{
++ return tsk->thread.has_fpu;
++}
+
+-static inline void __unlazy_fpu(struct task_struct *tsk)
++/* Must be paired with an 'stts' after! */
++static inline void __thread_clear_has_fpu(struct task_struct *tsk)
+ {
+- if (task_thread_info(tsk)->status & TS_USEDFPU) {
+- __save_init_fpu(tsk);
+- stts();
+- } else
+- tsk->fpu_counter = 0;
++ tsk->thread.has_fpu = 0;
++}
++
++/* Must be paired with a 'clts' before! */
++static inline void __thread_set_has_fpu(struct task_struct *tsk)
++{
++ tsk->thread.has_fpu = 1;
+ }
+
++/*
++ * Encapsulate the CR0.TS handling together with the
++ * software flag.
++ *
++ * These generally need preemption protection to work,
++ * do try to avoid using these on their own.
++ */
++static inline void __thread_fpu_end(struct task_struct *tsk)
++{
++ __thread_clear_has_fpu(tsk);
++ stts();
++}
++
++static inline void __thread_fpu_begin(struct task_struct *tsk)
++{
++ clts();
++ __thread_set_has_fpu(tsk);
++}
++
++/*
++ * FPU state switching for scheduling.
++ *
++ * This is a two-stage process:
++ *
++ * - switch_fpu_prepare() saves the old state and
++ * sets the new state of the CR0.TS bit. This is
++ * done within the context of the old process.
++ *
++ * - switch_fpu_finish() restores the new state as
++ * necessary.
++ */
++typedef struct { int preload; } fpu_switch_t;
++
++/*
++ * FIXME! We could do a totally lazy restore, but we need to
++ * add a per-cpu "this was the task that last touched the FPU
++ * on this CPU" variable, and the task needs to have a "I last
++ * touched the FPU on this CPU" and check them.
++ *
++ * We don't do that yet, so "fpu_lazy_restore()" always returns
++ * false, but some day..
++ */
++#define fpu_lazy_restore(tsk) (0)
++#define fpu_lazy_state_intact(tsk) do { } while (0)
++
++static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new)
++{
++ fpu_switch_t fpu;
++
++ fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
++ if (__thread_has_fpu(old)) {
++ if (__save_init_fpu(old))
++ fpu_lazy_state_intact(old);
++ __thread_clear_has_fpu(old);
++ old->fpu_counter++;
++
++ /* Don't change CR0.TS if we just switch! */
++ if (fpu.preload) {
++ __thread_set_has_fpu(new);
++ prefetch(new->thread.fpu.state);
++ } else
++ stts();
++ } else {
++ old->fpu_counter = 0;
++ if (fpu.preload) {
++ if (fpu_lazy_restore(new))
++ fpu.preload = 0;
++ else
++ prefetch(new->thread.fpu.state);
++ __thread_fpu_begin(new);
++ }
++ }
++ return fpu;
++}
++
++/*
++ * By the time this gets called, we've already cleared CR0.TS and
++ * given the process the FPU if we are going to preload the FPU
++ * state - all we need to do is to conditionally restore the register
++ * state itself.
++ */
++static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
++{
++ if (fpu.preload)
++ __math_state_restore(new);
++}
++
++/*
++ * Signal frame handlers...
++ */
++extern int save_i387_xstate(void __user *buf);
++extern int restore_i387_xstate(void __user *buf);
++
+ static inline void __clear_fpu(struct task_struct *tsk)
+ {
+- if (task_thread_info(tsk)->status & TS_USEDFPU) {
++ if (__thread_has_fpu(tsk)) {
+ /* Ignore delayed exceptions from user space */
+ asm volatile("1: fwait\n"
+ "2:\n"
+ _ASM_EXTABLE(1b, 2b));
+- task_thread_info(tsk)->status &= ~TS_USEDFPU;
+- stts();
++ __thread_fpu_end(tsk);
+ }
+ }
+
++/*
++ * Were we in an interrupt that interrupted kernel mode?
++ *
++ * We can do a kernel_fpu_begin/end() pair *ONLY* if that
++ * pair does nothing at all: the thread must not have fpu (so
++ * that we don't try to save the FPU state), and TS must
++ * be set (so that the clts/stts pair does nothing that is
++ * visible in the interrupted kernel thread).
++ */
++static inline bool interrupted_kernel_fpu_idle(void)
++{
++ return !__thread_has_fpu(current) &&
++ (read_cr0() & X86_CR0_TS);
++}
++
++/*
++ * Were we in user mode (or vm86 mode) when we were
++ * interrupted?
++ *
++ * Doing kernel_fpu_begin/end() is ok if we are running
++ * in an interrupt context from user mode - we'll just
++ * save the FPU state as required.
++ */
++static inline bool interrupted_user_mode(void)
++{
++ struct pt_regs *regs = get_irq_regs();
++ return regs && user_mode_vm(regs);
++}
++
++/*
++ * Can we use the FPU in kernel mode with the
++ * whole "kernel_fpu_begin/end()" sequence?
++ *
++ * It's always ok in process context (ie "not interrupt")
++ * but it is sometimes ok even from an irq.
++ */
++static inline bool irq_fpu_usable(void)
++{
++ return !in_interrupt() ||
++ interrupted_user_mode() ||
++ interrupted_kernel_fpu_idle();
++}
++
+ static inline void kernel_fpu_begin(void)
+ {
+- struct thread_info *me = current_thread_info();
++ struct task_struct *me = current;
++
++ WARN_ON_ONCE(!irq_fpu_usable());
+ preempt_disable();
+- if (me->status & TS_USEDFPU)
+- __save_init_fpu(me->task);
+- else
++ if (__thread_has_fpu(me)) {
++ __save_init_fpu(me);
++ __thread_clear_has_fpu(me);
++ /* We do 'stts()' in kernel_fpu_end() */
++ } else
+ clts();
+ }
+
+@@ -323,14 +461,6 @@ static inline void kernel_fpu_end(void)
+ preempt_enable();
+ }
+
+-static inline bool irq_fpu_usable(void)
+-{
+- struct pt_regs *regs;
+-
+- return !in_interrupt() || !(regs = get_irq_regs()) || \
+- user_mode(regs) || (read_cr0() & X86_CR0_TS);
+-}
+-
+ /*
+ * Some instructions like VIA's padlock instructions generate a spurious
+ * DNA fault but don't modify SSE registers. And these instructions
+@@ -363,20 +493,64 @@ static inline void irq_ts_restore(int TS_state)
+ }
+
+ /*
++ * The question "does this thread have fpu access?"
++ * is slightly racy, since preemption could come in
++ * and revoke it immediately after the test.
++ *
++ * However, even in that very unlikely scenario,
++ * we can just assume we have FPU access - typically
++ * to save the FP state - we'll just take a #NM
++ * fault and get the FPU access back.
++ *
++ * The actual user_fpu_begin/end() functions
++ * need to be preemption-safe, though.
++ *
++ * NOTE! user_fpu_end() must be used only after you
++ * have saved the FP state, and user_fpu_begin() must
++ * be used only immediately before restoring it.
++ * These functions do not do any save/restore on
++ * their own.
++ */
++static inline int user_has_fpu(void)
++{
++ return __thread_has_fpu(current);
++}
++
++static inline void user_fpu_end(void)
++{
++ preempt_disable();
++ __thread_fpu_end(current);
++ preempt_enable();
++}
++
++static inline void user_fpu_begin(void)
++{
++ preempt_disable();
++ if (!user_has_fpu())
++ __thread_fpu_begin(current);
++ preempt_enable();
++}
++
++/*
+ * These disable preemption on their own and are safe
+ */
+ static inline void save_init_fpu(struct task_struct *tsk)
+ {
++ WARN_ON_ONCE(!__thread_has_fpu(tsk));
+ preempt_disable();
+ __save_init_fpu(tsk);
+- stts();
++ __thread_fpu_end(tsk);
+ preempt_enable();
+ }
+
+ static inline void unlazy_fpu(struct task_struct *tsk)
+ {
+ preempt_disable();
+- __unlazy_fpu(tsk);
++ if (__thread_has_fpu(tsk)) {
++ __save_init_fpu(tsk);
++ __thread_fpu_end(tsk);
++ } else
++ tsk->fpu_counter = 0;
+ preempt_enable();
+ }
+
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index b650435..bb3ee36 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -456,6 +456,7 @@ struct thread_struct {
+ unsigned long trap_no;
+ unsigned long error_code;
+ /* floating point and extended processor state */
++ unsigned long has_fpu;
+ struct fpu fpu;
+ #ifdef CONFIG_X86_32
+ /* Virtual 86 mode info */
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index a1fe5c1..d7ef849 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -242,8 +242,6 @@ static inline struct thread_info *current_thread_info(void)
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+-#define TS_USEDFPU 0x0001 /* FPU was used by this task
+- this quantum (SMP) */
+ #define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
+ #define TS_POLLING 0x0004 /* idle task polling need_resched,
+ skip sending interrupt */
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index 795b79f..8598296 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -297,22 +297,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ *next = &next_p->thread;
+ int cpu = smp_processor_id();
+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
+- bool preload_fpu;
++ fpu_switch_t fpu;
+
+ /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+
+- /*
+- * If the task has used fpu the last 5 timeslices, just do a full
+- * restore of the math state immediately to avoid the trap; the
+- * chances of needing FPU soon are obviously high now
+- */
+- preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
+-
+- __unlazy_fpu(prev_p);
+-
+- /* we're going to use this soon, after a few expensive things */
+- if (preload_fpu)
+- prefetch(next->fpu.state);
++ fpu = switch_fpu_prepare(prev_p, next_p);
+
+ /*
+ * Reload esp0.
+@@ -352,11 +341,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
+ __switch_to_xtra(prev_p, next_p, tss);
+
+- /* If we're going to preload the fpu context, make sure clts
+- is run while we're batching the cpu state updates. */
+- if (preload_fpu)
+- clts();
+-
+ /*
+ * Leave lazy mode, flushing any hypercalls made here.
+ * This must be done before restoring TLS segments so
+@@ -366,15 +350,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ */
+ arch_end_context_switch(next_p);
+
+- if (preload_fpu)
+- __math_state_restore();
+-
+ /*
+ * Restore %gs if needed (which is common)
+ */
+ if (prev->gs | next->gs)
+ lazy_load_gs(next->gs);
+
++ switch_fpu_finish(next_p, fpu);
++
+ percpu_write(current_task, next_p);
+
+ return prev_p;
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index 3bd7e6e..6a364a6 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -381,18 +381,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ int cpu = smp_processor_id();
+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ unsigned fsindex, gsindex;
+- bool preload_fpu;
++ fpu_switch_t fpu;
+
+- /*
+- * If the task has used fpu the last 5 timeslices, just do a full
+- * restore of the math state immediately to avoid the trap; the
+- * chances of needing FPU soon are obviously high now
+- */
+- preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
+-
+- /* we're going to use this soon, after a few expensive things */
+- if (preload_fpu)
+- prefetch(next->fpu.state);
++ fpu = switch_fpu_prepare(prev_p, next_p);
+
+ /*
+ * Reload esp0, LDT and the page table pointer:
+@@ -422,13 +413,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+
+ load_TLS(next, cpu);
+
+- /* Must be after DS reload */
+- __unlazy_fpu(prev_p);
+-
+- /* Make sure cpu is ready for new context */
+- if (preload_fpu)
+- clts();
+-
+ /*
+ * Leave lazy mode, flushing any hypercalls made here.
+ * This must be done before restoring TLS segments so
+@@ -469,6 +453,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
+ prev->gsindex = gsindex;
+
++ switch_fpu_finish(next_p, fpu);
++
+ /*
+ * Switch the PDA and FPU contexts.
+ */
+@@ -487,13 +473,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
+ __switch_to_xtra(prev_p, next_p, tss);
+
+- /*
+- * Preload the FPU context, now that we've determined that the
+- * task is likely to be using it.
+- */
+- if (preload_fpu)
+- __math_state_restore();
+-
+ return prev_p;
+ }
+
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index a8e3eb8..31d9d0f 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -562,25 +562,34 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
+ }
+
+ /*
+- * __math_state_restore assumes that cr0.TS is already clear and the
+- * fpu state is all ready for use. Used during context switch.
++ * This gets called with the process already owning the
++ * FPU state, and with CR0.TS cleared. It just needs to
++ * restore the FPU register state.
+ */
+-void __math_state_restore(void)
++void __math_state_restore(struct task_struct *tsk)
+ {
+- struct thread_info *thread = current_thread_info();
+- struct task_struct *tsk = thread->task;
++ /* We need a safe address that is cheap to find and that is already
++ in L1. We've just brought in "tsk->thread.has_fpu", so use that */
++#define safe_address (tsk->thread.has_fpu)
++
++ /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
++ is pending. Clear the x87 state here by setting it to fixed
++ values. safe_address is a random variable that should be in L1 */
++ alternative_input(
++ ASM_NOP8 ASM_NOP2,
++ "emms\n\t" /* clear stack tags */
++ "fildl %P[addr]", /* set F?P to defined value */
++ X86_FEATURE_FXSAVE_LEAK,
++ [addr] "m" (safe_address));
+
+ /*
+ * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+ */
+ if (unlikely(restore_fpu_checking(tsk))) {
+- stts();
++ __thread_fpu_end(tsk);
+ force_sig(SIGSEGV, tsk);
+ return;
+ }
+-
+- thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
+- tsk->fpu_counter++;
+ }
+
+ /*
+@@ -590,13 +599,12 @@ void __math_state_restore(void)
+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
+ * Don't touch unless you *really* know how it works.
+ *
+- * Must be called with kernel preemption disabled (in this case,
+- * local interrupts are disabled at the call-site in entry.S).
++ * Must be called with kernel preemption disabled (eg with local
++ * local interrupts as in the case of do_device_not_available).
+ */
+-asmlinkage void math_state_restore(void)
++void math_state_restore(void)
+ {
+- struct thread_info *thread = current_thread_info();
+- struct task_struct *tsk = thread->task;
++ struct task_struct *tsk = current;
+
+ if (!tsk_used_math(tsk)) {
+ local_irq_enable();
+@@ -613,9 +621,10 @@ asmlinkage void math_state_restore(void)
+ local_irq_disable();
+ }
+
+- clts(); /* Allow maths ops (or we recurse) */
++ __thread_fpu_begin(tsk);
++ __math_state_restore(tsk);
+
+- __math_state_restore();
++ tsk->fpu_counter++;
+ }
+ EXPORT_SYMBOL_GPL(math_state_restore);
+
+diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
+index a391134..7110911 100644
+--- a/arch/x86/kernel/xsave.c
++++ b/arch/x86/kernel/xsave.c
+@@ -47,7 +47,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
+ if (!fx)
+ return;
+
+- BUG_ON(task_thread_info(tsk)->status & TS_USEDFPU);
++ BUG_ON(__thread_has_fpu(tsk));
+
+ xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
+
+@@ -168,7 +168,7 @@ int save_i387_xstate(void __user *buf)
+ if (!used_math())
+ return 0;
+
+- if (task_thread_info(tsk)->status & TS_USEDFPU) {
++ if (user_has_fpu()) {
+ if (use_xsave())
+ err = xsave_user(buf);
+ else
+@@ -176,8 +176,7 @@ int save_i387_xstate(void __user *buf)
+
+ if (err)
+ return err;
+- task_thread_info(tsk)->status &= ~TS_USEDFPU;
+- stts();
++ user_fpu_end();
+ } else {
+ sanitize_i387_state(tsk);
+ if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave,
+@@ -292,10 +291,7 @@ int restore_i387_xstate(void __user *buf)
+ return err;
+ }
+
+- if (!(task_thread_info(current)->status & TS_USEDFPU)) {
+- clts();
+- task_thread_info(current)->status |= TS_USEDFPU;
+- }
++ user_fpu_begin();
+ if (use_xsave())
+ err = restore_user_xstate(buf);
+ else
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 579a0b5..4ea7678 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1456,7 +1456,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
+ #ifdef CONFIG_X86_64
+ wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+ #endif
+- if (current_thread_info()->status & TS_USEDFPU)
++ if (__thread_has_fpu(current))
+ clts();
+ load_gdt(&__get_cpu_var(host_gdt));
+ }
diff --git a/1008_linux-3.2.9.patch b/1008_linux-3.2.9.patch
new file mode 100644
index 00000000..7cac2cf2
--- /dev/null
+++ b/1008_linux-3.2.9.patch
@@ -0,0 +1,3675 @@
+diff --git a/Makefile b/Makefile
+index 7df8a84..5f1739b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
+index 8d8df74..67abef5 100644
+--- a/arch/arm/common/pl330.c
++++ b/arch/arm/common/pl330.c
+@@ -1496,12 +1496,13 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
+ struct pl330_thread *thrd = ch_id;
+ struct pl330_dmac *pl330;
+ unsigned long flags;
+- int ret = 0, active = thrd->req_running;
++ int ret = 0, active;
+
+ if (!thrd || thrd->free || thrd->dmac->state == DYING)
+ return -EINVAL;
+
+ pl330 = thrd->dmac;
++ active = thrd->req_running;
+
+ spin_lock_irqsave(&pl330->lock, flags);
+
+diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
+index 29035e8..7bb8bf9 100644
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -137,6 +137,11 @@
+ disable_irq
+ .endm
+
++ .macro save_and_disable_irqs_notrace, oldcpsr
++ mrs \oldcpsr, cpsr
++ disable_irq_notrace
++ .endm
++
+ /*
+ * Restore interrupt state previously stored in a register. We don't
+ * guarantee that this will preserve the flags.
+diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
+index ad93068..143eebb 100644
+--- a/arch/arm/mach-at91/at91rm9200_devices.c
++++ b/arch/arm/mach-at91/at91rm9200_devices.c
+@@ -83,7 +83,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
+ * USB Device (Gadget)
+ * -------------------------------------------------------------------- */
+
+-#ifdef CONFIG_USB_AT91
++#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE)
+ static struct at91_udc_data udc_data;
+
+ static struct resource udc_resources[] = {
+diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
+index 629fa97..2590988 100644
+--- a/arch/arm/mach-at91/at91sam9260_devices.c
++++ b/arch/arm/mach-at91/at91sam9260_devices.c
+@@ -84,7 +84,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
+ * USB Device (Gadget)
+ * -------------------------------------------------------------------- */
+
+-#ifdef CONFIG_USB_AT91
++#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE)
+ static struct at91_udc_data udc_data;
+
+ static struct resource udc_resources[] = {
+diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
+index a178b58..daf3e66 100644
+--- a/arch/arm/mach-at91/at91sam9261_devices.c
++++ b/arch/arm/mach-at91/at91sam9261_devices.c
+@@ -87,7 +87,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
+ * USB Device (Gadget)
+ * -------------------------------------------------------------------- */
+
+-#ifdef CONFIG_USB_AT91
++#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE)
+ static struct at91_udc_data udc_data;
+
+ static struct resource udc_resources[] = {
+diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
+index d5fbac9..32a7e43 100644
+--- a/arch/arm/mach-at91/at91sam9263_devices.c
++++ b/arch/arm/mach-at91/at91sam9263_devices.c
+@@ -92,7 +92,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
+ * USB Device (Gadget)
+ * -------------------------------------------------------------------- */
+
+-#ifdef CONFIG_USB_AT91
++#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE)
+ static struct at91_udc_data udc_data;
+
+ static struct resource udc_resources[] = {
+diff --git a/arch/arm/mach-omap2/vp.c b/arch/arm/mach-omap2/vp.c
+index 66bd700..3b52027 100644
+--- a/arch/arm/mach-omap2/vp.c
++++ b/arch/arm/mach-omap2/vp.c
+@@ -41,6 +41,11 @@ void __init omap_vp_init(struct voltagedomain *voltdm)
+ u32 val, sys_clk_rate, timeout, waittime;
+ u32 vddmin, vddmax, vstepmin, vstepmax;
+
++ if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
++ pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name);
++ return;
++ }
++
+ if (!voltdm->read || !voltdm->write) {
+ pr_err("%s: No read/write API for accessing vdd_%s regs\n",
+ __func__, voltdm->name);
+diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
+index 07c4bc8..a655d3d 100644
+--- a/arch/arm/mm/cache-v7.S
++++ b/arch/arm/mm/cache-v7.S
+@@ -54,9 +54,15 @@ loop1:
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt skip @ skip if no cache, or just i-cache
++#ifdef CONFIG_PREEMPT
++ save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic
++#endif
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sych the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
++#ifdef CONFIG_PREEMPT
++ restore_irqs_notrace r9
++#endif
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
+index 10a140f..64483fd 100644
+--- a/arch/powerpc/kernel/perf_event.c
++++ b/arch/powerpc/kernel/perf_event.c
+@@ -865,6 +865,7 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
+ {
+ unsigned long flags;
+ s64 left;
++ unsigned long val;
+
+ if (!event->hw.idx || !event->hw.sample_period)
+ return;
+@@ -880,7 +881,12 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
+
+ event->hw.state = 0;
+ left = local64_read(&event->hw.period_left);
+- write_pmc(event->hw.idx, left);
++
++ val = 0;
++ if (left < 0x80000000L)
++ val = 0x80000000L - left;
++
++ write_pmc(event->hw.idx, val);
+
+ perf_event_update_userpage(event);
+ perf_pmu_enable(event->pmu);
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index ebbfab3..e03c555 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -113,11 +113,14 @@ static void fixup_clock_comparator(unsigned long long delta)
+ static int s390_next_ktime(ktime_t expires,
+ struct clock_event_device *evt)
+ {
++ struct timespec ts;
+ u64 nsecs;
+
+- nsecs = ktime_to_ns(ktime_sub(expires, ktime_get_monotonic_offset()));
++ ts.tv_sec = ts.tv_nsec = 0;
++ monotonic_to_bootbased(&ts);
++ nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
+ do_div(nsecs, 125);
+- S390_lowcore.clock_comparator = TOD_UNIX_EPOCH + (nsecs << 9);
++ S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
+ set_clock_comparator(S390_lowcore.clock_comparator);
+ return 0;
+ }
+diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
+index a3b0811..0e89635 100644
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -326,8 +326,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
+ l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
+ }
+
+-static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
+- int index)
++static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
+ {
+ int node;
+
+@@ -725,14 +724,16 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
+ #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
+
+ #ifdef CONFIG_SMP
+-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
++
++static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
+ {
+- struct _cpuid4_info *this_leaf, *sibling_leaf;
+- unsigned long num_threads_sharing;
+- int index_msb, i, sibling;
++ struct _cpuid4_info *this_leaf;
++ int ret, i, sibling;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+- if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
++ ret = 0;
++ if (index == 3) {
++ ret = 1;
+ for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
+ if (!per_cpu(ici_cpuid4_info, i))
+ continue;
+@@ -743,8 +744,35 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+ set_bit(sibling, this_leaf->shared_cpu_map);
+ }
+ }
+- return;
++ } else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {
++ ret = 1;
++ for_each_cpu(i, cpu_sibling_mask(cpu)) {
++ if (!per_cpu(ici_cpuid4_info, i))
++ continue;
++ this_leaf = CPUID4_INFO_IDX(i, index);
++ for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
++ if (!cpu_online(sibling))
++ continue;
++ set_bit(sibling, this_leaf->shared_cpu_map);
++ }
++ }
+ }
++
++ return ret;
++}
++
++static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
++{
++ struct _cpuid4_info *this_leaf, *sibling_leaf;
++ unsigned long num_threads_sharing;
++ int index_msb, i;
++ struct cpuinfo_x86 *c = &cpu_data(cpu);
++
++ if (c->x86_vendor == X86_VENDOR_AMD) {
++ if (cache_shared_amd_cpu_map_setup(cpu, index))
++ return;
++ }
++
+ this_leaf = CPUID4_INFO_IDX(cpu, index);
+ num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
+
+diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
+index 666f6f5..64004b0 100644
+--- a/drivers/base/regmap/regcache.c
++++ b/drivers/base/regmap/regcache.c
+@@ -54,7 +54,7 @@ static int regcache_hw_init(struct regmap *map)
+ for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
+ val = regcache_get_val(map->reg_defaults_raw,
+ i, map->cache_word_size);
+- if (!val)
++ if (regmap_volatile(map, i))
+ continue;
+ count++;
+ }
+@@ -69,7 +69,7 @@ static int regcache_hw_init(struct regmap *map)
+ for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
+ val = regcache_get_val(map->reg_defaults_raw,
+ i, map->cache_word_size);
+- if (!val)
++ if (regmap_volatile(map, i))
+ continue;
+ map->reg_defaults[j].reg = i;
+ map->reg_defaults[j].def = val;
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index cedb231..2678b6f 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2120,11 +2120,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
+ if (!nr)
+ return -ENOMEM;
+
+- if (!access_ok(VERIFY_WRITE, ubuf, nframes * CD_FRAMESIZE_RAW)) {
+- ret = -EFAULT;
+- goto out;
+- }
+-
+ cgc.data_direction = CGC_DATA_READ;
+ while (nframes > 0) {
+ if (nr > nframes)
+@@ -2133,7 +2128,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
+ ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW);
+ if (ret)
+ break;
+- if (__copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
++ if (copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
+ ret = -EFAULT;
+ break;
+ }
+@@ -2141,7 +2136,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
+ nframes -= nr;
+ lba += nr;
+ }
+-out:
+ kfree(cgc.buffer);
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index 31b0d1a..fad7cd1 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -789,9 +789,7 @@ int r100_irq_process(struct radeon_device *rdev)
+ WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
+ break;
+ default:
+- msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
+- WREG32(RADEON_MSI_REARM_EN, msi_rearm);
+- WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
++ WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
+ break;
+ }
+ }
+diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
+index c259e21..ee898e9 100644
+--- a/drivers/gpu/drm/radeon/rs600.c
++++ b/drivers/gpu/drm/radeon/rs600.c
+@@ -693,9 +693,7 @@ int rs600_irq_process(struct radeon_device *rdev)
+ WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
+ break;
+ default:
+- msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
+- WREG32(RADEON_MSI_REARM_EN, msi_rearm);
+- WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
++ WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
+ break;
+ }
+ }
+diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
+index eedca3c..dd87ae9 100644
+--- a/drivers/hwmon/ads1015.c
++++ b/drivers/hwmon/ads1015.c
+@@ -271,7 +271,7 @@ static int ads1015_probe(struct i2c_client *client,
+ continue;
+ err = device_create_file(&client->dev, &ads1015_in[k].dev_attr);
+ if (err)
+- goto exit_free;
++ goto exit_remove;
+ }
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+@@ -285,7 +285,6 @@ static int ads1015_probe(struct i2c_client *client,
+ exit_remove:
+ for (k = 0; k < ADS1015_CHANNELS; ++k)
+ device_remove_file(&client->dev, &ads1015_in[k].dev_attr);
+-exit_free:
+ kfree(data);
+ exit:
+ return err;
+diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
+index e4ab491..040a820 100644
+--- a/drivers/hwmon/f75375s.c
++++ b/drivers/hwmon/f75375s.c
+@@ -304,8 +304,6 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
+ case 0: /* Full speed */
+ fanmode |= (3 << FAN_CTRL_MODE(nr));
+ data->pwm[nr] = 255;
+- f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
+- data->pwm[nr]);
+ break;
+ case 1: /* PWM */
+ fanmode |= (3 << FAN_CTRL_MODE(nr));
+@@ -318,6 +316,9 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
+ }
+ f75375_write8(client, F75375_REG_FAN_TIMER, fanmode);
+ data->pwm_enable[nr] = val;
++ if (val == 0)
++ f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
++ data->pwm[nr]);
+ return 0;
+ }
+
+diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
+index f20d997..8c3df04 100644
+--- a/drivers/hwmon/max6639.c
++++ b/drivers/hwmon/max6639.c
+@@ -72,8 +72,8 @@ static unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END };
+
+ static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
+
+-#define FAN_FROM_REG(val, div, rpm_range) ((val) == 0 ? -1 : \
+- (val) == 255 ? 0 : (rpm_ranges[rpm_range] * 30) / ((div + 1) * (val)))
++#define FAN_FROM_REG(val, rpm_range) ((val) == 0 || (val) == 255 ? \
++ 0 : (rpm_ranges[rpm_range] * 30) / (val))
+ #define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255)
+
+ /*
+@@ -333,7 +333,7 @@ static ssize_t show_fan_input(struct device *dev,
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index],
+- data->ppr, data->rpm_range));
++ data->rpm_range));
+ }
+
+ static ssize_t show_alarm(struct device *dev,
+@@ -429,9 +429,9 @@ static int max6639_init_client(struct i2c_client *client)
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct max6639_platform_data *max6639_info =
+ client->dev.platform_data;
+- int i = 0;
++ int i;
+ int rpm_range = 1; /* default: 4000 RPM */
+- int err = 0;
++ int err;
+
+ /* Reset chip to default values, see below for GCONFIG setup */
+ err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG,
+@@ -446,11 +446,6 @@ static int max6639_init_client(struct i2c_client *client)
+ else
+ data->ppr = 2;
+ data->ppr -= 1;
+- err = i2c_smbus_write_byte_data(client,
+- MAX6639_REG_FAN_PPR(i),
+- data->ppr << 5);
+- if (err)
+- goto exit;
+
+ if (max6639_info)
+ rpm_range = rpm_range_to_reg(max6639_info->rpm_range);
+@@ -458,6 +453,13 @@ static int max6639_init_client(struct i2c_client *client)
+
+ for (i = 0; i < 2; i++) {
+
++ /* Set Fan pulse per revolution */
++ err = i2c_smbus_write_byte_data(client,
++ MAX6639_REG_FAN_PPR(i),
++ data->ppr << 6);
++ if (err)
++ goto exit;
++
+ /* Fans config PWM, RPM */
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_CONFIG1(i),
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
+index b3cc1e0..86df632 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib.h
++++ b/drivers/infiniband/ulp/ipoib/ipoib.h
+@@ -44,6 +44,7 @@
+ #include <linux/mutex.h>
+
+ #include <net/neighbour.h>
++#include <net/sch_generic.h>
+
+ #include <linux/atomic.h>
+
+@@ -117,8 +118,9 @@ struct ipoib_header {
+ u16 reserved;
+ };
+
+-struct ipoib_pseudoheader {
+- u8 hwaddr[INFINIBAND_ALEN];
++struct ipoib_cb {
++ struct qdisc_skb_cb qdisc_cb;
++ u8 hwaddr[INFINIBAND_ALEN];
+ };
+
+ /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index 83695b4..fe2fdbb 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -658,7 +658,7 @@ static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
+ }
+
+ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
+- struct ipoib_pseudoheader *phdr)
++ struct ipoib_cb *cb)
+ {
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_path *path;
+@@ -666,17 +666,15 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+- path = __path_find(dev, phdr->hwaddr + 4);
++ path = __path_find(dev, cb->hwaddr + 4);
+ if (!path || !path->valid) {
+ int new_path = 0;
+
+ if (!path) {
+- path = path_rec_create(dev, phdr->hwaddr + 4);
++ path = path_rec_create(dev, cb->hwaddr + 4);
+ new_path = 1;
+ }
+ if (path) {
+- /* put pseudoheader back on for next time */
+- skb_push(skb, sizeof *phdr);
+ __skb_queue_tail(&path->queue, skb);
+
+ if (!path->query && path_rec_start(dev, path)) {
+@@ -700,12 +698,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
+ be16_to_cpu(path->pathrec.dlid));
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+- ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
++ ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
+ return;
+ } else if ((path->query || !path_rec_start(dev, path)) &&
+ skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+- /* put pseudoheader back on for next time */
+- skb_push(skb, sizeof *phdr);
+ __skb_queue_tail(&path->queue, skb);
+ } else {
+ ++dev->stats.tx_dropped;
+@@ -774,16 +770,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ dev_kfree_skb_any(skb);
+ }
+ } else {
+- struct ipoib_pseudoheader *phdr =
+- (struct ipoib_pseudoheader *) skb->data;
+- skb_pull(skb, sizeof *phdr);
++ struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
+
+- if (phdr->hwaddr[4] == 0xff) {
++ if (cb->hwaddr[4] == 0xff) {
+ /* Add in the P_Key for multicast*/
+- phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+- phdr->hwaddr[9] = priv->pkey & 0xff;
++ cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
++ cb->hwaddr[9] = priv->pkey & 0xff;
+
+- ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
++ ipoib_mcast_send(dev, cb->hwaddr + 4, skb);
+ } else {
+ /* unicast GID -- should be ARP or RARP reply */
+
+@@ -792,14 +786,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
+ skb_dst(skb) ? "neigh" : "dst",
+ be16_to_cpup((__be16 *) skb->data),
+- IPOIB_QPN(phdr->hwaddr),
+- phdr->hwaddr + 4);
++ IPOIB_QPN(cb->hwaddr),
++ cb->hwaddr + 4);
+ dev_kfree_skb_any(skb);
+ ++dev->stats.tx_dropped;
+ goto unlock;
+ }
+
+- unicast_arp_send(skb, dev, phdr);
++ unicast_arp_send(skb, dev, cb);
+ }
+ }
+ unlock:
+@@ -825,8 +819,6 @@ static int ipoib_hard_header(struct sk_buff *skb,
+ const void *daddr, const void *saddr, unsigned len)
+ {
+ struct ipoib_header *header;
+- struct dst_entry *dst;
+- struct neighbour *n;
+
+ header = (struct ipoib_header *) skb_push(skb, sizeof *header);
+
+@@ -834,18 +826,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
+ header->reserved = 0;
+
+ /*
+- * If we don't have a neighbour structure, stuff the
+- * destination address onto the front of the skb so we can
+- * figure out where to send the packet later.
++ * If we don't have a dst_entry structure, stuff the
++ * destination address into skb->cb so we can figure out where
++ * to send the packet later.
+ */
+- dst = skb_dst(skb);
+- n = NULL;
+- if (dst)
+- n = dst_get_neighbour_raw(dst);
+- if ((!dst || !n) && daddr) {
+- struct ipoib_pseudoheader *phdr =
+- (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
+- memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
++ if (!skb_dst(skb)) {
++ struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
++ memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
+ }
+
+ return 0;
+@@ -1021,11 +1008,7 @@ static void ipoib_setup(struct net_device *dev)
+
+ dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
+
+- /*
+- * We add in INFINIBAND_ALEN to allow for the destination
+- * address "pseudoheader" for skbs without neighbour struct.
+- */
+- dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
++ dev->hard_header_len = IPOIB_ENCAP_LEN;
+ dev->addr_len = INFINIBAND_ALEN;
+ dev->type = ARPHRD_INFINIBAND;
+ dev->tx_queue_len = ipoib_sendq_size * 2;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+index 873bff9..e5069b4 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -262,21 +262,14 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
+ netif_tx_lock_bh(dev);
+ while (!skb_queue_empty(&mcast->pkt_queue)) {
+ struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
+- struct dst_entry *dst = skb_dst(skb);
+- struct neighbour *n = NULL;
+
+ netif_tx_unlock_bh(dev);
+
+ skb->dev = dev;
+- if (dst)
+- n = dst_get_neighbour_raw(dst);
+- if (!dst || !n) {
+- /* put pseudoheader back on for next time */
+- skb_push(skb, sizeof (struct ipoib_pseudoheader));
+- }
+
+ if (dev_queue_xmit(skb))
+ ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
++
+ netif_tx_lock_bh(dev);
+ }
+ netif_tx_unlock_bh(dev);
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index 6ed9646..3f175eb 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -47,7 +47,7 @@
+ #define MOD_AUTHOR "Jarod Wilson <jarod@wilsonet.com>"
+ #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display"
+ #define MOD_NAME "imon"
+-#define MOD_VERSION "0.9.3"
++#define MOD_VERSION "0.9.4"
+
+ #define DISPLAY_MINOR_BASE 144
+ #define DEVICE_NAME "lcd%d"
+@@ -1658,9 +1658,17 @@ static void usb_rx_callback_intf0(struct urb *urb)
+ return;
+
+ ictx = (struct imon_context *)urb->context;
+- if (!ictx || !ictx->dev_present_intf0)
++ if (!ictx)
+ return;
+
++ /*
++ * if we get a callback before we're done configuring the hardware, we
++ * can't yet process the data, as there's nowhere to send it, but we
++ * still need to submit a new rx URB to avoid wedging the hardware
++ */
++ if (!ictx->dev_present_intf0)
++ goto out;
++
+ switch (urb->status) {
+ case -ENOENT: /* usbcore unlink successful! */
+ return;
+@@ -1678,6 +1686,7 @@ static void usb_rx_callback_intf0(struct urb *urb)
+ break;
+ }
+
++out:
+ usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC);
+ }
+
+@@ -1690,9 +1699,17 @@ static void usb_rx_callback_intf1(struct urb *urb)
+ return;
+
+ ictx = (struct imon_context *)urb->context;
+- if (!ictx || !ictx->dev_present_intf1)
++ if (!ictx)
+ return;
+
++ /*
++ * if we get a callback before we're done configuring the hardware, we
++ * can't yet process the data, as there's nowhere to send it, but we
++ * still need to submit a new rx URB to avoid wedging the hardware
++ */
++ if (!ictx->dev_present_intf1)
++ goto out;
++
+ switch (urb->status) {
+ case -ENOENT: /* usbcore unlink successful! */
+ return;
+@@ -1710,6 +1727,7 @@ static void usb_rx_callback_intf1(struct urb *urb)
+ break;
+ }
+
++out:
+ usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC);
+ }
+
+@@ -2242,7 +2260,7 @@ find_endpoint_failed:
+ mutex_unlock(&ictx->lock);
+ usb_free_urb(rx_urb);
+ rx_urb_alloc_failed:
+- dev_err(ictx->dev, "unable to initialize intf0, err %d\n", ret);
++ dev_err(ictx->dev, "unable to initialize intf1, err %d\n", ret);
+
+ return NULL;
+ }
+diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
+index 087f7c0..41fd57b 100644
+--- a/drivers/media/video/hdpvr/hdpvr-video.c
++++ b/drivers/media/video/hdpvr/hdpvr-video.c
+@@ -283,12 +283,13 @@ static int hdpvr_start_streaming(struct hdpvr_device *dev)
+
+ hdpvr_config_call(dev, CTRL_START_STREAMING_VALUE, 0x00);
+
++ dev->status = STATUS_STREAMING;
++
+ INIT_WORK(&dev->worker, hdpvr_transmit_buffers);
+ queue_work(dev->workqueue, &dev->worker);
+
+ v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev,
+ "streaming started\n");
+- dev->status = STATUS_STREAMING;
+
+ return 0;
+ }
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 1e0e27c..e15e47d 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -266,6 +266,9 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
+ goto idata_err;
+ }
+
++ if (!idata->buf_bytes)
++ return idata;
++
+ idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
+ if (!idata->buf) {
+ err = -ENOMEM;
+@@ -312,25 +315,6 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+ if (IS_ERR(idata))
+ return PTR_ERR(idata);
+
+- cmd.opcode = idata->ic.opcode;
+- cmd.arg = idata->ic.arg;
+- cmd.flags = idata->ic.flags;
+-
+- data.sg = &sg;
+- data.sg_len = 1;
+- data.blksz = idata->ic.blksz;
+- data.blocks = idata->ic.blocks;
+-
+- sg_init_one(data.sg, idata->buf, idata->buf_bytes);
+-
+- if (idata->ic.write_flag)
+- data.flags = MMC_DATA_WRITE;
+- else
+- data.flags = MMC_DATA_READ;
+-
+- mrq.cmd = &cmd;
+- mrq.data = &data;
+-
+ md = mmc_blk_get(bdev->bd_disk);
+ if (!md) {
+ err = -EINVAL;
+@@ -343,6 +327,48 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+ goto cmd_done;
+ }
+
++ cmd.opcode = idata->ic.opcode;
++ cmd.arg = idata->ic.arg;
++ cmd.flags = idata->ic.flags;
++
++ if (idata->buf_bytes) {
++ data.sg = &sg;
++ data.sg_len = 1;
++ data.blksz = idata->ic.blksz;
++ data.blocks = idata->ic.blocks;
++
++ sg_init_one(data.sg, idata->buf, idata->buf_bytes);
++
++ if (idata->ic.write_flag)
++ data.flags = MMC_DATA_WRITE;
++ else
++ data.flags = MMC_DATA_READ;
++
++ /* data.flags must already be set before doing this. */
++ mmc_set_data_timeout(&data, card);
++
++ /* Allow overriding the timeout_ns for empirical tuning. */
++ if (idata->ic.data_timeout_ns)
++ data.timeout_ns = idata->ic.data_timeout_ns;
++
++ if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
++ /*
++ * Pretend this is a data transfer and rely on the
++ * host driver to compute timeout. When all host
++ * drivers support cmd.cmd_timeout for R1B, this
++ * can be changed to:
++ *
++ * mrq.data = NULL;
++ * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
++ */
++ data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
++ }
++
++ mrq.data = &data;
++ }
++
++ mrq.cmd = &cmd;
++
+ mmc_claim_host(card->host);
+
+ if (idata->ic.is_acmd) {
+@@ -351,24 +377,6 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+ goto cmd_rel_host;
+ }
+
+- /* data.flags must already be set before doing this. */
+- mmc_set_data_timeout(&data, card);
+- /* Allow overriding the timeout_ns for empirical tuning. */
+- if (idata->ic.data_timeout_ns)
+- data.timeout_ns = idata->ic.data_timeout_ns;
+-
+- if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+- /*
+- * Pretend this is a data transfer and rely on the host driver
+- * to compute timeout. When all host drivers support
+- * cmd.cmd_timeout for R1B, this can be changed to:
+- *
+- * mrq.data = NULL;
+- * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
+- */
+- data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
+- }
+-
+ mmc_wait_for_req(card->host, &mrq);
+
+ if (cmd.error) {
+diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
+index 04a3f1b..192b0d1 100644
+--- a/drivers/net/can/sja1000/sja1000.c
++++ b/drivers/net/can/sja1000/sja1000.c
+@@ -95,11 +95,16 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
+ spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
+ }
+
++static int sja1000_is_absent(struct sja1000_priv *priv)
++{
++ return (priv->read_reg(priv, REG_MOD) == 0xFF);
++}
++
+ static int sja1000_probe_chip(struct net_device *dev)
+ {
+ struct sja1000_priv *priv = netdev_priv(dev);
+
+- if (priv->reg_base && (priv->read_reg(priv, 0) == 0xFF)) {
++ if (priv->reg_base && sja1000_is_absent(priv)) {
+ printk(KERN_INFO "%s: probing @0x%lX failed\n",
+ DRV_NAME, dev->base_addr);
+ return 0;
+@@ -493,6 +498,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+ while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
+ n++;
+ status = priv->read_reg(priv, REG_SR);
++ /* check for absent controller due to hw unplug */
++ if (status == 0xFF && sja1000_is_absent(priv))
++ return IRQ_NONE;
+
+ if (isrc & IRQ_WUI)
+ dev_warn(dev->dev.parent, "wakeup interrupt\n");
+@@ -509,6 +517,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+ while (status & SR_RBS) {
+ sja1000_rx(dev);
+ status = priv->read_reg(priv, REG_SR);
++ /* check for absent controller */
++ if (status == 0xFF && sja1000_is_absent(priv))
++ return IRQ_NONE;
+ }
+ }
+ if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
+diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
+index b42c06b..e0c5529 100644
+--- a/drivers/net/ethernet/3com/3c59x.c
++++ b/drivers/net/ethernet/3com/3c59x.c
+@@ -1842,7 +1842,7 @@ vortex_timer(unsigned long data)
+ ok = 1;
+ }
+
+- if (!netif_carrier_ok(dev))
++ if (dev->flags & IFF_SLAVE || !netif_carrier_ok(dev))
+ next_tick = 5*HZ;
+
+ if (vp->medialock)
+diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
+index 76b8457..ab784e0 100644
+--- a/drivers/net/ethernet/jme.c
++++ b/drivers/net/ethernet/jme.c
+@@ -2328,19 +2328,11 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
+ ((new_mtu) < IPV6_MIN_MTU))
+ return -EINVAL;
+
+- if (new_mtu > 4000) {
+- jme->reg_rxcs &= ~RXCS_FIFOTHNP;
+- jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
+- jme_restart_rx_engine(jme);
+- } else {
+- jme->reg_rxcs &= ~RXCS_FIFOTHNP;
+- jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
+- jme_restart_rx_engine(jme);
+- }
+
+ netdev->mtu = new_mtu;
+ netdev_update_features(netdev);
+
++ jme_restart_rx_engine(jme);
+ jme_reset_link(jme);
+
+ return 0;
+diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
+index 4304072..3efc897 100644
+--- a/drivers/net/ethernet/jme.h
++++ b/drivers/net/ethernet/jme.h
+@@ -730,7 +730,7 @@ enum jme_rxcs_values {
+ RXCS_RETRYCNT_60 = 0x00000F00,
+
+ RXCS_DEFAULT = RXCS_FIFOTHTP_128T |
+- RXCS_FIFOTHNP_128QW |
++ RXCS_FIFOTHNP_16QW |
+ RXCS_DMAREQSZ_128B |
+ RXCS_RETRYGAP_256ns |
+ RXCS_RETRYCNT_32,
+diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
+index 815c797..22f2788 100644
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -1007,7 +1007,7 @@ static void emac_rx_handler(void *token, int len, int status)
+ int ret;
+
+ /* free and bail if we are shutting down */
+- if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
++ if (unlikely(!netif_running(ndev))) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+@@ -1036,7 +1036,9 @@ static void emac_rx_handler(void *token, int len, int status)
+ recycle:
+ ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+ skb_tailroom(skb), GFP_KERNEL);
+- if (WARN_ON(ret < 0))
++
++ WARN_ON(ret == -ENOMEM);
++ if (unlikely(ret < 0))
+ dev_kfree_skb_any(skb);
+ }
+
+diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
+index 4535d7c..e015a57 100644
+--- a/drivers/net/ethernet/via/via-velocity.c
++++ b/drivers/net/ethernet/via/via-velocity.c
+@@ -2489,9 +2489,6 @@ static int velocity_close(struct net_device *dev)
+ if (dev->irq != 0)
+ free_irq(dev->irq, dev);
+
+- /* Power down the chip */
+- pci_set_power_state(vptr->pdev, PCI_D3hot);
+-
+ velocity_free_rings(vptr);
+
+ vptr->flags &= (~VELOCITY_FLAGS_OPENED);
+diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
+index 13c1f04..ad96164 100644
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -60,6 +60,7 @@
+ #define USB_PRODUCT_IPHONE_3GS 0x1294
+ #define USB_PRODUCT_IPHONE_4 0x1297
+ #define USB_PRODUCT_IPHONE_4_VZW 0x129c
++#define USB_PRODUCT_IPHONE_4S 0x12a0
+
+ #define IPHETH_USBINTF_CLASS 255
+ #define IPHETH_USBINTF_SUBCLASS 253
+@@ -103,6 +104,10 @@ static struct usb_device_id ipheth_table[] = {
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
++ { USB_DEVICE_AND_INTERFACE_INFO(
++ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S,
++ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
++ IPHETH_USBINTF_PROTO) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(usb, ipheth_table);
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index ef883e9..b907398 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -423,7 +423,9 @@ static void veth_dellink(struct net_device *dev, struct list_head *head)
+ unregister_netdevice_queue(peer, head);
+ }
+
+-static const struct nla_policy veth_policy[VETH_INFO_MAX + 1];
++static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
++ [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
++};
+
+ static struct rtnl_link_ops veth_link_ops = {
+ .kind = DRV_NAME,
+diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
+index 528d5f3..64af11f 100644
+--- a/drivers/net/wireless/ath/ath9k/rc.c
++++ b/drivers/net/wireless/ath/ath9k/rc.c
+@@ -1347,7 +1347,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
+ fc = hdr->frame_control;
+ for (i = 0; i < sc->hw->max_rates; i++) {
+ struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
+- if (!rate->count)
++ if (rate->idx < 0 || !rate->count)
+ break;
+
+ final_ts_idx = i;
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 04e74f4..dfee1b3 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -651,6 +651,11 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
+ dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
+ secondary, subordinate, pass);
+
++ if (!primary && (primary != bus->number) && secondary && subordinate) {
++ dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
++ primary = bus->number;
++ }
++
+ /* Check if setup is sensible at all */
+ if (!pass &&
+ (primary != bus->number || secondary <= bus->number)) {
+diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
+index d329f8b..2aeaf5c 100644
+--- a/drivers/scsi/scsi_pm.c
++++ b/drivers/scsi/scsi_pm.c
+@@ -7,6 +7,7 @@
+
+ #include <linux/pm_runtime.h>
+ #include <linux/export.h>
++#include <linux/async.h>
+
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_device.h>
+@@ -69,6 +70,19 @@ static int scsi_bus_resume_common(struct device *dev)
+ return err;
+ }
+
++static int scsi_bus_prepare(struct device *dev)
++{
++ if (scsi_is_sdev_device(dev)) {
++ /* sd probing uses async_schedule. Wait until it finishes. */
++ async_synchronize_full();
++
++ } else if (scsi_is_host_device(dev)) {
++ /* Wait until async scanning is finished */
++ scsi_complete_async_scans();
++ }
++ return 0;
++}
++
+ static int scsi_bus_suspend(struct device *dev)
+ {
+ return scsi_bus_suspend_common(dev, PMSG_SUSPEND);
+@@ -87,6 +101,7 @@ static int scsi_bus_poweroff(struct device *dev)
+ #else /* CONFIG_PM_SLEEP */
+
+ #define scsi_bus_resume_common NULL
++#define scsi_bus_prepare NULL
+ #define scsi_bus_suspend NULL
+ #define scsi_bus_freeze NULL
+ #define scsi_bus_poweroff NULL
+@@ -195,6 +210,7 @@ void scsi_autopm_put_host(struct Scsi_Host *shost)
+ #endif /* CONFIG_PM_RUNTIME */
+
+ const struct dev_pm_ops scsi_bus_pm_ops = {
++ .prepare = scsi_bus_prepare,
+ .suspend = scsi_bus_suspend,
+ .resume = scsi_bus_resume_common,
+ .freeze = scsi_bus_freeze,
+diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
+index 2a58895..5b475d0 100644
+--- a/drivers/scsi/scsi_priv.h
++++ b/drivers/scsi/scsi_priv.h
+@@ -110,6 +110,7 @@ extern void scsi_exit_procfs(void);
+ #endif /* CONFIG_PROC_FS */
+
+ /* scsi_scan.c */
++extern int scsi_complete_async_scans(void);
+ extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
+ unsigned int, unsigned int, int);
+ extern void scsi_forget_host(struct Scsi_Host *);
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index b3c6d95..6e7ea4a 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1815,6 +1815,7 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
+ }
+ spin_unlock(&async_scan_lock);
+
++ scsi_autopm_put_host(shost);
+ scsi_host_put(shost);
+ kfree(data);
+ }
+@@ -1841,7 +1842,6 @@ static int do_scan_async(void *_data)
+
+ do_scsi_scan_host(shost);
+ scsi_finish_async_scan(data);
+- scsi_autopm_put_host(shost);
+ return 0;
+ }
+
+@@ -1869,7 +1869,7 @@ void scsi_scan_host(struct Scsi_Host *shost)
+ p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
+ if (IS_ERR(p))
+ do_scan_async(data);
+- /* scsi_autopm_put_host(shost) is called in do_scan_async() */
++ /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
+ }
+ EXPORT_SYMBOL(scsi_scan_host);
+
+diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
+index 1dcbef4..1d24512 100644
+--- a/drivers/target/target_core_alua.c
++++ b/drivers/target/target_core_alua.c
+@@ -79,7 +79,7 @@ int target_emulate_report_target_port_groups(struct se_task *task)
+ return -EINVAL;
+ }
+
+- buf = transport_kmap_first_data_page(cmd);
++ buf = transport_kmap_data_sg(cmd);
+
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
+@@ -164,7 +164,7 @@ int target_emulate_report_target_port_groups(struct se_task *task)
+ buf[2] = ((rd_len >> 8) & 0xff);
+ buf[3] = (rd_len & 0xff);
+
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+@@ -195,7 +195,7 @@ int target_emulate_set_target_port_groups(struct se_task *task)
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
+- buf = transport_kmap_first_data_page(cmd);
++ buf = transport_kmap_data_sg(cmd);
+
+ /*
+ * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
+@@ -352,7 +352,7 @@ int target_emulate_set_target_port_groups(struct se_task *task)
+ }
+
+ out:
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ return 0;
+diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
+index 251e48f..8facd33 100644
+--- a/drivers/target/target_core_cdb.c
++++ b/drivers/target/target_core_cdb.c
+@@ -82,7 +82,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
+ return -EINVAL;
+ }
+
+- buf = transport_kmap_first_data_page(cmd);
++ buf = transport_kmap_data_sg(cmd);
+
+ if (dev == tpg->tpg_virt_lun0.lun_se_dev) {
+ buf[0] = 0x3f; /* Not connected */
+@@ -135,7 +135,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
+ buf[4] = 31; /* Set additional length to 31 */
+
+ out:
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+ return 0;
+ }
+
+@@ -726,7 +726,7 @@ int target_emulate_inquiry(struct se_task *task)
+ return -EINVAL;
+ }
+
+- buf = transport_kmap_first_data_page(cmd);
++ buf = transport_kmap_data_sg(cmd);
+
+ buf[0] = dev->transport->get_device_type(dev);
+
+@@ -743,7 +743,7 @@ int target_emulate_inquiry(struct se_task *task)
+ ret = -EINVAL;
+
+ out_unmap:
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+ out:
+ if (!ret) {
+ task->task_scsi_status = GOOD;
+@@ -765,7 +765,7 @@ int target_emulate_readcapacity(struct se_task *task)
+ else
+ blocks = (u32)blocks_long;
+
+- buf = transport_kmap_first_data_page(cmd);
++ buf = transport_kmap_data_sg(cmd);
+
+ buf[0] = (blocks >> 24) & 0xff;
+ buf[1] = (blocks >> 16) & 0xff;
+@@ -781,7 +781,7 @@ int target_emulate_readcapacity(struct se_task *task)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
+ put_unaligned_be32(0xFFFFFFFF, &buf[0]);
+
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+@@ -795,7 +795,7 @@ int target_emulate_readcapacity_16(struct se_task *task)
+ unsigned char *buf;
+ unsigned long long blocks = dev->transport->get_blocks(dev);
+
+- buf = transport_kmap_first_data_page(cmd);
++ buf = transport_kmap_data_sg(cmd);
+
+ buf[0] = (blocks >> 56) & 0xff;
+ buf[1] = (blocks >> 48) & 0xff;
+@@ -816,7 +816,7 @@ int target_emulate_readcapacity_16(struct se_task *task)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
+ buf[14] = 0x80;
+
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+@@ -1029,9 +1029,9 @@ int target_emulate_modesense(struct se_task *task)
+ offset = cmd->data_length;
+ }
+
+- rbuf = transport_kmap_first_data_page(cmd);
++ rbuf = transport_kmap_data_sg(cmd);
+ memcpy(rbuf, buf, offset);
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+@@ -1053,7 +1053,7 @@ int target_emulate_request_sense(struct se_task *task)
+ return -ENOSYS;
+ }
+
+- buf = transport_kmap_first_data_page(cmd);
++ buf = transport_kmap_data_sg(cmd);
+
+ if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
+ /*
+@@ -1099,7 +1099,7 @@ int target_emulate_request_sense(struct se_task *task)
+ }
+
+ end:
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+ return 0;
+@@ -1133,7 +1133,7 @@ int target_emulate_unmap(struct se_task *task)
+ dl = get_unaligned_be16(&cdb[0]);
+ bd_dl = get_unaligned_be16(&cdb[2]);
+
+- buf = transport_kmap_first_data_page(cmd);
++ buf = transport_kmap_data_sg(cmd);
+
+ ptr = &buf[offset];
+ pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
+@@ -1157,7 +1157,7 @@ int target_emulate_unmap(struct se_task *task)
+ }
+
+ err:
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+ if (!ret) {
+ task->task_scsi_status = GOOD;
+ transport_complete_task(task, 1);
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index 9b86394..19f8aca 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -658,7 +658,7 @@ int target_report_luns(struct se_task *se_task)
+ unsigned char *buf;
+ u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
+
+- buf = transport_kmap_first_data_page(se_cmd);
++ buf = (unsigned char *) transport_kmap_data_sg(se_cmd);
+
+ /*
+ * If no struct se_session pointer is present, this struct se_cmd is
+@@ -696,7 +696,7 @@ int target_report_luns(struct se_task *se_task)
+ * See SPC3 r07, page 159.
+ */
+ done:
+- transport_kunmap_first_data_page(se_cmd);
++ transport_kunmap_data_sg(se_cmd);
+ lun_count *= 8;
+ buf[0] = ((lun_count >> 24) & 0xff);
+ buf[1] = ((lun_count >> 16) & 0xff);
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index 9119d92..778c1a6 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -1538,7 +1538,7 @@ static int core_scsi3_decode_spec_i_port(
+ tidh_new->dest_local_nexus = 1;
+ list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+
+- buf = transport_kmap_first_data_page(cmd);
++ buf = transport_kmap_data_sg(cmd);
+ /*
+ * For a PERSISTENT RESERVE OUT specify initiator ports payload,
+ * first extract TransportID Parameter Data Length, and make sure
+@@ -1789,7 +1789,7 @@ static int core_scsi3_decode_spec_i_port(
+
+ }
+
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+
+ /*
+ * Go ahead and create a registrations from tid_dest_list for the
+@@ -1837,7 +1837,7 @@ static int core_scsi3_decode_spec_i_port(
+
+ return 0;
+ out:
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+ /*
+ * For the failure case, release everything from tid_dest_list
+ * including *dest_pr_reg and the configfs dependances..
+@@ -3429,14 +3429,14 @@ static int core_scsi3_emulate_pro_register_and_move(
+ * will be moved to for the TransportID containing SCSI initiator WWN
+ * information.
+ */
+- buf = transport_kmap_first_data_page(cmd);
++ buf = transport_kmap_data_sg(cmd);
+ rtpi = (buf[18] & 0xff) << 8;
+ rtpi |= buf[19] & 0xff;
+ tid_len = (buf[20] & 0xff) << 24;
+ tid_len |= (buf[21] & 0xff) << 16;
+ tid_len |= (buf[22] & 0xff) << 8;
+ tid_len |= buf[23] & 0xff;
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+ buf = NULL;
+
+ if ((tid_len + 24) != cmd->data_length) {
+@@ -3488,7 +3488,7 @@ static int core_scsi3_emulate_pro_register_and_move(
+ return -EINVAL;
+ }
+
+- buf = transport_kmap_first_data_page(cmd);
++ buf = transport_kmap_data_sg(cmd);
+ proto_ident = (buf[24] & 0x0f);
+ #if 0
+ pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
+@@ -3522,7 +3522,7 @@ static int core_scsi3_emulate_pro_register_and_move(
+ goto out;
+ }
+
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+ buf = NULL;
+
+ pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s"
+@@ -3787,13 +3787,13 @@ after_iport_check:
+ " REGISTER_AND_MOVE\n");
+ }
+
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+
+ core_scsi3_put_pr_reg(dest_pr_reg);
+ return 0;
+ out:
+ if (buf)
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+ if (dest_se_deve)
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+ if (dest_node_acl)
+@@ -3867,7 +3867,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
+ scope = (cdb[2] & 0xf0);
+ type = (cdb[2] & 0x0f);
+
+- buf = transport_kmap_first_data_page(cmd);
++ buf = transport_kmap_data_sg(cmd);
+ /*
+ * From PERSISTENT_RESERVE_OUT parameter list (payload)
+ */
+@@ -3885,7 +3885,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
+ aptpl = (buf[17] & 0x01);
+ unreg = (buf[17] & 0x02);
+ }
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+ buf = NULL;
+
+ /*
+@@ -3985,7 +3985,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
+ return -EINVAL;
+ }
+
+- buf = transport_kmap_first_data_page(cmd);
++ buf = transport_kmap_data_sg(cmd);
+ buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
+@@ -4019,7 +4019,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
+ buf[6] = ((add_len >> 8) & 0xff);
+ buf[7] = (add_len & 0xff);
+
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+
+ return 0;
+ }
+@@ -4045,7 +4045,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
+ return -EINVAL;
+ }
+
+- buf = transport_kmap_first_data_page(cmd);
++ buf = transport_kmap_data_sg(cmd);
+ buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
+@@ -4104,7 +4104,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
+
+ err:
+ spin_unlock(&se_dev->dev_reservation_lock);
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+
+ return 0;
+ }
+@@ -4128,7 +4128,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+ return -EINVAL;
+ }
+
+- buf = transport_kmap_first_data_page(cmd);
++ buf = transport_kmap_data_sg(cmd);
+
+ buf[0] = ((add_len << 8) & 0xff);
+ buf[1] = (add_len & 0xff);
+@@ -4160,7 +4160,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+ buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
+ buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+
+ return 0;
+ }
+@@ -4190,7 +4190,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+ return -EINVAL;
+ }
+
+- buf = transport_kmap_first_data_page(cmd);
++ buf = transport_kmap_data_sg(cmd);
+
+ buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
+@@ -4311,7 +4311,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+ buf[6] = ((add_len >> 8) & 0xff);
+ buf[7] = (add_len & 0xff);
+
+- transport_kunmap_first_data_page(cmd);
++ transport_kunmap_data_sg(cmd);
+
+ return 0;
+ }
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 8b15e56..5c12137 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -695,7 +695,7 @@ static int pscsi_transport_complete(struct se_task *task)
+
+ if (task->task_se_cmd->se_deve->lun_flags &
+ TRANSPORT_LUNFLAGS_READ_ONLY) {
+- unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd);
++ unsigned char *buf = transport_kmap_data_sg(task->task_se_cmd);
+
+ if (cdb[0] == MODE_SENSE_10) {
+ if (!(buf[3] & 0x80))
+@@ -705,7 +705,7 @@ static int pscsi_transport_complete(struct se_task *task)
+ buf[2] |= 0x80;
+ }
+
+- transport_kunmap_first_data_page(task->task_se_cmd);
++ transport_kunmap_data_sg(task->task_se_cmd);
+ }
+ }
+ after_mode_sense:
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 861628e..e4ddb93 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -3053,11 +3053,6 @@ static int transport_generic_cmd_sequencer(
+ (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
+ goto out_unsupported_cdb;
+
+- /* Let's limit control cdbs to a page, for simplicity's sake. */
+- if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
+- size > PAGE_SIZE)
+- goto out_invalid_cdb_field;
+-
+ transport_set_supported_SAM_opcode(cmd);
+ return ret;
+
+@@ -3435,9 +3430,11 @@ int transport_generic_map_mem_to_cmd(
+ }
+ EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
+
+-void *transport_kmap_first_data_page(struct se_cmd *cmd)
++void *transport_kmap_data_sg(struct se_cmd *cmd)
+ {
+ struct scatterlist *sg = cmd->t_data_sg;
++ struct page **pages;
++ int i;
+
+ BUG_ON(!sg);
+ /*
+@@ -3445,15 +3442,41 @@ void *transport_kmap_first_data_page(struct se_cmd *cmd)
+ * tcm_loop who may be using a contig buffer from the SCSI midlayer for
+ * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
+ */
+- return kmap(sg_page(sg)) + sg->offset;
++ if (!cmd->t_data_nents)
++ return NULL;
++ else if (cmd->t_data_nents == 1)
++ return kmap(sg_page(sg)) + sg->offset;
++
++ /* >1 page. use vmap */
++ pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
++ if (!pages)
++ return NULL;
++
++ /* convert sg[] to pages[] */
++ for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
++ pages[i] = sg_page(sg);
++ }
++
++ cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
++ kfree(pages);
++ if (!cmd->t_data_vmap)
++ return NULL;
++
++ return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
+ }
+-EXPORT_SYMBOL(transport_kmap_first_data_page);
++EXPORT_SYMBOL(transport_kmap_data_sg);
+
+-void transport_kunmap_first_data_page(struct se_cmd *cmd)
++void transport_kunmap_data_sg(struct se_cmd *cmd)
+ {
+- kunmap(sg_page(cmd->t_data_sg));
++ if (!cmd->t_data_nents)
++ return;
++ else if (cmd->t_data_nents == 1)
++ kunmap(sg_page(cmd->t_data_sg));
++
++ vunmap(cmd->t_data_vmap);
++ cmd->t_data_vmap = NULL;
+ }
+-EXPORT_SYMBOL(transport_kunmap_first_data_page);
++EXPORT_SYMBOL(transport_kunmap_data_sg);
+
+ static int
+ transport_generic_get_mem(struct se_cmd *cmd)
+diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
+index a004db3..61d08dd 100644
+--- a/drivers/usb/core/hcd-pci.c
++++ b/drivers/usb/core/hcd-pci.c
+@@ -187,7 +187,10 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ return -ENODEV;
+ dev->current_state = PCI_D0;
+
+- if (!dev->irq) {
++ /* The xHCI driver supports MSI and MSI-X,
++ * so don't fail if the BIOS doesn't provide a legacy IRQ.
++ */
++ if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) {
+ dev_err(&dev->dev,
+ "Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
+ pci_name(dev));
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 179e364..8cb9304 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -2465,8 +2465,10 @@ int usb_add_hcd(struct usb_hcd *hcd,
+ && device_can_wakeup(&hcd->self.root_hub->dev))
+ dev_dbg(hcd->self.controller, "supports USB remote wakeup\n");
+
+- /* enable irqs just before we start the controller */
+- if (usb_hcd_is_primary_hcd(hcd)) {
++ /* enable irqs just before we start the controller,
++ * if the BIOS provides legacy PCI irqs.
++ */
++ if (usb_hcd_is_primary_hcd(hcd) && irqnum) {
+ retval = usb_hcd_request_irqs(hcd, irqnum, irqflags);
+ if (retval)
+ goto err_request_irq;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 7978146..bc06a8f 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -705,10 +705,26 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ if (type == HUB_INIT3)
+ goto init3;
+
+- /* After a resume, port power should still be on.
++ /* The superspeed hub except for root hub has to use Hub Depth
++ * value as an offset into the route string to locate the bits
++ * it uses to determine the downstream port number. So hub driver
++ * should send a set hub depth request to superspeed hub after
++ * the superspeed hub is set configuration in initialization or
++ * reset procedure.
++ *
++ * After a resume, port power should still be on.
+ * For any other type of activation, turn it on.
+ */
+ if (type != HUB_RESUME) {
++ if (hdev->parent && hub_is_superspeed(hdev)) {
++ ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
++ HUB_SET_DEPTH, USB_RT_HUB,
++ hdev->level - 1, 0, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
++ if (ret < 0)
++ dev_err(hub->intfdev,
++ "set hub depth failed\n");
++ }
+
+ /* Speed up system boot by using a delayed_work for the
+ * hub's initial power-up delays. This is pretty awkward
+@@ -987,18 +1003,6 @@ static int hub_configure(struct usb_hub *hub,
+ goto fail;
+ }
+
+- if (hub_is_superspeed(hdev) && (hdev->parent != NULL)) {
+- ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
+- HUB_SET_DEPTH, USB_RT_HUB,
+- hdev->level - 1, 0, NULL, 0,
+- USB_CTRL_SET_TIMEOUT);
+-
+- if (ret < 0) {
+- message = "can't set hub depth";
+- goto fail;
+- }
+- }
+-
+ /* Request the entire hub descriptor.
+ * hub->descriptor can handle USB_MAXCHILDREN ports,
+ * but the hub can/will return fewer bytes here.
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index ac53a66..7732d69 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -872,7 +872,17 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
+ */
+ if (pdev->vendor == 0x184e) /* vendor Netlogic */
+ return;
++ if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
++ pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
++ pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
++ pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
++ return;
+
++ if (pci_enable_device(pdev) < 0) {
++ dev_warn(&pdev->dev, "Can't enable PCI device, "
++ "BIOS handoff failed.\n");
++ return;
++ }
+ if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
+ quirk_usb_handoff_uhci(pdev);
+ else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
+@@ -881,5 +891,6 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
+ quirk_usb_disable_ehci(pdev);
+ else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
+ quirk_usb_handoff_xhci(pdev);
++ pci_disable_device(pdev);
+ }
+ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 430e88f..a8b2980 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -95,7 +95,7 @@ static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+ */
+ memset(port_removable, 0, sizeof(port_removable));
+ for (i = 0; i < ports; i++) {
+- portsc = xhci_readl(xhci, xhci->usb3_ports[i]);
++ portsc = xhci_readl(xhci, xhci->usb2_ports[i]);
+ /* If a device is removable, PORTSC reports a 0, same as in the
+ * hub descriptor DeviceRemovable bits.
+ */
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 0e4b25f..c69cf54 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1140,26 +1140,42 @@ static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
+ }
+
+ /*
+- * Convert bInterval expressed in frames (in 1-255 range) to exponent of
++ * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
+ * microframes, rounded down to nearest power of 2.
+ */
+-static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
+- struct usb_host_endpoint *ep)
++static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
++ struct usb_host_endpoint *ep, unsigned int desc_interval,
++ unsigned int min_exponent, unsigned int max_exponent)
+ {
+ unsigned int interval;
+
+- interval = fls(8 * ep->desc.bInterval) - 1;
+- interval = clamp_val(interval, 3, 10);
+- if ((1 << interval) != 8 * ep->desc.bInterval)
++ interval = fls(desc_interval) - 1;
++ interval = clamp_val(interval, min_exponent, max_exponent);
++ if ((1 << interval) != desc_interval)
+ dev_warn(&udev->dev,
+ "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
+ ep->desc.bEndpointAddress,
+ 1 << interval,
+- 8 * ep->desc.bInterval);
++ desc_interval);
+
+ return interval;
+ }
+
++static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
++ struct usb_host_endpoint *ep)
++{
++ return xhci_microframes_to_exponent(udev, ep,
++ ep->desc.bInterval, 0, 15);
++}
++
++
++static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
++ struct usb_host_endpoint *ep)
++{
++ return xhci_microframes_to_exponent(udev, ep,
++ ep->desc.bInterval * 8, 3, 10);
++}
++
+ /* Return the polling or NAK interval.
+ *
+ * The polling interval is expressed in "microframes". If xHCI's Interval field
+@@ -1178,7 +1194,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
+ /* Max NAK rate */
+ if (usb_endpoint_xfer_control(&ep->desc) ||
+ usb_endpoint_xfer_bulk(&ep->desc)) {
+- interval = ep->desc.bInterval;
++ interval = xhci_parse_microframe_interval(udev, ep);
+ break;
+ }
+ /* Fall through - SS and HS isoc/int have same decoding */
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index b33f059..034f554 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -352,6 +352,11 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
+ /* hcd->irq is -1, we have MSI */
+ return 0;
+
++ if (!pdev->irq) {
++ xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
++ return -EINVAL;
++ }
++
+ /* fall back to legacy interrupt*/
+ ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
+ hcd->irq_descr, hcd);
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index a515237..33d25d4 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -136,6 +136,8 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
+ { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
+ { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
++ { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */
++ { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */
+ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 338d082..68fa8c7 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -788,7 +788,6 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
+@@ -803,7 +802,6 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+- /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
+@@ -828,7 +826,6 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+- /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0053, 0xff, 0xff, 0xff) }, */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+@@ -836,7 +833,6 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff),
+@@ -846,7 +842,6 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0067, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0077, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) },
+@@ -865,8 +860,6 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0095, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0098, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0099, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
+@@ -887,28 +880,18 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0146, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0149, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0150, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
+@@ -1083,127 +1066,27 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1403, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1404, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1405, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1406, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1407, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1408, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1409, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1410, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1411, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1412, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1413, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1414, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1415, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1416, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1417, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1418, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1419, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1420, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1421, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1422, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1423, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1427, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1429, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1430, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1431, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1432, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1433, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1434, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1435, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1436, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1437, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1438, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1439, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1440, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1441, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1442, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1443, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1444, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1445, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1446, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1447, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1448, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1449, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1450, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1451, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1452, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1453, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1454, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1455, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1456, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1457, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1458, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1459, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1460, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1461, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1462, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1463, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1464, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1465, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1466, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1467, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1468, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1469, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1470, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1471, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1472, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1473, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1474, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1475, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1476, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1477, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1478, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1479, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1480, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1482, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1483, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1484, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1486, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1487, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1488, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1489, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1490, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1491, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1492, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1493, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1494, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1495, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1496, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1497, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1498, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1499, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1500, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1501, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1502, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1503, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1504, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1505, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1506, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1507, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1508, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1509, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1510, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
++ 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
++
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
+- 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) },
++
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index ea84456..21c82b0 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -165,7 +165,7 @@ static unsigned int product_5052_count;
+ /* the array dimension is the number of default entries plus */
+ /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */
+ /* null entry */
+-static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = {
++static struct usb_device_id ti_id_table_3410[14+TI_EXTRA_VID_PID_COUNT+1] = {
+ { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
+ { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
+ { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
+@@ -179,6 +179,7 @@ static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = {
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
++ { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
+ };
+
+ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
+@@ -188,7 +189,7 @@ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
+ { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
+ };
+
+-static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1] = {
++static struct usb_device_id ti_id_table_combined[18+2*TI_EXTRA_VID_PID_COUNT+1] = {
+ { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
+ { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
+ { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
+@@ -206,6 +207,7 @@ static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1]
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
++ { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
+ { }
+ };
+
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
+index 2aac195..f140f1b 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.h
++++ b/drivers/usb/serial/ti_usb_3410_5052.h
+@@ -49,6 +49,10 @@
+ #define MTS_MT9234ZBA_PRODUCT_ID 0xF115
+ #define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319
+
++/* Abbott Diabetics vendor and product ids */
++#define ABBOTT_VENDOR_ID 0x1a61
++#define ABBOTT_PRODUCT_ID 0x3410
++
+ /* Commands */
+ #define TI_GET_VERSION 0x01
+ #define TI_GET_PORT_STATUS 0x02
+diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
+index 9e069ef..db51ba1 100644
+--- a/drivers/usb/storage/usb.c
++++ b/drivers/usb/storage/usb.c
+@@ -788,15 +788,19 @@ static void quiesce_and_remove_host(struct us_data *us)
+ struct Scsi_Host *host = us_to_host(us);
+
+ /* If the device is really gone, cut short reset delays */
+- if (us->pusb_dev->state == USB_STATE_NOTATTACHED)
++ if (us->pusb_dev->state == USB_STATE_NOTATTACHED) {
+ set_bit(US_FLIDX_DISCONNECTING, &us->dflags);
++ wake_up(&us->delay_wait);
++ }
+
+- /* Prevent SCSI-scanning (if it hasn't started yet)
+- * and wait for the SCSI-scanning thread to stop.
++ /* Prevent SCSI scanning (if it hasn't started yet)
++ * or wait for the SCSI-scanning routine to stop.
+ */
+- set_bit(US_FLIDX_DONT_SCAN, &us->dflags);
+- wake_up(&us->delay_wait);
+- wait_for_completion(&us->scanning_done);
++ cancel_delayed_work_sync(&us->scan_dwork);
++
++ /* Balance autopm calls if scanning was cancelled */
++ if (test_bit(US_FLIDX_SCAN_PENDING, &us->dflags))
++ usb_autopm_put_interface_no_suspend(us->pusb_intf);
+
+ /* Removing the host will perform an orderly shutdown: caches
+ * synchronized, disks spun down, etc.
+@@ -823,52 +827,28 @@ static void release_everything(struct us_data *us)
+ scsi_host_put(us_to_host(us));
+ }
+
+-/* Thread to carry out delayed SCSI-device scanning */
+-static int usb_stor_scan_thread(void * __us)
++/* Delayed-work routine to carry out SCSI-device scanning */
++static void usb_stor_scan_dwork(struct work_struct *work)
+ {
+- struct us_data *us = (struct us_data *)__us;
++ struct us_data *us = container_of(work, struct us_data,
++ scan_dwork.work);
+ struct device *dev = &us->pusb_intf->dev;
+
+- dev_dbg(dev, "device found\n");
++ dev_dbg(dev, "starting scan\n");
+
+- set_freezable_with_signal();
+- /*
+- * Wait for the timeout to expire or for a disconnect
+- *
+- * We can't freeze in this thread or we risk causing khubd to
+- * fail to freeze, but we can't be non-freezable either. Nor can
+- * khubd freeze while waiting for scanning to complete as it may
+- * hold the device lock, causing a hang when suspending devices.
+- * So we request a fake signal when freezing and use
+- * interruptible sleep to kick us out of our wait early when
+- * freezing happens.
+- */
+- if (delay_use > 0) {
+- dev_dbg(dev, "waiting for device to settle "
+- "before scanning\n");
+- wait_event_interruptible_timeout(us->delay_wait,
+- test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
+- delay_use * HZ);
++ /* For bulk-only devices, determine the max LUN value */
++ if (us->protocol == USB_PR_BULK && !(us->fflags & US_FL_SINGLE_LUN)) {
++ mutex_lock(&us->dev_mutex);
++ us->max_lun = usb_stor_Bulk_max_lun(us);
++ mutex_unlock(&us->dev_mutex);
+ }
++ scsi_scan_host(us_to_host(us));
++ dev_dbg(dev, "scan complete\n");
+
+- /* If the device is still connected, perform the scanning */
+- if (!test_bit(US_FLIDX_DONT_SCAN, &us->dflags)) {
+-
+- /* For bulk-only devices, determine the max LUN value */
+- if (us->protocol == USB_PR_BULK &&
+- !(us->fflags & US_FL_SINGLE_LUN)) {
+- mutex_lock(&us->dev_mutex);
+- us->max_lun = usb_stor_Bulk_max_lun(us);
+- mutex_unlock(&us->dev_mutex);
+- }
+- scsi_scan_host(us_to_host(us));
+- dev_dbg(dev, "scan complete\n");
+-
+- /* Should we unbind if no devices were detected? */
+- }
++ /* Should we unbind if no devices were detected? */
+
+ usb_autopm_put_interface(us->pusb_intf);
+- complete_and_exit(&us->scanning_done, 0);
++ clear_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
+ }
+
+ static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf)
+@@ -915,7 +895,7 @@ int usb_stor_probe1(struct us_data **pus,
+ init_completion(&us->cmnd_ready);
+ init_completion(&(us->notify));
+ init_waitqueue_head(&us->delay_wait);
+- init_completion(&us->scanning_done);
++ INIT_DELAYED_WORK(&us->scan_dwork, usb_stor_scan_dwork);
+
+ /* Associate the us_data structure with the USB device */
+ result = associate_dev(us, intf);
+@@ -946,7 +926,6 @@ EXPORT_SYMBOL_GPL(usb_stor_probe1);
+ /* Second part of general USB mass-storage probing */
+ int usb_stor_probe2(struct us_data *us)
+ {
+- struct task_struct *th;
+ int result;
+ struct device *dev = &us->pusb_intf->dev;
+
+@@ -987,20 +966,14 @@ int usb_stor_probe2(struct us_data *us)
+ goto BadDevice;
+ }
+
+- /* Start up the thread for delayed SCSI-device scanning */
+- th = kthread_create(usb_stor_scan_thread, us, "usb-stor-scan");
+- if (IS_ERR(th)) {
+- dev_warn(dev,
+- "Unable to start the device-scanning thread\n");
+- complete(&us->scanning_done);
+- quiesce_and_remove_host(us);
+- result = PTR_ERR(th);
+- goto BadDevice;
+- }
+-
++ /* Submit the delayed_work for SCSI-device scanning */
+ usb_autopm_get_interface_no_resume(us->pusb_intf);
+- wake_up_process(th);
++ set_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
+
++ if (delay_use > 0)
++ dev_dbg(dev, "waiting for device to settle before scanning\n");
++ queue_delayed_work(system_freezable_wq, &us->scan_dwork,
++ delay_use * HZ);
+ return 0;
+
+ /* We come here if there are any problems */
+diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
+index 7b0f211..75f70f0 100644
+--- a/drivers/usb/storage/usb.h
++++ b/drivers/usb/storage/usb.h
+@@ -47,6 +47,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/completion.h>
+ #include <linux/mutex.h>
++#include <linux/workqueue.h>
+ #include <scsi/scsi_host.h>
+
+ struct us_data;
+@@ -72,7 +73,7 @@ struct us_unusual_dev {
+ #define US_FLIDX_DISCONNECTING 3 /* disconnect in progress */
+ #define US_FLIDX_RESETTING 4 /* device reset in progress */
+ #define US_FLIDX_TIMED_OUT 5 /* SCSI midlayer timed out */
+-#define US_FLIDX_DONT_SCAN 6 /* don't scan (disconnect) */
++#define US_FLIDX_SCAN_PENDING 6 /* scanning not yet done */
+ #define US_FLIDX_REDO_READ10 7 /* redo READ(10) command */
+ #define US_FLIDX_READ10_WORKED 8 /* previous READ(10) succeeded */
+
+@@ -147,8 +148,8 @@ struct us_data {
+ /* mutual exclusion and synchronization structures */
+ struct completion cmnd_ready; /* to sleep thread on */
+ struct completion notify; /* thread begin/end */
+- wait_queue_head_t delay_wait; /* wait during scan, reset */
+- struct completion scanning_done; /* wait for scan thread */
++ wait_queue_head_t delay_wait; /* wait during reset */
++ struct delayed_work scan_dwork; /* for async scanning */
+
+ /* subdriver information */
+ void *extra; /* Any extra data */
+diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
+index 976ac23..c04205c 100644
+--- a/drivers/video/omap2/dss/dpi.c
++++ b/drivers/video/omap2/dss/dpi.c
+@@ -180,6 +180,11 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
+ {
+ int r;
+
++ if (cpu_is_omap34xx() && !dpi.vdds_dsi_reg) {
++ DSSERR("no VDSS_DSI regulator\n");
++ return -ENODEV;
++ }
++
+ if (dssdev->manager == NULL) {
+ DSSERR("failed to enable display: no manager\n");
+ return -ENODEV;
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index d2039ca..af11098 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -1104,6 +1104,8 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ }
+
+ rc = vfs_setxattr(lower_dentry, name, value, size, flags);
++ if (!rc)
++ fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode);
+ out:
+ return rc;
+ }
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 828e750..ea54cde 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -197,6 +197,12 @@ struct eventpoll {
+
+ /* The user that created the eventpoll descriptor */
+ struct user_struct *user;
++
++ struct file *file;
++
++ /* used to optimize loop detection check */
++ int visited;
++ struct list_head visited_list_link;
+ };
+
+ /* Wait structure used by the poll hooks */
+@@ -255,6 +261,15 @@ static struct kmem_cache *epi_cache __read_mostly;
+ /* Slab cache used to allocate "struct eppoll_entry" */
+ static struct kmem_cache *pwq_cache __read_mostly;
+
++/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
++static LIST_HEAD(visited_list);
++
++/*
++ * List of files with newly added links, where we may need to limit the number
++ * of emanating paths. Protected by the epmutex.
++ */
++static LIST_HEAD(tfile_check_list);
++
+ #ifdef CONFIG_SYSCTL
+
+ #include <linux/sysctl.h>
+@@ -276,6 +291,12 @@ ctl_table epoll_table[] = {
+ };
+ #endif /* CONFIG_SYSCTL */
+
++static const struct file_operations eventpoll_fops;
++
++static inline int is_file_epoll(struct file *f)
++{
++ return f->f_op == &eventpoll_fops;
++}
+
+ /* Setup the structure that is used as key for the RB tree */
+ static inline void ep_set_ffd(struct epoll_filefd *ffd,
+@@ -299,6 +320,11 @@ static inline int ep_is_linked(struct list_head *p)
+ return !list_empty(p);
+ }
+
++static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p)
++{
++ return container_of(p, struct eppoll_entry, wait);
++}
++
+ /* Get the "struct epitem" from a wait queue pointer */
+ static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
+ {
+@@ -446,6 +472,18 @@ static void ep_poll_safewake(wait_queue_head_t *wq)
+ put_cpu();
+ }
+
++static void ep_remove_wait_queue(struct eppoll_entry *pwq)
++{
++ wait_queue_head_t *whead;
++
++ rcu_read_lock();
++ /* If it is cleared by POLLFREE, it should be rcu-safe */
++ whead = rcu_dereference(pwq->whead);
++ if (whead)
++ remove_wait_queue(whead, &pwq->wait);
++ rcu_read_unlock();
++}
++
+ /*
+ * This function unregisters poll callbacks from the associated file
+ * descriptor. Must be called with "mtx" held (or "epmutex" if called from
+@@ -460,7 +498,7 @@ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
+ pwq = list_first_entry(lsthead, struct eppoll_entry, llink);
+
+ list_del(&pwq->llink);
+- remove_wait_queue(pwq->whead, &pwq->wait);
++ ep_remove_wait_queue(pwq);
+ kmem_cache_free(pwq_cache, pwq);
+ }
+ }
+@@ -711,12 +749,6 @@ static const struct file_operations eventpoll_fops = {
+ .llseek = noop_llseek,
+ };
+
+-/* Fast test to see if the file is an eventpoll file */
+-static inline int is_file_epoll(struct file *f)
+-{
+- return f->f_op == &eventpoll_fops;
+-}
+-
+ /*
+ * This is called from eventpoll_release() to unlink files from the eventpoll
+ * interface. We need to have this facility to cleanup correctly files that are
+@@ -827,6 +859,17 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
+ struct epitem *epi = ep_item_from_wait(wait);
+ struct eventpoll *ep = epi->ep;
+
++ if ((unsigned long)key & POLLFREE) {
++ ep_pwq_from_wait(wait)->whead = NULL;
++ /*
++ * whead = NULL above can race with ep_remove_wait_queue()
++ * which can do another remove_wait_queue() after us, so we
++ * can't use __remove_wait_queue(). whead->lock is held by
++ * the caller.
++ */
++ list_del_init(&wait->task_list);
++ }
++
+ spin_lock_irqsave(&ep->lock, flags);
+
+ /*
+@@ -926,6 +969,99 @@ static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
+ rb_insert_color(&epi->rbn, &ep->rbr);
+ }
+
++
++
++#define PATH_ARR_SIZE 5
++/*
++ * These are the number paths of length 1 to 5, that we are allowing to emanate
++ * from a single file of interest. For example, we allow 1000 paths of length
++ * 1, to emanate from each file of interest. This essentially represents the
++ * potential wakeup paths, which need to be limited in order to avoid massive
++ * uncontrolled wakeup storms. The common use case should be a single ep which
++ * is connected to n file sources. In this case each file source has 1 path
++ * of length 1. Thus, the numbers below should be more than sufficient. These
++ * path limits are enforced during an EPOLL_CTL_ADD operation, since a modify
++ * and delete can't add additional paths. Protected by the epmutex.
++ */
++static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
++static int path_count[PATH_ARR_SIZE];
++
++static int path_count_inc(int nests)
++{
++ if (++path_count[nests] > path_limits[nests])
++ return -1;
++ return 0;
++}
++
++static void path_count_init(void)
++{
++ int i;
++
++ for (i = 0; i < PATH_ARR_SIZE; i++)
++ path_count[i] = 0;
++}
++
++static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
++{
++ int error = 0;
++ struct file *file = priv;
++ struct file *child_file;
++ struct epitem *epi;
++
++ list_for_each_entry(epi, &file->f_ep_links, fllink) {
++ child_file = epi->ep->file;
++ if (is_file_epoll(child_file)) {
++ if (list_empty(&child_file->f_ep_links)) {
++ if (path_count_inc(call_nests)) {
++ error = -1;
++ break;
++ }
++ } else {
++ error = ep_call_nested(&poll_loop_ncalls,
++ EP_MAX_NESTS,
++ reverse_path_check_proc,
++ child_file, child_file,
++ current);
++ }
++ if (error != 0)
++ break;
++ } else {
++ printk(KERN_ERR "reverse_path_check_proc: "
++ "file is not an ep!\n");
++ }
++ }
++ return error;
++}
++
++/**
++ * reverse_path_check - The tfile_check_list is list of file *, which have
++ * links that are proposed to be newly added. We need to
++ * make sure that those added links don't add too many
++ * paths such that we will spend all our time waking up
++ * eventpoll objects.
++ *
++ * Returns: Returns zero if the proposed links don't create too many paths,
++ * -1 otherwise.
++ */
++static int reverse_path_check(void)
++{
++ int length = 0;
++ int error = 0;
++ struct file *current_file;
++
++ /* let's call this for all tfiles */
++ list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
++ length++;
++ path_count_init();
++ error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
++ reverse_path_check_proc, current_file,
++ current_file, current);
++ if (error)
++ break;
++ }
++ return error;
++}
++
+ /*
+ * Must be called with "mtx" held.
+ */
+@@ -987,6 +1123,11 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
+ */
+ ep_rbtree_insert(ep, epi);
+
++ /* now check if we've created too many backpaths */
++ error = -EINVAL;
++ if (reverse_path_check())
++ goto error_remove_epi;
++
+ /* We have to drop the new item inside our item list to keep track of it */
+ spin_lock_irqsave(&ep->lock, flags);
+
+@@ -1011,6 +1152,14 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
+
+ return 0;
+
++error_remove_epi:
++ spin_lock(&tfile->f_lock);
++ if (ep_is_linked(&epi->fllink))
++ list_del_init(&epi->fllink);
++ spin_unlock(&tfile->f_lock);
++
++ rb_erase(&epi->rbn, &ep->rbr);
++
+ error_unregister:
+ ep_unregister_pollwait(ep, epi);
+
+@@ -1275,18 +1424,36 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
+ int error = 0;
+ struct file *file = priv;
+ struct eventpoll *ep = file->private_data;
++ struct eventpoll *ep_tovisit;
+ struct rb_node *rbp;
+ struct epitem *epi;
+
+ mutex_lock_nested(&ep->mtx, call_nests + 1);
++ ep->visited = 1;
++ list_add(&ep->visited_list_link, &visited_list);
+ for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
+ epi = rb_entry(rbp, struct epitem, rbn);
+ if (unlikely(is_file_epoll(epi->ffd.file))) {
++ ep_tovisit = epi->ffd.file->private_data;
++ if (ep_tovisit->visited)
++ continue;
+ error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+- ep_loop_check_proc, epi->ffd.file,
+- epi->ffd.file->private_data, current);
++ ep_loop_check_proc, epi->ffd.file,
++ ep_tovisit, current);
+ if (error != 0)
+ break;
++ } else {
++ /*
++ * If we've reached a file that is not associated with
++ * an ep, then we need to check if the newly added
++ * links are going to add too many wakeup paths. We do
++ * this by adding it to the tfile_check_list, if it's
++ * not already there, and calling reverse_path_check()
++ * during ep_insert().
++ */
++ if (list_empty(&epi->ffd.file->f_tfile_llink))
++ list_add(&epi->ffd.file->f_tfile_llink,
++ &tfile_check_list);
+ }
+ }
+ mutex_unlock(&ep->mtx);
+@@ -1307,8 +1474,31 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
+ */
+ static int ep_loop_check(struct eventpoll *ep, struct file *file)
+ {
+- return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
++ int ret;
++ struct eventpoll *ep_cur, *ep_next;
++
++ ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
+ ep_loop_check_proc, file, ep, current);
++ /* clear visited list */
++ list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
++ visited_list_link) {
++ ep_cur->visited = 0;
++ list_del(&ep_cur->visited_list_link);
++ }
++ return ret;
++}
++
++static void clear_tfile_check_list(void)
++{
++ struct file *file;
++
++ /* first clear the tfile_check_list */
++ while (!list_empty(&tfile_check_list)) {
++ file = list_first_entry(&tfile_check_list, struct file,
++ f_tfile_llink);
++ list_del_init(&file->f_tfile_llink);
++ }
++ INIT_LIST_HEAD(&tfile_check_list);
+ }
+
+ /*
+@@ -1316,8 +1506,9 @@ static int ep_loop_check(struct eventpoll *ep, struct file *file)
+ */
+ SYSCALL_DEFINE1(epoll_create1, int, flags)
+ {
+- int error;
++ int error, fd;
+ struct eventpoll *ep = NULL;
++ struct file *file;
+
+ /* Check the EPOLL_* constant for consistency. */
+ BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
+@@ -1334,11 +1525,25 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
+ * Creates all the items needed to setup an eventpoll file. That is,
+ * a file structure and a free file descriptor.
+ */
+- error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep,
++ fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
++ if (fd < 0) {
++ error = fd;
++ goto out_free_ep;
++ }
++ file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
+ O_RDWR | (flags & O_CLOEXEC));
+- if (error < 0)
+- ep_free(ep);
+-
++ if (IS_ERR(file)) {
++ error = PTR_ERR(file);
++ goto out_free_fd;
++ }
++ fd_install(fd, file);
++ ep->file = file;
++ return fd;
++
++out_free_fd:
++ put_unused_fd(fd);
++out_free_ep:
++ ep_free(ep);
+ return error;
+ }
+
+@@ -1404,21 +1609,27 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ /*
+ * When we insert an epoll file descriptor, inside another epoll file
+ * descriptor, there is the change of creating closed loops, which are
+- * better be handled here, than in more critical paths.
++ * better be handled here, than in more critical paths. While we are
++ * checking for loops we also determine the list of files reachable
++ * and hang them on the tfile_check_list, so we can check that we
++ * haven't created too many possible wakeup paths.
+ *
+- * We hold epmutex across the loop check and the insert in this case, in
+- * order to prevent two separate inserts from racing and each doing the
+- * insert "at the same time" such that ep_loop_check passes on both
+- * before either one does the insert, thereby creating a cycle.
++ * We need to hold the epmutex across both ep_insert and ep_remove
++ * b/c we want to make sure we are looking at a coherent view of
++ * epoll network.
+ */
+- if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
++ if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
+ mutex_lock(&epmutex);
+ did_lock_epmutex = 1;
+- error = -ELOOP;
+- if (ep_loop_check(ep, tfile) != 0)
+- goto error_tgt_fput;
+ }
+-
++ if (op == EPOLL_CTL_ADD) {
++ if (is_file_epoll(tfile)) {
++ error = -ELOOP;
++ if (ep_loop_check(ep, tfile) != 0)
++ goto error_tgt_fput;
++ } else
++ list_add(&tfile->f_tfile_llink, &tfile_check_list);
++ }
+
+ mutex_lock_nested(&ep->mtx, 0);
+
+@@ -1437,6 +1648,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ error = ep_insert(ep, &epds, tfile, fd);
+ } else
+ error = -EEXIST;
++ clear_tfile_check_list();
+ break;
+ case EPOLL_CTL_DEL:
+ if (epi)
+@@ -1455,7 +1667,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ mutex_unlock(&ep->mtx);
+
+ error_tgt_fput:
+- if (unlikely(did_lock_epmutex))
++ if (did_lock_epmutex)
+ mutex_unlock(&epmutex);
+
+ fput(tfile);
+diff --git a/fs/namei.c b/fs/namei.c
+index 5008f01..744e942 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1094,8 +1094,10 @@ static struct dentry *d_inode_lookup(struct dentry *parent, struct dentry *dentr
+ struct dentry *old;
+
+ /* Don't create child dentry for a dead directory. */
+- if (unlikely(IS_DEADDIR(inode)))
++ if (unlikely(IS_DEADDIR(inode))) {
++ dput(dentry);
+ return ERR_PTR(-ENOENT);
++ }
+
+ old = inode->i_op->lookup(inode, dentry, nd);
+ if (unlikely(old)) {
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 055d702..e527030 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3568,8 +3568,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
+ }
+ if (npages > 1) {
+ /* for decoding across pages */
+- args.acl_scratch = alloc_page(GFP_KERNEL);
+- if (!args.acl_scratch)
++ res.acl_scratch = alloc_page(GFP_KERNEL);
++ if (!res.acl_scratch)
+ goto out_free;
+ }
+ args.acl_len = npages * PAGE_SIZE;
+@@ -3605,8 +3605,8 @@ out_free:
+ for (i = 0; i < npages; i++)
+ if (pages[i])
+ __free_page(pages[i]);
+- if (args.acl_scratch)
+- __free_page(args.acl_scratch);
++ if (res.acl_scratch)
++ __free_page(res.acl_scratch);
+ return ret;
+ }
+
+@@ -4876,8 +4876,10 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
+ clp->cl_rpcclient->cl_auth->au_flavor);
+
+ res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
+- if (unlikely(!res.server_scope))
+- return -ENOMEM;
++ if (unlikely(!res.server_scope)) {
++ status = -ENOMEM;
++ goto out;
++ }
+
+ status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+ if (!status)
+@@ -4894,12 +4896,13 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
+ clp->server_scope = NULL;
+ }
+
+- if (!clp->server_scope)
++ if (!clp->server_scope) {
+ clp->server_scope = res.server_scope;
+- else
+- kfree(res.server_scope);
++ goto out;
++ }
+ }
+-
++ kfree(res.server_scope);
++out:
+ dprintk("<-- %s status= %d\n", __func__, status);
+ return status;
+ }
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 6a7107a..a58eed7 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1071,6 +1071,8 @@ void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4
+ {
+ struct nfs_client *clp = server->nfs_client;
+
++ if (test_and_clear_bit(NFS_DELEGATED_STATE, &state->flags))
++ nfs_async_inode_return_delegation(state->inode, &state->stateid);
+ nfs4_state_mark_reclaim_nograce(clp, state);
+ nfs4_schedule_state_manager(clp);
+ }
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index dcaf693..68adab4 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -2522,7 +2522,6 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+
+ xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
+ args->acl_pages, args->acl_pgbase, args->acl_len);
+- xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE);
+
+ encode_nops(&hdr);
+ }
+@@ -6034,6 +6033,10 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct compound_hdr hdr;
+ int status;
+
++ if (res->acl_scratch != NULL) {
++ void *p = page_address(res->acl_scratch);
++ xdr_set_scratch_buffer(xdr, p, PAGE_SIZE);
++ }
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+diff --git a/fs/signalfd.c b/fs/signalfd.c
+index 492465b..7ae2a57 100644
+--- a/fs/signalfd.c
++++ b/fs/signalfd.c
+@@ -30,6 +30,21 @@
+ #include <linux/signalfd.h>
+ #include <linux/syscalls.h>
+
++void signalfd_cleanup(struct sighand_struct *sighand)
++{
++ wait_queue_head_t *wqh = &sighand->signalfd_wqh;
++ /*
++ * The lockless check can race with remove_wait_queue() in progress,
++ * but in this case its caller should run under rcu_read_lock() and
++ * sighand_cachep is SLAB_DESTROY_BY_RCU, we can safely return.
++ */
++ if (likely(!waitqueue_active(wqh)))
++ return;
++
++ /* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */
++ wake_up_poll(wqh, POLLHUP | POLLFREE);
++}
++
+ struct signalfd_ctx {
+ sigset_t sigmask;
+ };
+diff --git a/include/asm-generic/poll.h b/include/asm-generic/poll.h
+index 44bce83..9ce7f44 100644
+--- a/include/asm-generic/poll.h
++++ b/include/asm-generic/poll.h
+@@ -28,6 +28,8 @@
+ #define POLLRDHUP 0x2000
+ #endif
+
++#define POLLFREE 0x4000 /* currently only for epoll */
++
+ struct pollfd {
+ int fd;
+ short events;
+diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
+index f362733..657ab55 100644
+--- a/include/linux/eventpoll.h
++++ b/include/linux/eventpoll.h
+@@ -61,6 +61,7 @@ struct file;
+ static inline void eventpoll_init_file(struct file *file)
+ {
+ INIT_LIST_HEAD(&file->f_ep_links);
++ INIT_LIST_HEAD(&file->f_tfile_llink);
+ }
+
+
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index e0bc4ff..10b2288 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1001,6 +1001,7 @@ struct file {
+ #ifdef CONFIG_EPOLL
+ /* Used by fs/eventpoll.c to link all the hooks to this file */
+ struct list_head f_ep_links;
++ struct list_head f_tfile_llink;
+ #endif /* #ifdef CONFIG_EPOLL */
+ struct address_space *f_mapping;
+ #ifdef CONFIG_DEBUG_WRITECOUNT
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 6c898af..41116ab 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -602,7 +602,6 @@ struct nfs_getaclargs {
+ size_t acl_len;
+ unsigned int acl_pgbase;
+ struct page ** acl_pages;
+- struct page * acl_scratch;
+ struct nfs4_sequence_args seq_args;
+ };
+
+@@ -612,6 +611,7 @@ struct nfs_getaclres {
+ size_t acl_len;
+ size_t acl_data_offset;
+ int acl_flags;
++ struct page * acl_scratch;
+ struct nfs4_sequence_res seq_res;
+ };
+
+diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h
+index 3ff4961..247399b 100644
+--- a/include/linux/signalfd.h
++++ b/include/linux/signalfd.h
+@@ -61,13 +61,16 @@ static inline void signalfd_notify(struct task_struct *tsk, int sig)
+ wake_up(&tsk->sighand->signalfd_wqh);
+ }
+
++extern void signalfd_cleanup(struct sighand_struct *sighand);
++
+ #else /* CONFIG_SIGNALFD */
+
+ static inline void signalfd_notify(struct task_struct *tsk, int sig) { }
+
++static inline void signalfd_cleanup(struct sighand_struct *sighand) { }
++
+ #endif /* CONFIG_SIGNALFD */
+
+ #endif /* __KERNEL__ */
+
+ #endif /* _LINUX_SIGNALFD_H */
+-
+diff --git a/include/linux/usb/ch11.h b/include/linux/usb/ch11.h
+index 4ebaf08..1eb735b 100644
+--- a/include/linux/usb/ch11.h
++++ b/include/linux/usb/ch11.h
+@@ -62,12 +62,6 @@
+ #define USB_PORT_FEAT_TEST 21
+ #define USB_PORT_FEAT_INDICATOR 22
+ #define USB_PORT_FEAT_C_PORT_L1 23
+-#define USB_PORT_FEAT_C_PORT_LINK_STATE 25
+-#define USB_PORT_FEAT_C_PORT_CONFIG_ERROR 26
+-#define USB_PORT_FEAT_PORT_REMOTE_WAKE_MASK 27
+-#define USB_PORT_FEAT_BH_PORT_RESET 28
+-#define USB_PORT_FEAT_C_BH_PORT_RESET 29
+-#define USB_PORT_FEAT_FORCE_LINKPM_ACCEPT 30
+
+ /*
+ * Port feature selectors added by USB 3.0 spec.
+@@ -76,8 +70,8 @@
+ #define USB_PORT_FEAT_LINK_STATE 5
+ #define USB_PORT_FEAT_U1_TIMEOUT 23
+ #define USB_PORT_FEAT_U2_TIMEOUT 24
+-#define USB_PORT_FEAT_C_LINK_STATE 25
+-#define USB_PORT_FEAT_C_CONFIG_ERR 26
++#define USB_PORT_FEAT_C_PORT_LINK_STATE 25
++#define USB_PORT_FEAT_C_PORT_CONFIG_ERROR 26
+ #define USB_PORT_FEAT_REMOTE_WAKE_MASK 27
+ #define USB_PORT_FEAT_BH_PORT_RESET 28
+ #define USB_PORT_FEAT_C_BH_PORT_RESET 29
+diff --git a/include/net/flow.h b/include/net/flow.h
+index 57f15a7..2a7eefd 100644
+--- a/include/net/flow.h
++++ b/include/net/flow.h
+@@ -90,6 +90,16 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
+ fl4->fl4_dport = dport;
+ fl4->fl4_sport = sport;
+ }
++
++/* Reset some input parameters after previous lookup */
++static inline void flowi4_update_output(struct flowi4 *fl4, int oif, __u8 tos,
++ __be32 daddr, __be32 saddr)
++{
++ fl4->flowi4_oif = oif;
++ fl4->flowi4_tos = tos;
++ fl4->daddr = daddr;
++ fl4->saddr = saddr;
++}
+
+
+ struct flowi6 {
+diff --git a/include/net/route.h b/include/net/route.h
+index 91855d1..b1c0d5b 100644
+--- a/include/net/route.h
++++ b/include/net/route.h
+@@ -270,6 +270,7 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
+ if (IS_ERR(rt))
+ return rt;
+ ip_rt_put(rt);
++ flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr);
+ }
+ security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+ return ip_route_output_flow(net, fl4, sk);
+@@ -284,6 +285,9 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable
+ fl4->fl4_dport = dport;
+ fl4->fl4_sport = sport;
+ ip_rt_put(rt);
++ flowi4_update_output(fl4, sk->sk_bound_dev_if,
++ RT_CONN_FLAGS(sk), fl4->daddr,
++ fl4->saddr);
+ security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+ return ip_route_output_flow(sock_net(sk), fl4, sk);
+ }
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index f6bb08b..55ce96b 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -220,9 +220,16 @@ struct tcf_proto {
+
+ struct qdisc_skb_cb {
+ unsigned int pkt_len;
+- long data[];
++ unsigned char data[24];
+ };
+
++static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
++{
++ struct qdisc_skb_cb *qcb;
++ BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz);
++ BUILD_BUG_ON(sizeof(qcb->data) < sz);
++}
++
+ static inline int qdisc_qlen(const struct Qdisc *q)
+ {
+ return q->q.qlen;
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index a79886c..94bbec3 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -486,6 +486,7 @@ struct se_cmd {
+
+ struct scatterlist *t_data_sg;
+ unsigned int t_data_nents;
++ void *t_data_vmap;
+ struct scatterlist *t_bidi_data_sg;
+ unsigned int t_bidi_data_nents;
+
+diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
+index dac4f2d..72751e8 100644
+--- a/include/target/target_core_transport.h
++++ b/include/target/target_core_transport.h
+@@ -129,8 +129,8 @@ extern void transport_init_se_cmd(struct se_cmd *,
+ struct target_core_fabric_ops *,
+ struct se_session *, u32, int, int,
+ unsigned char *);
+-void *transport_kmap_first_data_page(struct se_cmd *cmd);
+-void transport_kunmap_first_data_page(struct se_cmd *cmd);
++void *transport_kmap_data_sg(struct se_cmd *);
++void transport_kunmap_data_sg(struct se_cmd *);
+ extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
+ extern int transport_handle_cdb_direct(struct se_cmd *);
+ extern int transport_generic_handle_cdb_map(struct se_cmd *);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index da4a6a1..0acf42c0 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -66,6 +66,7 @@
+ #include <linux/user-return-notifier.h>
+ #include <linux/oom.h>
+ #include <linux/khugepaged.h>
++#include <linux/signalfd.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
+@@ -910,8 +911,10 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
+
+ void __cleanup_sighand(struct sighand_struct *sighand)
+ {
+- if (atomic_dec_and_test(&sighand->count))
++ if (atomic_dec_and_test(&sighand->count)) {
++ signalfd_cleanup(sighand);
+ kmem_cache_free(sighand_cachep, sighand);
++ }
+ }
+
+
+diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
+index 342d8f4..0119b9d 100644
+--- a/kernel/irq/autoprobe.c
++++ b/kernel/irq/autoprobe.c
+@@ -53,7 +53,7 @@ unsigned long probe_irq_on(void)
+ if (desc->irq_data.chip->irq_set_type)
+ desc->irq_data.chip->irq_set_type(&desc->irq_data,
+ IRQ_TYPE_PROBE);
+- irq_startup(desc);
++ irq_startup(desc, false);
+ }
+ raw_spin_unlock_irq(&desc->lock);
+ }
+@@ -70,7 +70,7 @@ unsigned long probe_irq_on(void)
+ raw_spin_lock_irq(&desc->lock);
+ if (!desc->action && irq_settings_can_probe(desc)) {
+ desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
+- if (irq_startup(desc))
++ if (irq_startup(desc, false))
+ desc->istate |= IRQS_PENDING;
+ }
+ raw_spin_unlock_irq(&desc->lock);
+diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
+index f7c543a..fb7db75 100644
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -157,19 +157,22 @@ static void irq_state_set_masked(struct irq_desc *desc)
+ irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
+ }
+
+-int irq_startup(struct irq_desc *desc)
++int irq_startup(struct irq_desc *desc, bool resend)
+ {
++ int ret = 0;
++
+ irq_state_clr_disabled(desc);
+ desc->depth = 0;
+
+ if (desc->irq_data.chip->irq_startup) {
+- int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
++ ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
+ irq_state_clr_masked(desc);
+- return ret;
++ } else {
++ irq_enable(desc);
+ }
+-
+- irq_enable(desc);
+- return 0;
++ if (resend)
++ check_irq_resend(desc, desc->irq_data.irq);
++ return ret;
+ }
+
+ void irq_shutdown(struct irq_desc *desc)
+@@ -330,6 +333,24 @@ out_unlock:
+ }
+ EXPORT_SYMBOL_GPL(handle_simple_irq);
+
++/*
++ * Called unconditionally from handle_level_irq() and only for oneshot
++ * interrupts from handle_fasteoi_irq()
++ */
++static void cond_unmask_irq(struct irq_desc *desc)
++{
++ /*
++ * We need to unmask in the following cases:
++ * - Standard level irq (IRQF_ONESHOT is not set)
++ * - Oneshot irq which did not wake the thread (caused by a
++ * spurious interrupt or a primary handler handling it
++ * completely).
++ */
++ if (!irqd_irq_disabled(&desc->irq_data) &&
++ irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
++ unmask_irq(desc);
++}
++
+ /**
+ * handle_level_irq - Level type irq handler
+ * @irq: the interrupt number
+@@ -362,8 +383,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
+
+ handle_irq_event(desc);
+
+- if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
+- unmask_irq(desc);
++ cond_unmask_irq(desc);
++
+ out_unlock:
+ raw_spin_unlock(&desc->lock);
+ }
+@@ -417,6 +438,9 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
+ preflow_handler(desc);
+ handle_irq_event(desc);
+
++ if (desc->istate & IRQS_ONESHOT)
++ cond_unmask_irq(desc);
++
+ out_eoi:
+ desc->irq_data.chip->irq_eoi(&desc->irq_data);
+ out_unlock:
+@@ -625,7 +649,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+ irq_settings_set_noprobe(desc);
+ irq_settings_set_norequest(desc);
+ irq_settings_set_nothread(desc);
+- irq_startup(desc);
++ irq_startup(desc, true);
+ }
+ out:
+ irq_put_desc_busunlock(desc, flags);
+diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
+index a73dd6c..e1a8b64 100644
+--- a/kernel/irq/internals.h
++++ b/kernel/irq/internals.h
+@@ -67,7 +67,7 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
+ extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
+ extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
+
+-extern int irq_startup(struct irq_desc *desc);
++extern int irq_startup(struct irq_desc *desc, bool resend);
+ extern void irq_shutdown(struct irq_desc *desc);
+ extern void irq_enable(struct irq_desc *desc);
+ extern void irq_disable(struct irq_desc *desc);
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 1da999f..cf2d7ae 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -1027,7 +1027,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
+ desc->istate |= IRQS_ONESHOT;
+
+ if (irq_settings_can_autoenable(desc))
+- irq_startup(desc);
++ irq_startup(desc, true);
+ else
+ /* Undo nested disables: */
+ desc->depth = 1;
+diff --git a/mm/nommu.c b/mm/nommu.c
+index b982290..ee7e57e 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -696,9 +696,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
+ if (vma->vm_file) {
+ mapping = vma->vm_file->f_mapping;
+
++ mutex_lock(&mapping->i_mmap_mutex);
+ flush_dcache_mmap_lock(mapping);
+ vma_prio_tree_insert(vma, &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
++ mutex_unlock(&mapping->i_mmap_mutex);
+ }
+
+ /* add the VMA to the tree */
+@@ -760,9 +762,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
+ if (vma->vm_file) {
+ mapping = vma->vm_file->f_mapping;
+
++ mutex_lock(&mapping->i_mmap_mutex);
+ flush_dcache_mmap_lock(mapping);
+ vma_prio_tree_remove(vma, &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
++ mutex_unlock(&mapping->i_mmap_mutex);
+ }
+
+ /* remove from the MM's tree and list */
+@@ -2052,6 +2056,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
+ high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ down_write(&nommu_region_sem);
++ mutex_lock(&inode->i_mapping->i_mmap_mutex);
+
+ /* search for VMAs that fall within the dead zone */
+ vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
+@@ -2059,6 +2064,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
+ /* found one - only interested if it's shared out of the page
+ * cache */
+ if (vma->vm_flags & VM_SHARED) {
++ mutex_unlock(&inode->i_mapping->i_mmap_mutex);
+ up_write(&nommu_region_sem);
+ return -ETXTBSY; /* not quite true, but near enough */
+ }
+@@ -2086,6 +2092,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
+ }
+ }
+
++ mutex_unlock(&inode->i_mapping->i_mmap_mutex);
+ up_write(&nommu_region_sem);
+ return 0;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 5a13edf..c56cacf 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3565,14 +3565,20 @@ static inline gro_result_t
+ __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+ {
+ struct sk_buff *p;
++ unsigned int maclen = skb->dev->hard_header_len;
+
+ for (p = napi->gro_list; p; p = p->next) {
+ unsigned long diffs;
+
+ diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
+ diffs |= p->vlan_tci ^ skb->vlan_tci;
+- diffs |= compare_ether_header(skb_mac_header(p),
+- skb_gro_mac_header(skb));
++ if (maclen == ETH_HLEN)
++ diffs |= compare_ether_header(skb_mac_header(p),
++ skb_gro_mac_header(skb));
++ else if (!diffs)
++ diffs = memcmp(skb_mac_header(p),
++ skb_gro_mac_header(skb),
++ maclen);
+ NAPI_GRO_CB(p)->same_flow = !diffs;
+ NAPI_GRO_CB(p)->flush = 0;
+ }
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 5d4d896..ab0633f 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -194,7 +194,7 @@ static void netpoll_poll_dev(struct net_device *dev)
+
+ poll_napi(dev);
+
+- if (dev->priv_flags & IFF_SLAVE) {
++ if (dev->flags & IFF_SLAVE) {
+ if (dev->npinfo) {
+ struct net_device *bond_dev = dev->master;
+ struct sk_buff *skb;
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index 96a164a..59a7041 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -867,7 +867,8 @@ static int arp_process(struct sk_buff *skb)
+ if (addr_type == RTN_UNICAST &&
+ (arp_fwd_proxy(in_dev, dev, rt) ||
+ arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
+- pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
++ (rt->dst.dev != dev &&
++ pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))) {
+ n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
+ if (n)
+ neigh_release(n);
+diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
+index 1e60f76..42dd1a9 100644
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -573,8 +573,8 @@ void ip_forward_options(struct sk_buff *skb)
+ }
+ if (srrptr + 3 <= srrspace) {
+ opt->is_changed = 1;
+- ip_rt_get_source(&optptr[srrptr-1], skb, rt);
+ ip_hdr(skb)->daddr = opt->nexthop;
++ ip_rt_get_source(&optptr[srrptr-1], skb, rt);
+ optptr[2] = srrptr+4;
+ } else if (net_ratelimit())
+ printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 52b5c2d..53113b9 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1310,25 +1310,26 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
+ return in_sack;
+ }
+
+-static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
+- struct tcp_sacktag_state *state,
++/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
++static u8 tcp_sacktag_one(struct sock *sk,
++ struct tcp_sacktag_state *state, u8 sacked,
++ u32 start_seq, u32 end_seq,
+ int dup_sack, int pcount)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+- u8 sacked = TCP_SKB_CB(skb)->sacked;
+ int fack_count = state->fack_count;
+
+ /* Account D-SACK for retransmitted packet. */
+ if (dup_sack && (sacked & TCPCB_RETRANS)) {
+ if (tp->undo_marker && tp->undo_retrans &&
+- after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
++ after(end_seq, tp->undo_marker))
+ tp->undo_retrans--;
+ if (sacked & TCPCB_SACKED_ACKED)
+ state->reord = min(fack_count, state->reord);
+ }
+
+ /* Nothing to do; acked frame is about to be dropped (was ACKed). */
+- if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
++ if (!after(end_seq, tp->snd_una))
+ return sacked;
+
+ if (!(sacked & TCPCB_SACKED_ACKED)) {
+@@ -1347,13 +1348,13 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
+ /* New sack for not retransmitted frame,
+ * which was in hole. It is reordering.
+ */
+- if (before(TCP_SKB_CB(skb)->seq,
++ if (before(start_seq,
+ tcp_highest_sack_seq(tp)))
+ state->reord = min(fack_count,
+ state->reord);
+
+ /* SACK enhanced F-RTO (RFC4138; Appendix B) */
+- if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
++ if (!after(end_seq, tp->frto_highmark))
+ state->flag |= FLAG_ONLY_ORIG_SACKED;
+ }
+
+@@ -1371,8 +1372,7 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
+
+ /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
+ if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
+- before(TCP_SKB_CB(skb)->seq,
+- TCP_SKB_CB(tp->lost_skb_hint)->seq))
++ before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
+ tp->lost_cnt_hint += pcount;
+
+ if (fack_count > tp->fackets_out)
+@@ -1391,6 +1391,9 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
+ return sacked;
+ }
+
++/* Shift newly-SACKed bytes from this skb to the immediately previous
++ * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
++ */
+ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+ struct tcp_sacktag_state *state,
+ unsigned int pcount, int shifted, int mss,
+@@ -1398,10 +1401,13 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
++ u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */
++ u32 end_seq = start_seq + shifted; /* end of newly-SACKed */
+
+ BUG_ON(!pcount);
+
+- if (skb == tp->lost_skb_hint)
++ /* Adjust hint for FACK. Non-FACK is handled in tcp_sacktag_one(). */
++ if (tcp_is_fack(tp) && (skb == tp->lost_skb_hint))
+ tp->lost_cnt_hint += pcount;
+
+ TCP_SKB_CB(prev)->end_seq += shifted;
+@@ -1427,8 +1433,11 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+ skb_shinfo(skb)->gso_type = 0;
+ }
+
+- /* We discard results */
+- tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
++ /* Adjust counters and hints for the newly sacked sequence range but
++ * discard the return value since prev is already marked.
++ */
++ tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
++ start_seq, end_seq, dup_sack, pcount);
+
+ /* Difference in this won't matter, both ACKed by the same cumul. ACK */
+ TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
+@@ -1667,10 +1676,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
+ break;
+
+ if (in_sack) {
+- TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk,
+- state,
+- dup_sack,
+- tcp_skb_pcount(skb));
++ TCP_SKB_CB(skb)->sacked =
++ tcp_sacktag_one(sk,
++ state,
++ TCP_SKB_CB(skb)->sacked,
++ TCP_SKB_CB(skb)->seq,
++ TCP_SKB_CB(skb)->end_seq,
++ dup_sack,
++ tcp_skb_pcount(skb));
+
+ if (!before(TCP_SKB_CB(skb)->seq,
+ tcp_highest_sack_seq(tp)))
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index c89e354..eb90aa8 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -650,6 +650,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
+ arg.iov[0].iov_len, IPPROTO_TCP, 0);
+ arg.csumoffset = offsetof(struct tcphdr, check) / 2;
+ arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
++ /* When socket is gone, all binding information is lost.
++ * routing might fail in this case. using iif for oif to
++ * make sure we can deliver it
++ */
++ arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
+
+ net = dev_net(skb_dst(skb)->dev);
+ arg.tos = ip_hdr(skb)->tos;
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index a7536fd..7d9b21d 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -885,6 +885,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
+ result);
+
++ ieee80211_led_init(local);
++
+ rtnl_lock();
+
+ result = ieee80211_init_rate_ctrl_alg(local,
+@@ -906,8 +908,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+
+ rtnl_unlock();
+
+- ieee80211_led_init(local);
+-
+ local->network_latency_notifier.notifier_call =
+ ieee80211_max_network_latency;
+ result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index 093cc32..6dc7d7d 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -232,6 +232,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
+ __be16 dport = 0; /* destination port to forward */
+ unsigned int flags;
+ struct ip_vs_conn_param param;
++ const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
+ union nf_inet_addr snet; /* source network of the client,
+ after masking */
+
+@@ -267,7 +268,6 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
+ {
+ int protocol = iph.protocol;
+ const union nf_inet_addr *vaddr = &iph.daddr;
+- const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
+ __be16 vport = 0;
+
+ if (dst_port == svc->port) {
+diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
+index 3422b25..081ffb9 100644
+--- a/net/sched/sch_choke.c
++++ b/net/sched/sch_choke.c
+@@ -225,8 +225,7 @@ struct choke_skb_cb {
+
+ static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
+ {
+- BUILD_BUG_ON(sizeof(skb->cb) <
+- sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
++ qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
+ return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
+ }
+
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index a4ab207..7801b15 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -118,8 +118,7 @@ struct netem_skb_cb {
+
+ static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
+ {
+- BUILD_BUG_ON(sizeof(skb->cb) <
+- sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
++ qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
+ return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
+ }
+
+@@ -383,8 +382,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ q->counter = 0;
+
+ __skb_queue_head(&q->qdisc->q, skb);
+- q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
+- q->qdisc->qstats.requeues++;
++ sch->qstats.backlog += qdisc_pkt_len(skb);
++ sch->qstats.requeues++;
+ ret = NET_XMIT_SUCCESS;
+ }
+
+diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
+index e83c272..17859ea 100644
+--- a/net/sched/sch_sfb.c
++++ b/net/sched/sch_sfb.c
+@@ -93,8 +93,7 @@ struct sfb_skb_cb {
+
+ static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
+ {
+- BUILD_BUG_ON(sizeof(skb->cb) <
+- sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb));
++ qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
+ return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
+ }
+
+diff --git a/scripts/package/builddeb b/scripts/package/builddeb
+index f6cbc3d..3c6c0b1 100644
+--- a/scripts/package/builddeb
++++ b/scripts/package/builddeb
+@@ -238,14 +238,14 @@ EOF
+ fi
+
+ # Build header package
+-(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > /tmp/files$$)
+-(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> /tmp/files$$)
+-(cd $objtree; find .config Module.symvers include scripts -type f >> /tmp/objfiles$$)
++(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
++(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
++(cd $objtree; find .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
+ destdir=$kernel_headers_dir/usr/src/linux-headers-$version
+ mkdir -p "$destdir"
+-(cd $srctree; tar -c -f - -T /tmp/files$$) | (cd $destdir; tar -xf -)
+-(cd $objtree; tar -c -f - -T /tmp/objfiles$$) | (cd $destdir; tar -xf -)
+-rm -f /tmp/files$$ /tmp/objfiles$$
++(cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
++(cd $objtree; tar -c -f - -T "$objtree/debian/hdrobjfiles") | (cd $destdir; tar -xf -)
++rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
+ arch=$(dpkg --print-architecture)
+
+ cat <<EOF >> debian/control
+diff --git a/security/tomoyo/.gitignore b/security/tomoyo/.gitignore
+new file mode 100644
+index 0000000..5caf1a6
+--- /dev/null
++++ b/security/tomoyo/.gitignore
+@@ -0,0 +1,2 @@
++builtin-policy.h
++policy/
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 7072251..08bad5b 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1899,6 +1899,10 @@ static void cxt5051_init_mic_port(struct hda_codec *codec, hda_nid_t nid,
+ snd_hda_codec_write(codec, nid, 0,
+ AC_VERB_SET_UNSOLICITED_ENABLE,
+ AC_USRSP_EN | event);
++}
++
++static void cxt5051_init_mic_jack(struct hda_codec *codec, hda_nid_t nid)
++{
+ snd_hda_input_jack_add(codec, nid, SND_JACK_MICROPHONE, NULL);
+ snd_hda_input_jack_report(codec, nid);
+ }
+@@ -1916,7 +1920,6 @@ static int cxt5051_init(struct hda_codec *codec)
+ struct conexant_spec *spec = codec->spec;
+
+ conexant_init(codec);
+- conexant_init_jacks(codec);
+
+ if (spec->auto_mic & AUTO_MIC_PORTB)
+ cxt5051_init_mic_port(codec, 0x17, CXT5051_PORTB_EVENT);
+@@ -2037,6 +2040,12 @@ static int patch_cxt5051(struct hda_codec *codec)
+ if (spec->beep_amp)
+ snd_hda_attach_beep_device(codec, spec->beep_amp);
+
++ conexant_init_jacks(codec);
++ if (spec->auto_mic & AUTO_MIC_PORTB)
++ cxt5051_init_mic_jack(codec, 0x17);
++ if (spec->auto_mic & AUTO_MIC_PORTC)
++ cxt5051_init_mic_jack(codec, 0x18);
++
+ return 0;
+ }
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 9c197d4..c4c8d78 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -79,6 +79,8 @@ enum {
+ ALC_AUTOMUTE_MIXER, /* mute/unmute mixer widget AMP */
+ };
+
++#define MAX_VOL_NIDS 0x40
++
+ struct alc_spec {
+ /* codec parameterization */
+ const struct snd_kcontrol_new *mixers[5]; /* mixer arrays */
+@@ -117,8 +119,8 @@ struct alc_spec {
+ const hda_nid_t *capsrc_nids;
+ hda_nid_t dig_in_nid; /* digital-in NID; optional */
+ hda_nid_t mixer_nid; /* analog-mixer NID */
+- DECLARE_BITMAP(vol_ctls, 0x20 << 1);
+- DECLARE_BITMAP(sw_ctls, 0x20 << 1);
++ DECLARE_BITMAP(vol_ctls, MAX_VOL_NIDS << 1);
++ DECLARE_BITMAP(sw_ctls, MAX_VOL_NIDS << 1);
+
+ /* capture setup for dynamic dual-adc switch */
+ hda_nid_t cur_adc;
+@@ -3068,7 +3070,10 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
+ static inline unsigned int get_ctl_pos(unsigned int data)
+ {
+ hda_nid_t nid = get_amp_nid_(data);
+- unsigned int dir = get_amp_direction_(data);
++ unsigned int dir;
++ if (snd_BUG_ON(nid >= MAX_VOL_NIDS))
++ return 0;
++ dir = get_amp_direction_(data);
+ return (nid << 1) | dir;
+ }
+
+@@ -4224,12 +4229,20 @@ static void alc889_fixup_dac_route(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+ {
+ if (action == ALC_FIXUP_ACT_PRE_PROBE) {
++ /* fake the connections during parsing the tree */
+ hda_nid_t conn1[2] = { 0x0c, 0x0d };
+ hda_nid_t conn2[2] = { 0x0e, 0x0f };
+ snd_hda_override_conn_list(codec, 0x14, 2, conn1);
+ snd_hda_override_conn_list(codec, 0x15, 2, conn1);
+ snd_hda_override_conn_list(codec, 0x18, 2, conn2);
+ snd_hda_override_conn_list(codec, 0x1a, 2, conn2);
++ } else if (action == ALC_FIXUP_ACT_PROBE) {
++ /* restore the connections */
++ hda_nid_t conn[5] = { 0x0c, 0x0d, 0x0e, 0x0f, 0x26 };
++ snd_hda_override_conn_list(codec, 0x14, 5, conn);
++ snd_hda_override_conn_list(codec, 0x15, 5, conn);
++ snd_hda_override_conn_list(codec, 0x18, 5, conn);
++ snd_hda_override_conn_list(codec, 0x1a, 5, conn);
+ }
+ }
+
+diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
+index d795294..07dd7eb 100644
+--- a/sound/soc/codecs/wm8962.c
++++ b/sound/soc/codecs/wm8962.c
+@@ -2559,7 +2559,7 @@ static int dsp2_event(struct snd_soc_dapm_widget *w,
+ return 0;
+ }
+
+-static const char *st_text[] = { "None", "Right", "Left" };
++static const char *st_text[] = { "None", "Left", "Right" };
+
+ static const struct soc_enum str_enum =
+ SOC_ENUM_SINGLE(WM8962_DAC_DSP_MIXING_1, 2, 3, st_text);
diff --git a/1009_linux-3.2.10.patch b/1009_linux-3.2.10.patch
new file mode 100644
index 00000000..e5ac4c7d
--- /dev/null
+++ b/1009_linux-3.2.10.patch
@@ -0,0 +1,4275 @@
+diff --git a/Documentation/hwmon/jc42 b/Documentation/hwmon/jc42
+index a22ecf4..52729a7 100644
+--- a/Documentation/hwmon/jc42
++++ b/Documentation/hwmon/jc42
+@@ -7,21 +7,29 @@ Supported chips:
+ Addresses scanned: I2C 0x18 - 0x1f
+ Datasheets:
+ http://www.analog.com/static/imported-files/data_sheets/ADT7408.pdf
+- * IDT TSE2002B3, TS3000B3
+- Prefix: 'tse2002b3', 'ts3000b3'
++ * Atmel AT30TS00
++ Prefix: 'at30ts00'
+ Addresses scanned: I2C 0x18 - 0x1f
+ Datasheets:
+- http://www.idt.com/products/getdoc.cfm?docid=18715691
+- http://www.idt.com/products/getdoc.cfm?docid=18715692
++ http://www.atmel.com/Images/doc8585.pdf
++ * IDT TSE2002B3, TSE2002GB2, TS3000B3, TS3000GB2
++ Prefix: 'tse2002', 'ts3000'
++ Addresses scanned: I2C 0x18 - 0x1f
++ Datasheets:
++ http://www.idt.com/sites/default/files/documents/IDT_TSE2002B3C_DST_20100512_120303152056.pdf
++ http://www.idt.com/sites/default/files/documents/IDT_TSE2002GB2A1_DST_20111107_120303145914.pdf
++ http://www.idt.com/sites/default/files/documents/IDT_TS3000B3A_DST_20101129_120303152013.pdf
++ http://www.idt.com/sites/default/files/documents/IDT_TS3000GB2A1_DST_20111104_120303151012.pdf
+ * Maxim MAX6604
+ Prefix: 'max6604'
+ Addresses scanned: I2C 0x18 - 0x1f
+ Datasheets:
+ http://datasheets.maxim-ic.com/en/ds/MAX6604.pdf
+- * Microchip MCP9805, MCP98242, MCP98243, MCP9843
+- Prefixes: 'mcp9805', 'mcp98242', 'mcp98243', 'mcp9843'
++ * Microchip MCP9804, MCP9805, MCP98242, MCP98243, MCP9843
++ Prefixes: 'mcp9804', 'mcp9805', 'mcp98242', 'mcp98243', 'mcp9843'
+ Addresses scanned: I2C 0x18 - 0x1f
+ Datasheets:
++ http://ww1.microchip.com/downloads/en/DeviceDoc/22203C.pdf
+ http://ww1.microchip.com/downloads/en/DeviceDoc/21977b.pdf
+ http://ww1.microchip.com/downloads/en/DeviceDoc/21996a.pdf
+ http://ww1.microchip.com/downloads/en/DeviceDoc/22153c.pdf
+@@ -48,6 +56,12 @@ Supported chips:
+ Datasheets:
+ http://www.st.com/stonline/products/literature/ds/13447/stts424.pdf
+ http://www.st.com/stonline/products/literature/ds/13448/stts424e02.pdf
++ * ST Microelectronics STTS2002, STTS3000
++ Prefix: 'stts2002', 'stts3000'
++ Addresses scanned: I2C 0x18 - 0x1f
++ Datasheets:
++ http://www.st.com/internet/com/TECHNICAL_RESOURCES/TECHNICAL_LITERATURE/DATASHEET/CD00225278.pdf
++ http://www.st.com/internet/com/TECHNICAL_RESOURCES/TECHNICAL_LITERATURE/DATA_BRIEF/CD00270920.pdf
+ * JEDEC JC 42.4 compliant temperature sensor chips
+ Prefix: 'jc42'
+ Addresses scanned: I2C 0x18 - 0x1f
+diff --git a/Makefile b/Makefile
+index 5f1739b..1ddd6e9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h
+index e8a761a..f939794 100644
+--- a/arch/alpha/include/asm/futex.h
++++ b/arch/alpha/include/asm/futex.h
+@@ -108,7 +108,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ " lda $31,3b-2b(%0)\n"
+ " .previous\n"
+ : "+r"(ret), "=&r"(prev), "=&r"(cmp)
+- : "r"(uaddr), "r"((long)oldval), "r"(newval)
++ : "r"(uaddr), "r"((long)(int)oldval), "r"(newval)
+ : "memory");
+
+ *uval = prev;
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index b259c7c..ab3740e 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1272,7 +1272,7 @@ config ARM_ERRATA_743622
+ depends on CPU_V7
+ help
+ This option enables the workaround for the 743622 Cortex-A9
+- (r2p0..r2p2) erratum. Under very rare conditions, a faulty
++ (r2p*) erratum. Under very rare conditions, a faulty
+ optimisation in the Cortex-A9 Store Buffer may lead to data
+ corruption. This workaround sets a specific bit in the diagnostic
+ register of the Cortex-A9 which disables the Store Buffer
+diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
+index 0bda22c..665ef2c 100644
+--- a/arch/arm/include/asm/pmu.h
++++ b/arch/arm/include/asm/pmu.h
+@@ -125,7 +125,7 @@ int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+
+ u64 armpmu_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc,
+- int idx, int overflow);
++ int idx);
+
+ int armpmu_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc,
+diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
+index 88b0941..ecebb89 100644
+--- a/arch/arm/kernel/perf_event.c
++++ b/arch/arm/kernel/perf_event.c
+@@ -187,7 +187,7 @@ armpmu_event_set_period(struct perf_event *event,
+ u64
+ armpmu_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc,
+- int idx, int overflow)
++ int idx)
+ {
+ struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ u64 delta, prev_raw_count, new_raw_count;
+@@ -200,13 +200,7 @@ again:
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+- new_raw_count &= armpmu->max_period;
+- prev_raw_count &= armpmu->max_period;
+-
+- if (overflow)
+- delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
+- else
+- delta = new_raw_count - prev_raw_count;
++ delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+@@ -223,7 +217,7 @@ armpmu_read(struct perf_event *event)
+ if (hwc->idx < 0)
+ return;
+
+- armpmu_event_update(event, hwc, hwc->idx, 0);
++ armpmu_event_update(event, hwc, hwc->idx);
+ }
+
+ static void
+@@ -239,7 +233,7 @@ armpmu_stop(struct perf_event *event, int flags)
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ armpmu->disable(hwc, hwc->idx);
+ barrier(); /* why? */
+- armpmu_event_update(event, hwc, hwc->idx, 0);
++ armpmu_event_update(event, hwc, hwc->idx);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ }
+ }
+@@ -519,7 +513,13 @@ __hw_perf_event_init(struct perf_event *event)
+ hwc->config_base |= (unsigned long)mapping;
+
+ if (!hwc->sample_period) {
+- hwc->sample_period = armpmu->max_period;
++ /*
++ * For non-sampling runs, limit the sample_period to half
++ * of the counter width. That way, the new counter value
++ * is far less likely to overtake the previous one unless
++ * you have some serious IRQ latency issues.
++ */
++ hwc->sample_period = armpmu->max_period >> 1;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ }
+diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
+index e63d811..0ad3c6f 100644
+--- a/arch/arm/kernel/perf_event_v6.c
++++ b/arch/arm/kernel/perf_event_v6.c
+@@ -463,23 +463,6 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ }
+
+-static int counter_is_active(unsigned long pmcr, int idx)
+-{
+- unsigned long mask = 0;
+- if (idx == ARMV6_CYCLE_COUNTER)
+- mask = ARMV6_PMCR_CCOUNT_IEN;
+- else if (idx == ARMV6_COUNTER0)
+- mask = ARMV6_PMCR_COUNT0_IEN;
+- else if (idx == ARMV6_COUNTER1)
+- mask = ARMV6_PMCR_COUNT1_IEN;
+-
+- if (mask)
+- return pmcr & mask;
+-
+- WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+- return 0;
+-}
+-
+ static irqreturn_t
+ armv6pmu_handle_irq(int irq_num,
+ void *dev)
+@@ -509,7 +492,8 @@ armv6pmu_handle_irq(int irq_num,
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+- if (!counter_is_active(pmcr, idx))
++ /* Ignore if we don't have an event. */
++ if (!event)
+ continue;
+
+ /*
+@@ -520,7 +504,7 @@ armv6pmu_handle_irq(int irq_num,
+ continue;
+
+ hwc = &event->hw;
+- armpmu_event_update(event, hwc, idx, 1);
++ armpmu_event_update(event, hwc, idx);
+ data.period = event->hw.last_period;
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
+diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
+index 1ef6d00..1049319 100644
+--- a/arch/arm/kernel/perf_event_v7.c
++++ b/arch/arm/kernel/perf_event_v7.c
+@@ -878,6 +878,11 @@ static inline int armv7_pmnc_disable_intens(int idx)
+
+ counter = ARMV7_IDX_TO_COUNTER(idx);
+ asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
++ isb();
++ /* Clear the overflow flag in case an interrupt is pending. */
++ asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
++ isb();
++
+ return idx;
+ }
+
+@@ -1024,6 +1029,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
++ /* Ignore if we don't have an event. */
++ if (!event)
++ continue;
++
+ /*
+ * We have a single interrupt for all counters. Check that
+ * each counter has overflowed before we process it.
+@@ -1032,7 +1041,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
+ continue;
+
+ hwc = &event->hw;
+- armpmu_event_update(event, hwc, idx, 1);
++ armpmu_event_update(event, hwc, idx);
+ data.period = event->hw.last_period;
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
+diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
+index e0cca10..9fc2c95 100644
+--- a/arch/arm/kernel/perf_event_xscale.c
++++ b/arch/arm/kernel/perf_event_xscale.c
+@@ -253,11 +253,14 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
++ if (!event)
++ continue;
++
+ if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
+ continue;
+
+ hwc = &event->hw;
+- armpmu_event_update(event, hwc, idx, 1);
++ armpmu_event_update(event, hwc, idx);
+ data.period = event->hw.last_period;
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
+@@ -590,11 +593,14 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+- if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
++ if (!event)
++ continue;
++
++ if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
+ continue;
+
+ hwc = &event->hw;
+- armpmu_event_update(event, hwc, idx, 1);
++ armpmu_event_update(event, hwc, idx);
+ data.period = event->hw.last_period;
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
+@@ -661,7 +667,7 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
+ static void
+ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
+ {
+- unsigned long flags, ien, evtsel;
++ unsigned long flags, ien, evtsel, of_flags;
+ struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+
+ ien = xscale2pmu_read_int_enable();
+@@ -670,26 +676,31 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ ien &= ~XSCALE2_CCOUNT_INT_EN;
++ of_flags = XSCALE2_CCOUNT_OVERFLOW;
+ break;
+ case XSCALE_COUNTER0:
+ ien &= ~XSCALE2_COUNT0_INT_EN;
+ evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
++ of_flags = XSCALE2_COUNT0_OVERFLOW;
+ break;
+ case XSCALE_COUNTER1:
+ ien &= ~XSCALE2_COUNT1_INT_EN;
+ evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
++ of_flags = XSCALE2_COUNT1_OVERFLOW;
+ break;
+ case XSCALE_COUNTER2:
+ ien &= ~XSCALE2_COUNT2_INT_EN;
+ evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
++ of_flags = XSCALE2_COUNT2_OVERFLOW;
+ break;
+ case XSCALE_COUNTER3:
+ ien &= ~XSCALE2_COUNT3_INT_EN;
+ evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
++ of_flags = XSCALE2_COUNT3_OVERFLOW;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+@@ -699,6 +710,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ xscale2pmu_write_event_select(evtsel);
+ xscale2pmu_write_int_enable(ien);
++ xscale2pmu_write_overflow_flags(of_flags);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+ }
+
+diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
+index a9e0dae..1620b15 100644
+--- a/arch/arm/mach-dove/common.c
++++ b/arch/arm/mach-dove/common.c
+@@ -29,6 +29,7 @@
+ #include <asm/mach/arch.h>
+ #include <linux/irq.h>
+ #include <plat/time.h>
++#include <plat/ehci-orion.h>
+ #include <plat/common.h>
+ #include "common.h"
+
+@@ -72,7 +73,7 @@ void __init dove_map_io(void)
+ void __init dove_ehci0_init(void)
+ {
+ orion_ehci_init(&dove_mbus_dram_info,
+- DOVE_USB0_PHYS_BASE, IRQ_DOVE_USB0);
++ DOVE_USB0_PHYS_BASE, IRQ_DOVE_USB0, EHCI_PHY_NA);
+ }
+
+ /*****************************************************************************
+diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
+index f3248cf..c5dbbb3 100644
+--- a/arch/arm/mach-kirkwood/common.c
++++ b/arch/arm/mach-kirkwood/common.c
+@@ -28,6 +28,7 @@
+ #include <plat/cache-feroceon-l2.h>
+ #include <plat/mvsdio.h>
+ #include <plat/orion_nand.h>
++#include <plat/ehci-orion.h>
+ #include <plat/common.h>
+ #include <plat/time.h>
+ #include "common.h"
+@@ -74,7 +75,7 @@ void __init kirkwood_ehci_init(void)
+ {
+ kirkwood_clk_ctrl |= CGC_USB0;
+ orion_ehci_init(&kirkwood_mbus_dram_info,
+- USB_PHYS_BASE, IRQ_KIRKWOOD_USB);
++ USB_PHYS_BASE, IRQ_KIRKWOOD_USB, EHCI_PHY_NA);
+ }
+
+
+diff --git a/arch/arm/mach-kirkwood/mpp.h b/arch/arm/mach-kirkwood/mpp.h
+index ac78795..7afccf4 100644
+--- a/arch/arm/mach-kirkwood/mpp.h
++++ b/arch/arm/mach-kirkwood/mpp.h
+@@ -31,313 +31,313 @@
+ #define MPP_F6282_MASK MPP( 0, 0x0, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP0_GPIO MPP( 0, 0x0, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP0_NF_IO2 MPP( 0, 0x1, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP0_SPI_SCn MPP( 0, 0x2, 0, 1, 1, 1, 1, 1, 1 )
++#define MPP0_NF_IO2 MPP( 0, 0x1, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP0_SPI_SCn MPP( 0, 0x2, 0, 0, 1, 1, 1, 1, 1 )
+
+ #define MPP1_GPO MPP( 1, 0x0, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP1_NF_IO3 MPP( 1, 0x1, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP1_SPI_MOSI MPP( 1, 0x2, 0, 1, 1, 1, 1, 1, 1 )
++#define MPP1_NF_IO3 MPP( 1, 0x1, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP1_SPI_MOSI MPP( 1, 0x2, 0, 0, 1, 1, 1, 1, 1 )
+
+ #define MPP2_GPO MPP( 2, 0x0, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP2_NF_IO4 MPP( 2, 0x1, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP2_SPI_SCK MPP( 2, 0x2, 0, 1, 1, 1, 1, 1, 1 )
++#define MPP2_NF_IO4 MPP( 2, 0x1, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP2_SPI_SCK MPP( 2, 0x2, 0, 0, 1, 1, 1, 1, 1 )
+
+ #define MPP3_GPO MPP( 3, 0x0, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP3_NF_IO5 MPP( 3, 0x1, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP3_SPI_MISO MPP( 3, 0x2, 1, 0, 1, 1, 1, 1, 1 )
++#define MPP3_NF_IO5 MPP( 3, 0x1, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP3_SPI_MISO MPP( 3, 0x2, 0, 0, 1, 1, 1, 1, 1 )
+
+ #define MPP4_GPIO MPP( 4, 0x0, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP4_NF_IO6 MPP( 4, 0x1, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP4_UART0_RXD MPP( 4, 0x2, 1, 0, 1, 1, 1, 1, 1 )
+-#define MPP4_SATA1_ACTn MPP( 4, 0x5, 0, 1, 0, 0, 1, 1, 1 )
++#define MPP4_NF_IO6 MPP( 4, 0x1, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP4_UART0_RXD MPP( 4, 0x2, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP4_SATA1_ACTn MPP( 4, 0x5, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP4_LCD_VGA_HSYNC MPP( 4, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+-#define MPP4_PTP_CLK MPP( 4, 0xd, 1, 0, 1, 1, 1, 1, 0 )
++#define MPP4_PTP_CLK MPP( 4, 0xd, 0, 0, 1, 1, 1, 1, 0 )
+
+ #define MPP5_GPO MPP( 5, 0x0, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP5_NF_IO7 MPP( 5, 0x1, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP5_UART0_TXD MPP( 5, 0x2, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP5_PTP_TRIG_GEN MPP( 5, 0x4, 0, 1, 1, 1, 1, 1, 0 )
+-#define MPP5_SATA0_ACTn MPP( 5, 0x5, 0, 1, 0, 1, 1, 1, 1 )
++#define MPP5_NF_IO7 MPP( 5, 0x1, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP5_UART0_TXD MPP( 5, 0x2, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP5_PTP_TRIG_GEN MPP( 5, 0x4, 0, 0, 1, 1, 1, 1, 0 )
++#define MPP5_SATA0_ACTn MPP( 5, 0x5, 0, 0, 0, 1, 1, 1, 1 )
+ #define MPP5_LCD_VGA_VSYNC MPP( 5, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+-#define MPP6_SYSRST_OUTn MPP( 6, 0x1, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP6_SPI_MOSI MPP( 6, 0x2, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP6_PTP_TRIG_GEN MPP( 6, 0x3, 0, 1, 1, 1, 1, 1, 0 )
++#define MPP6_SYSRST_OUTn MPP( 6, 0x1, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP6_SPI_MOSI MPP( 6, 0x2, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP6_PTP_TRIG_GEN MPP( 6, 0x3, 0, 0, 1, 1, 1, 1, 0 )
+
+ #define MPP7_GPO MPP( 7, 0x0, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP7_PEX_RST_OUTn MPP( 7, 0x1, 0, 1, 1, 1, 1, 1, 0 )
+-#define MPP7_SPI_SCn MPP( 7, 0x2, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP7_PTP_TRIG_GEN MPP( 7, 0x3, 0, 1, 1, 1, 1, 1, 0 )
+-#define MPP7_LCD_PWM MPP( 7, 0xb, 0, 1, 0, 0, 0, 0, 1 )
++#define MPP7_PEX_RST_OUTn MPP( 7, 0x1, 0, 0, 1, 1, 1, 1, 0 )
++#define MPP7_SPI_SCn MPP( 7, 0x2, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP7_PTP_TRIG_GEN MPP( 7, 0x3, 0, 0, 1, 1, 1, 1, 0 )
++#define MPP7_LCD_PWM MPP( 7, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP8_GPIO MPP( 8, 0x0, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP8_TW0_SDA MPP( 8, 0x1, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP8_UART0_RTS MPP( 8, 0x2, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP8_UART1_RTS MPP( 8, 0x3, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP8_MII0_RXERR MPP( 8, 0x4, 1, 0, 0, 1, 1, 1, 1 )
+-#define MPP8_SATA1_PRESENTn MPP( 8, 0x5, 0, 1, 0, 0, 1, 1, 1 )
+-#define MPP8_PTP_CLK MPP( 8, 0xc, 1, 0, 1, 1, 1, 1, 0 )
+-#define MPP8_MII0_COL MPP( 8, 0xd, 1, 0, 1, 1, 1, 1, 1 )
++#define MPP8_TW0_SDA MPP( 8, 0x1, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP8_UART0_RTS MPP( 8, 0x2, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP8_UART1_RTS MPP( 8, 0x3, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP8_MII0_RXERR MPP( 8, 0x4, 0, 0, 0, 1, 1, 1, 1 )
++#define MPP8_SATA1_PRESENTn MPP( 8, 0x5, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP8_PTP_CLK MPP( 8, 0xc, 0, 0, 1, 1, 1, 1, 0 )
++#define MPP8_MII0_COL MPP( 8, 0xd, 0, 0, 1, 1, 1, 1, 1 )
+
+ #define MPP9_GPIO MPP( 9, 0x0, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP9_TW0_SCK MPP( 9, 0x1, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP9_UART0_CTS MPP( 9, 0x2, 1, 0, 1, 1, 1, 1, 1 )
+-#define MPP9_UART1_CTS MPP( 9, 0x3, 1, 0, 1, 1, 1, 1, 1 )
+-#define MPP9_SATA0_PRESENTn MPP( 9, 0x5, 0, 1, 0, 1, 1, 1, 1 )
+-#define MPP9_PTP_EVENT_REQ MPP( 9, 0xc, 1, 0, 1, 1, 1, 1, 0 )
+-#define MPP9_MII0_CRS MPP( 9, 0xd, 1, 0, 1, 1, 1, 1, 1 )
++#define MPP9_TW0_SCK MPP( 9, 0x1, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP9_UART0_CTS MPP( 9, 0x2, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP9_UART1_CTS MPP( 9, 0x3, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP9_SATA0_PRESENTn MPP( 9, 0x5, 0, 0, 0, 1, 1, 1, 1 )
++#define MPP9_PTP_EVENT_REQ MPP( 9, 0xc, 0, 0, 1, 1, 1, 1, 0 )
++#define MPP9_MII0_CRS MPP( 9, 0xd, 0, 0, 1, 1, 1, 1, 1 )
+
+ #define MPP10_GPO MPP( 10, 0x0, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP10_SPI_SCK MPP( 10, 0x2, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP10_UART0_TXD MPP( 10, 0X3, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP10_SATA1_ACTn MPP( 10, 0x5, 0, 1, 0, 0, 1, 1, 1 )
+-#define MPP10_PTP_TRIG_GEN MPP( 10, 0xc, 0, 1, 1, 1, 1, 1, 0 )
++#define MPP10_SPI_SCK MPP( 10, 0x2, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP10_UART0_TXD MPP( 10, 0X3, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP10_SATA1_ACTn MPP( 10, 0x5, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP10_PTP_TRIG_GEN MPP( 10, 0xc, 0, 0, 1, 1, 1, 1, 0 )
+
+ #define MPP11_GPIO MPP( 11, 0x0, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP11_SPI_MISO MPP( 11, 0x2, 1, 0, 1, 1, 1, 1, 1 )
+-#define MPP11_UART0_RXD MPP( 11, 0x3, 1, 0, 1, 1, 1, 1, 1 )
+-#define MPP11_PTP_EVENT_REQ MPP( 11, 0x4, 1, 0, 1, 1, 1, 1, 0 )
+-#define MPP11_PTP_TRIG_GEN MPP( 11, 0xc, 0, 1, 1, 1, 1, 1, 0 )
+-#define MPP11_PTP_CLK MPP( 11, 0xd, 1, 0, 1, 1, 1, 1, 0 )
+-#define MPP11_SATA0_ACTn MPP( 11, 0x5, 0, 1, 0, 1, 1, 1, 1 )
++#define MPP11_SPI_MISO MPP( 11, 0x2, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP11_UART0_RXD MPP( 11, 0x3, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP11_PTP_EVENT_REQ MPP( 11, 0x4, 0, 0, 1, 1, 1, 1, 0 )
++#define MPP11_PTP_TRIG_GEN MPP( 11, 0xc, 0, 0, 1, 1, 1, 1, 0 )
++#define MPP11_PTP_CLK MPP( 11, 0xd, 0, 0, 1, 1, 1, 1, 0 )
++#define MPP11_SATA0_ACTn MPP( 11, 0x5, 0, 0, 0, 1, 1, 1, 1 )
+
+ #define MPP12_GPO MPP( 12, 0x0, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP12_SD_CLK MPP( 12, 0x1, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP12_AU_SPDIF0 MPP( 12, 0xa, 0, 1, 0, 0, 0, 0, 1 )
+-#define MPP12_SPI_MOSI MPP( 12, 0xb, 0, 1, 0, 0, 0, 0, 1 )
+-#define MPP12_TW1_SDA MPP( 12, 0xd, 1, 0, 0, 0, 0, 0, 1 )
++#define MPP12_SD_CLK MPP( 12, 0x1, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP12_AU_SPDIF0 MPP( 12, 0xa, 0, 0, 0, 0, 0, 0, 1 )
++#define MPP12_SPI_MOSI MPP( 12, 0xb, 0, 0, 0, 0, 0, 0, 1 )
++#define MPP12_TW1_SDA MPP( 12, 0xd, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP13_GPIO MPP( 13, 0x0, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP13_SD_CMD MPP( 13, 0x1, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP13_UART1_TXD MPP( 13, 0x3, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP13_AU_SPDIFRMCLK MPP( 13, 0xa, 0, 1, 0, 0, 0, 0, 1 )
+-#define MPP13_LCDPWM MPP( 13, 0xb, 0, 1, 0, 0, 0, 0, 1 )
++#define MPP13_SD_CMD MPP( 13, 0x1, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP13_UART1_TXD MPP( 13, 0x3, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP13_AU_SPDIFRMCLK MPP( 13, 0xa, 0, 0, 0, 0, 0, 0, 1 )
++#define MPP13_LCDPWM MPP( 13, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP14_GPIO MPP( 14, 0x0, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP14_SD_D0 MPP( 14, 0x1, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP14_UART1_RXD MPP( 14, 0x3, 1, 0, 1, 1, 1, 1, 1 )
+-#define MPP14_SATA1_PRESENTn MPP( 14, 0x4, 0, 1, 0, 0, 1, 1, 1 )
+-#define MPP14_AU_SPDIFI MPP( 14, 0xa, 1, 0, 0, 0, 0, 0, 1 )
+-#define MPP14_AU_I2SDI MPP( 14, 0xb, 1, 0, 0, 0, 0, 0, 1 )
+-#define MPP14_MII0_COL MPP( 14, 0xd, 1, 0, 1, 1, 1, 1, 1 )
++#define MPP14_SD_D0 MPP( 14, 0x1, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP14_UART1_RXD MPP( 14, 0x3, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP14_SATA1_PRESENTn MPP( 14, 0x4, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP14_AU_SPDIFI MPP( 14, 0xa, 0, 0, 0, 0, 0, 0, 1 )
++#define MPP14_AU_I2SDI MPP( 14, 0xb, 0, 0, 0, 0, 0, 0, 1 )
++#define MPP14_MII0_COL MPP( 14, 0xd, 0, 0, 1, 1, 1, 1, 1 )
+
+ #define MPP15_GPIO MPP( 15, 0x0, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP15_SD_D1 MPP( 15, 0x1, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP15_UART0_RTS MPP( 15, 0x2, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP15_UART1_TXD MPP( 15, 0x3, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP15_SATA0_ACTn MPP( 15, 0x4, 0, 1, 0, 1, 1, 1, 1 )
+-#define MPP15_SPI_CSn MPP( 15, 0xb, 0, 1, 0, 0, 0, 0, 1 )
++#define MPP15_SD_D1 MPP( 15, 0x1, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP15_UART0_RTS MPP( 15, 0x2, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP15_UART1_TXD MPP( 15, 0x3, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP15_SATA0_ACTn MPP( 15, 0x4, 0, 0, 0, 1, 1, 1, 1 )
++#define MPP15_SPI_CSn MPP( 15, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP16_GPIO MPP( 16, 0x0, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP16_SD_D2 MPP( 16, 0x1, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP16_UART0_CTS MPP( 16, 0x2, 1, 0, 1, 1, 1, 1, 1 )
+-#define MPP16_UART1_RXD MPP( 16, 0x3, 1, 0, 1, 1, 1, 1, 1 )
+-#define MPP16_SATA1_ACTn MPP( 16, 0x4, 0, 1, 0, 0, 1, 1, 1 )
+-#define MPP16_LCD_EXT_REF_CLK MPP( 16, 0xb, 1, 0, 0, 0, 0, 0, 1 )
+-#define MPP16_MII0_CRS MPP( 16, 0xd, 1, 0, 1, 1, 1, 1, 1 )
++#define MPP16_SD_D2 MPP( 16, 0x1, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP16_UART0_CTS MPP( 16, 0x2, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP16_UART1_RXD MPP( 16, 0x3, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP16_SATA1_ACTn MPP( 16, 0x4, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP16_LCD_EXT_REF_CLK MPP( 16, 0xb, 0, 0, 0, 0, 0, 0, 1 )
++#define MPP16_MII0_CRS MPP( 16, 0xd, 0, 0, 1, 1, 1, 1, 1 )
+
+ #define MPP17_GPIO MPP( 17, 0x0, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP17_SD_D3 MPP( 17, 0x1, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP17_SATA0_PRESENTn MPP( 17, 0x4, 0, 1, 0, 1, 1, 1, 1 )
+-#define MPP17_SATA1_ACTn MPP( 17, 0xa, 0, 1, 0, 0, 0, 0, 1 )
+-#define MPP17_TW1_SCK MPP( 17, 0xd, 1, 1, 0, 0, 0, 0, 1 )
++#define MPP17_SD_D3 MPP( 17, 0x1, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP17_SATA0_PRESENTn MPP( 17, 0x4, 0, 0, 0, 1, 1, 1, 1 )
++#define MPP17_SATA1_ACTn MPP( 17, 0xa, 0, 0, 0, 0, 0, 0, 1 )
++#define MPP17_TW1_SCK MPP( 17, 0xd, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP18_GPO MPP( 18, 0x0, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP18_NF_IO0 MPP( 18, 0x1, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP18_PEX0_CLKREQ MPP( 18, 0x2, 0, 1, 0, 0, 0, 0, 1 )
++#define MPP18_NF_IO0 MPP( 18, 0x1, 0, 0, 1, 1, 1, 1, 1 )
++#define MPP18_PEX0_CLKREQ MPP( 18, 0x2, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP19_GPO MPP( 19, 0x0, 0, 1, 1, 1, 1, 1, 1 )
+-#define MPP19_NF_IO1 MPP( 19, 0x1, 1, 1, 1, 1, 1, 1, 1 )
++#define MPP19_NF_IO1 MPP( 19, 0x1, 0, 0, 1, 1, 1, 1, 1 )
+
+ #define MPP20_GPIO MPP( 20, 0x0, 1, 1, 0, 1, 1, 1, 1 )
+-#define MPP20_TSMP0 MPP( 20, 0x1, 1, 1, 0, 0, 1, 1, 1 )
+-#define MPP20_TDM_CH0_TX_QL MPP( 20, 0x2, 0, 1, 0, 0, 1, 1, 1 )
++#define MPP20_TSMP0 MPP( 20, 0x1, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP20_TDM_CH0_TX_QL MPP( 20, 0x2, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP20_GE1_TXD0 MPP( 20, 0x3, 0, 0, 0, 1, 1, 1, 1 )
+-#define MPP20_AU_SPDIFI MPP( 20, 0x4, 1, 0, 0, 0, 1, 1, 1 )
+-#define MPP20_SATA1_ACTn MPP( 20, 0x5, 0, 1, 0, 0, 1, 1, 1 )
++#define MPP20_AU_SPDIFI MPP( 20, 0x4, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP20_SATA1_ACTn MPP( 20, 0x5, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP20_LCD_D0 MPP( 20, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP21_GPIO MPP( 21, 0x0, 1, 1, 0, 1, 1, 1, 1 )
+-#define MPP21_TSMP1 MPP( 21, 0x1, 1, 1, 0, 0, 1, 1, 1 )
+-#define MPP21_TDM_CH0_RX_QL MPP( 21, 0x2, 0, 1, 0, 0, 1, 1, 1 )
++#define MPP21_TSMP1 MPP( 21, 0x1, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP21_TDM_CH0_RX_QL MPP( 21, 0x2, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP21_GE1_TXD1 MPP( 21, 0x3, 0, 0, 0, 1, 1, 1, 1 )
+-#define MPP21_AU_SPDIFO MPP( 21, 0x4, 0, 1, 0, 0, 1, 1, 1 )
+-#define MPP21_SATA0_ACTn MPP( 21, 0x5, 0, 1, 0, 1, 1, 1, 1 )
++#define MPP21_AU_SPDIFO MPP( 21, 0x4, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP21_SATA0_ACTn MPP( 21, 0x5, 0, 0, 0, 1, 1, 1, 1 )
+ #define MPP21_LCD_D1 MPP( 21, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP22_GPIO MPP( 22, 0x0, 1, 1, 0, 1, 1, 1, 1 )
+-#define MPP22_TSMP2 MPP( 22, 0x1, 1, 1, 0, 0, 1, 1, 1 )
+-#define MPP22_TDM_CH2_TX_QL MPP( 22, 0x2, 0, 1, 0, 0, 1, 1, 1 )
++#define MPP22_TSMP2 MPP( 22, 0x1, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP22_TDM_CH2_TX_QL MPP( 22, 0x2, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP22_GE1_TXD2 MPP( 22, 0x3, 0, 0, 0, 1, 1, 1, 1 )
+-#define MPP22_AU_SPDIFRMKCLK MPP( 22, 0x4, 0, 1, 0, 0, 1, 1, 1 )
+-#define MPP22_SATA1_PRESENTn MPP( 22, 0x5, 0, 1, 0, 0, 1, 1, 1 )
++#define MPP22_AU_SPDIFRMKCLK MPP( 22, 0x4, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP22_SATA1_PRESENTn MPP( 22, 0x5, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP22_LCD_D2 MPP( 22, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP23_GPIO MPP( 23, 0x0, 1, 1, 0, 1, 1, 1, 1 )
+-#define MPP23_TSMP3 MPP( 23, 0x1, 1, 1, 0, 0, 1, 1, 1 )
+-#define MPP23_TDM_CH2_RX_QL MPP( 23, 0x2, 1, 0, 0, 0, 1, 1, 1 )
++#define MPP23_TSMP3 MPP( 23, 0x1, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP23_TDM_CH2_RX_QL MPP( 23, 0x2, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP23_GE1_TXD3 MPP( 23, 0x3, 0, 0, 0, 1, 1, 1, 1 )
+-#define MPP23_AU_I2SBCLK MPP( 23, 0x4, 0, 1, 0, 0, 1, 1, 1 )
+-#define MPP23_SATA0_PRESENTn MPP( 23, 0x5, 0, 1, 0, 1, 1, 1, 1 )
++#define MPP23_AU_I2SBCLK MPP( 23, 0x4, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP23_SATA0_PRESENTn MPP( 23, 0x5, 0, 0, 0, 1, 1, 1, 1 )
+ #define MPP23_LCD_D3 MPP( 23, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP24_GPIO MPP( 24, 0x0, 1, 1, 0, 1, 1, 1, 1 )
+-#define MPP24_TSMP4 MPP( 24, 0x1, 1, 1, 0, 0, 1, 1, 1 )
+-#define MPP24_TDM_SPI_CS0 MPP( 24, 0x2, 0, 1, 0, 0, 1, 1, 1 )
++#define MPP24_TSMP4 MPP( 24, 0x1, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP24_TDM_SPI_CS0 MPP( 24, 0x2, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP24_GE1_RXD0 MPP( 24, 0x3, 0, 0, 0, 1, 1, 1, 1 )
+-#define MPP24_AU_I2SDO MPP( 24, 0x4, 0, 1, 0, 0, 1, 1, 1 )
++#define MPP24_AU_I2SDO MPP( 24, 0x4, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP24_LCD_D4 MPP( 24, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP25_GPIO MPP( 25, 0x0, 1, 1, 0, 1, 1, 1, 1 )
+-#define MPP25_TSMP5 MPP( 25, 0x1, 1, 1, 0, 0, 1, 1, 1 )
+-#define MPP25_TDM_SPI_SCK MPP( 25, 0x2, 0, 1, 0, 0, 1, 1, 1 )
++#define MPP25_TSMP5 MPP( 25, 0x1, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP25_TDM_SPI_SCK MPP( 25, 0x2, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP25_GE1_RXD1 MPP( 25, 0x3, 0, 0, 0, 1, 1, 1, 1 )
+-#define MPP25_AU_I2SLRCLK MPP( 25, 0x4, 0, 1, 0, 0, 1, 1, 1 )
++#define MPP25_AU_I2SLRCLK MPP( 25, 0x4, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP25_LCD_D5 MPP( 25, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP26_GPIO MPP( 26, 0x0, 1, 1, 0, 1, 1, 1, 1 )
+-#define MPP26_TSMP6 MPP( 26, 0x1, 1, 1, 0, 0, 1, 1, 1 )
+-#define MPP26_TDM_SPI_MISO MPP( 26, 0x2, 1, 0, 0, 0, 1, 1, 1 )
++#define MPP26_TSMP6 MPP( 26, 0x1, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP26_TDM_SPI_MISO MPP( 26, 0x2, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP26_GE1_RXD2 MPP( 26, 0x3, 0, 0, 0, 1, 1, 1, 1 )
+-#define MPP26_AU_I2SMCLK MPP( 26, 0x4, 0, 1, 0, 0, 1, 1, 1 )
++#define MPP26_AU_I2SMCLK MPP( 26, 0x4, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP26_LCD_D6 MPP( 26, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP27_GPIO MPP( 27, 0x0, 1, 1, 0, 1, 1, 1, 1 )
+-#define MPP27_TSMP7 MPP( 27, 0x1, 1, 1, 0, 0, 1, 1, 1 )
+-#define MPP27_TDM_SPI_MOSI MPP( 27, 0x2, 0, 1, 0, 0, 1, 1, 1 )
++#define MPP27_TSMP7 MPP( 27, 0x1, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP27_TDM_SPI_MOSI MPP( 27, 0x2, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP27_GE1_RXD3 MPP( 27, 0x3, 0, 0, 0, 1, 1, 1, 1 )
+-#define MPP27_AU_I2SDI MPP( 27, 0x4, 1, 0, 0, 0, 1, 1, 1 )
++#define MPP27_AU_I2SDI MPP( 27, 0x4, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP27_LCD_D7 MPP( 27, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP28_GPIO MPP( 28, 0x0, 1, 1, 0, 1, 1, 1, 1 )
+-#define MPP28_TSMP8 MPP( 28, 0x1, 1, 1, 0, 0, 1, 1, 1 )
++#define MPP28_TSMP8 MPP( 28, 0x1, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP28_TDM_CODEC_INTn MPP( 28, 0x2, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP28_GE1_COL MPP( 28, 0x3, 0, 0, 0, 1, 1, 1, 1 )
+-#define MPP28_AU_EXTCLK MPP( 28, 0x4, 1, 0, 0, 0, 1, 1, 1 )
++#define MPP28_AU_EXTCLK MPP( 28, 0x4, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP28_LCD_D8 MPP( 28, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP29_GPIO MPP( 29, 0x0, 1, 1, 0, 1, 1, 1, 1 )
+-#define MPP29_TSMP9 MPP( 29, 0x1, 1, 1, 0, 0, 1, 1, 1 )
++#define MPP29_TSMP9 MPP( 29, 0x1, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP29_TDM_CODEC_RSTn MPP( 29, 0x2, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP29_GE1_TCLK MPP( 29, 0x3, 0, 0, 0, 1, 1, 1, 1 )
+ #define MPP29_LCD_D9 MPP( 29, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP30_GPIO MPP( 30, 0x0, 1, 1, 0, 1, 1, 1, 1 )
+-#define MPP30_TSMP10 MPP( 30, 0x1, 1, 1, 0, 0, 1, 1, 1 )
+-#define MPP30_TDM_PCLK MPP( 30, 0x2, 1, 1, 0, 0, 1, 1, 1 )
++#define MPP30_TSMP10 MPP( 30, 0x1, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP30_TDM_PCLK MPP( 30, 0x2, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP30_GE1_RXCTL MPP( 30, 0x3, 0, 0, 0, 1, 1, 1, 1 )
+ #define MPP30_LCD_D10 MPP( 30, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP31_GPIO MPP( 31, 0x0, 1, 1, 0, 1, 1, 1, 1 )
+-#define MPP31_TSMP11 MPP( 31, 0x1, 1, 1, 0, 0, 1, 1, 1 )
+-#define MPP31_TDM_FS MPP( 31, 0x2, 1, 1, 0, 0, 1, 1, 1 )
++#define MPP31_TSMP11 MPP( 31, 0x1, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP31_TDM_FS MPP( 31, 0x2, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP31_GE1_RXCLK MPP( 31, 0x3, 0, 0, 0, 1, 1, 1, 1 )
+ #define MPP31_LCD_D11 MPP( 31, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP32_GPIO MPP( 32, 0x0, 1, 1, 0, 1, 1, 1, 1 )
+-#define MPP32_TSMP12 MPP( 32, 0x1, 1, 1, 0, 0, 1, 1, 1 )
+-#define MPP32_TDM_DRX MPP( 32, 0x2, 1, 0, 0, 0, 1, 1, 1 )
++#define MPP32_TSMP12 MPP( 32, 0x1, 0, 0, 0, 0, 1, 1, 1 )
++#define MPP32_TDM_DRX MPP( 32, 0x2, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP32_GE1_TCLKOUT MPP( 32, 0x3, 0, 0, 0, 1, 1, 1, 1 )
+ #define MPP32_LCD_D12 MPP( 32, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP33_GPO MPP( 33, 0x0, 0, 1, 0, 1, 1, 1, 1 )
+-#define MPP33_TDM_DTX MPP( 33, 0x2, 0, 1, 0, 0, 1, 1, 1 )
++#define MPP33_TDM_DTX MPP( 33, 0x2, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP33_GE1_TXCTL MPP( 33, 0x3, 0, 0, 0, 1, 1, 1, 1 )
+ #define MPP33_LCD_D13 MPP( 33, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP34_GPIO MPP( 34, 0x0, 1, 1, 0, 1, 1, 1, 1 )
+-#define MPP34_TDM_SPI_CS1 MPP( 34, 0x2, 0, 1, 0, 0, 1, 1, 1 )
++#define MPP34_TDM_SPI_CS1 MPP( 34, 0x2, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP34_GE1_TXEN MPP( 34, 0x3, 0, 0, 0, 1, 1, 1, 1 )
+-#define MPP34_SATA1_ACTn MPP( 34, 0x5, 0, 1, 0, 0, 0, 1, 1 )
++#define MPP34_SATA1_ACTn MPP( 34, 0x5, 0, 0, 0, 0, 0, 1, 1 )
+ #define MPP34_LCD_D14 MPP( 34, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP35_GPIO MPP( 35, 0x0, 1, 1, 1, 1, 1, 1, 1 )
+-#define MPP35_TDM_CH0_TX_QL MPP( 35, 0x2, 0, 1, 0, 0, 1, 1, 1 )
++#define MPP35_TDM_CH0_TX_QL MPP( 35, 0x2, 0, 0, 0, 0, 1, 1, 1 )
+ #define MPP35_GE1_RXERR MPP( 35, 0x3, 0, 0, 0, 1, 1, 1, 1 )
+-#define MPP35_SATA0_ACTn MPP( 35, 0x5, 0, 1, 0, 1, 1, 1, 1 )
++#define MPP35_SATA0_ACTn MPP( 35, 0x5, 0, 0, 0, 1, 1, 1, 1 )
+ #define MPP35_LCD_D15 MPP( 22, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+-#define MPP35_MII0_RXERR MPP( 35, 0xc, 1, 0, 1, 1, 1, 1, 1 )
++#define MPP35_MII0_RXERR MPP( 35, 0xc, 0, 0, 1, 1, 1, 1, 1 )
+
+ #define MPP36_GPIO MPP( 36, 0x0, 1, 1, 1, 0, 0, 1, 1 )
+-#define MPP36_TSMP0 MPP( 36, 0x1, 1, 1, 0, 0, 0, 1, 1 )
+-#define MPP36_TDM_SPI_CS1 MPP( 36, 0x2, 0, 1, 0, 0, 0, 1, 1 )
+-#define MPP36_AU_SPDIFI MPP( 36, 0x4, 1, 0, 1, 0, 0, 1, 1 )
+-#define MPP36_TW1_SDA MPP( 36, 0xb, 1, 1, 0, 0, 0, 0, 1 )
++#define MPP36_TSMP0 MPP( 36, 0x1, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP36_TDM_SPI_CS1 MPP( 36, 0x2, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP36_AU_SPDIFI MPP( 36, 0x4, 0, 0, 1, 0, 0, 1, 1 )
++#define MPP36_TW1_SDA MPP( 36, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP37_GPIO MPP( 37, 0x0, 1, 1, 1, 0, 0, 1, 1 )
+-#define MPP37_TSMP1 MPP( 37, 0x1, 1, 1, 0, 0, 0, 1, 1 )
+-#define MPP37_TDM_CH2_TX_QL MPP( 37, 0x2, 0, 1, 0, 0, 0, 1, 1 )
+-#define MPP37_AU_SPDIFO MPP( 37, 0x4, 0, 1, 1, 0, 0, 1, 1 )
+-#define MPP37_TW1_SCK MPP( 37, 0xb, 1, 1, 0, 0, 0, 0, 1 )
++#define MPP37_TSMP1 MPP( 37, 0x1, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP37_TDM_CH2_TX_QL MPP( 37, 0x2, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP37_AU_SPDIFO MPP( 37, 0x4, 0, 0, 1, 0, 0, 1, 1 )
++#define MPP37_TW1_SCK MPP( 37, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP38_GPIO MPP( 38, 0x0, 1, 1, 1, 0, 0, 1, 1 )
+-#define MPP38_TSMP2 MPP( 38, 0x1, 1, 1, 0, 0, 0, 1, 1 )
+-#define MPP38_TDM_CH2_RX_QL MPP( 38, 0x2, 0, 1, 0, 0, 0, 1, 1 )
+-#define MPP38_AU_SPDIFRMLCLK MPP( 38, 0x4, 0, 1, 1, 0, 0, 1, 1 )
++#define MPP38_TSMP2 MPP( 38, 0x1, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP38_TDM_CH2_RX_QL MPP( 38, 0x2, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP38_AU_SPDIFRMLCLK MPP( 38, 0x4, 0, 0, 1, 0, 0, 1, 1 )
+ #define MPP38_LCD_D18 MPP( 38, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP39_GPIO MPP( 39, 0x0, 1, 1, 1, 0, 0, 1, 1 )
+-#define MPP39_TSMP3 MPP( 39, 0x1, 1, 1, 0, 0, 0, 1, 1 )
+-#define MPP39_TDM_SPI_CS0 MPP( 39, 0x2, 0, 1, 0, 0, 0, 1, 1 )
+-#define MPP39_AU_I2SBCLK MPP( 39, 0x4, 0, 1, 1, 0, 0, 1, 1 )
++#define MPP39_TSMP3 MPP( 39, 0x1, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP39_TDM_SPI_CS0 MPP( 39, 0x2, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP39_AU_I2SBCLK MPP( 39, 0x4, 0, 0, 1, 0, 0, 1, 1 )
+ #define MPP39_LCD_D19 MPP( 39, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP40_GPIO MPP( 40, 0x0, 1, 1, 1, 0, 0, 1, 1 )
+-#define MPP40_TSMP4 MPP( 40, 0x1, 1, 1, 0, 0, 0, 1, 1 )
+-#define MPP40_TDM_SPI_SCK MPP( 40, 0x2, 0, 1, 0, 0, 0, 1, 1 )
+-#define MPP40_AU_I2SDO MPP( 40, 0x4, 0, 1, 1, 0, 0, 1, 1 )
++#define MPP40_TSMP4 MPP( 40, 0x1, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP40_TDM_SPI_SCK MPP( 40, 0x2, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP40_AU_I2SDO MPP( 40, 0x4, 0, 0, 1, 0, 0, 1, 1 )
+ #define MPP40_LCD_D20 MPP( 40, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP41_GPIO MPP( 41, 0x0, 1, 1, 1, 0, 0, 1, 1 )
+-#define MPP41_TSMP5 MPP( 41, 0x1, 1, 1, 0, 0, 0, 1, 1 )
+-#define MPP41_TDM_SPI_MISO MPP( 41, 0x2, 1, 0, 0, 0, 0, 1, 1 )
+-#define MPP41_AU_I2SLRCLK MPP( 41, 0x4, 0, 1, 1, 0, 0, 1, 1 )
++#define MPP41_TSMP5 MPP( 41, 0x1, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP41_TDM_SPI_MISO MPP( 41, 0x2, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP41_AU_I2SLRCLK MPP( 41, 0x4, 0, 0, 1, 0, 0, 1, 1 )
+ #define MPP41_LCD_D21 MPP( 41, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP42_GPIO MPP( 42, 0x0, 1, 1, 1, 0, 0, 1, 1 )
+-#define MPP42_TSMP6 MPP( 42, 0x1, 1, 1, 0, 0, 0, 1, 1 )
+-#define MPP42_TDM_SPI_MOSI MPP( 42, 0x2, 0, 1, 0, 0, 0, 1, 1 )
+-#define MPP42_AU_I2SMCLK MPP( 42, 0x4, 0, 1, 1, 0, 0, 1, 1 )
++#define MPP42_TSMP6 MPP( 42, 0x1, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP42_TDM_SPI_MOSI MPP( 42, 0x2, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP42_AU_I2SMCLK MPP( 42, 0x4, 0, 0, 1, 0, 0, 1, 1 )
+ #define MPP42_LCD_D22 MPP( 42, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP43_GPIO MPP( 43, 0x0, 1, 1, 1, 0, 0, 1, 1 )
+-#define MPP43_TSMP7 MPP( 43, 0x1, 1, 1, 0, 0, 0, 1, 1 )
++#define MPP43_TSMP7 MPP( 43, 0x1, 0, 0, 0, 0, 0, 1, 1 )
+ #define MPP43_TDM_CODEC_INTn MPP( 43, 0x2, 0, 0, 0, 0, 0, 1, 1 )
+-#define MPP43_AU_I2SDI MPP( 43, 0x4, 1, 0, 1, 0, 0, 1, 1 )
++#define MPP43_AU_I2SDI MPP( 43, 0x4, 0, 0, 1, 0, 0, 1, 1 )
+ #define MPP43_LCD_D23 MPP( 22, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP44_GPIO MPP( 44, 0x0, 1, 1, 1, 0, 0, 1, 1 )
+-#define MPP44_TSMP8 MPP( 44, 0x1, 1, 1, 0, 0, 0, 1, 1 )
++#define MPP44_TSMP8 MPP( 44, 0x1, 0, 0, 0, 0, 0, 1, 1 )
+ #define MPP44_TDM_CODEC_RSTn MPP( 44, 0x2, 0, 0, 0, 0, 0, 1, 1 )
+-#define MPP44_AU_EXTCLK MPP( 44, 0x4, 1, 0, 1, 0, 0, 1, 1 )
++#define MPP44_AU_EXTCLK MPP( 44, 0x4, 0, 0, 1, 0, 0, 1, 1 )
+ #define MPP44_LCD_CLK MPP( 44, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP45_GPIO MPP( 45, 0x0, 1, 1, 0, 0, 0, 1, 1 )
+-#define MPP45_TSMP9 MPP( 45, 0x1, 1, 1, 0, 0, 0, 1, 1 )
+-#define MPP45_TDM_PCLK MPP( 45, 0x2, 1, 1, 0, 0, 0, 1, 1 )
++#define MPP45_TSMP9 MPP( 45, 0x1, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP45_TDM_PCLK MPP( 45, 0x2, 0, 0, 0, 0, 0, 1, 1 )
+ #define MPP245_LCD_E MPP( 45, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP46_GPIO MPP( 46, 0x0, 1, 1, 0, 0, 0, 1, 1 )
+-#define MPP46_TSMP10 MPP( 46, 0x1, 1, 1, 0, 0, 0, 1, 1 )
+-#define MPP46_TDM_FS MPP( 46, 0x2, 1, 1, 0, 0, 0, 1, 1 )
++#define MPP46_TSMP10 MPP( 46, 0x1, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP46_TDM_FS MPP( 46, 0x2, 0, 0, 0, 0, 0, 1, 1 )
+ #define MPP46_LCD_HSYNC MPP( 46, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP47_GPIO MPP( 47, 0x0, 1, 1, 0, 0, 0, 1, 1 )
+-#define MPP47_TSMP11 MPP( 47, 0x1, 1, 1, 0, 0, 0, 1, 1 )
+-#define MPP47_TDM_DRX MPP( 47, 0x2, 1, 0, 0, 0, 0, 1, 1 )
++#define MPP47_TSMP11 MPP( 47, 0x1, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP47_TDM_DRX MPP( 47, 0x2, 0, 0, 0, 0, 0, 1, 1 )
+ #define MPP47_LCD_VSYNC MPP( 47, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP48_GPIO MPP( 48, 0x0, 1, 1, 0, 0, 0, 1, 1 )
+-#define MPP48_TSMP12 MPP( 48, 0x1, 1, 1, 0, 0, 0, 1, 1 )
+-#define MPP48_TDM_DTX MPP( 48, 0x2, 0, 1, 0, 0, 0, 1, 1 )
++#define MPP48_TSMP12 MPP( 48, 0x1, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP48_TDM_DTX MPP( 48, 0x2, 0, 0, 0, 0, 0, 1, 1 )
+ #define MPP48_LCD_D16 MPP( 22, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP49_GPIO MPP( 49, 0x0, 1, 1, 0, 0, 0, 1, 0 )
+ #define MPP49_GPO MPP( 49, 0x0, 0, 1, 0, 0, 0, 0, 1 )
+-#define MPP49_TSMP9 MPP( 49, 0x1, 1, 1, 0, 0, 0, 1, 0 )
+-#define MPP49_TDM_CH0_RX_QL MPP( 49, 0x2, 0, 1, 0, 0, 0, 1, 1 )
+-#define MPP49_PTP_CLK MPP( 49, 0x5, 1, 0, 0, 0, 0, 1, 0 )
+-#define MPP49_PEX0_CLKREQ MPP( 49, 0xa, 0, 1, 0, 0, 0, 0, 1 )
++#define MPP49_TSMP9 MPP( 49, 0x1, 0, 0, 0, 0, 0, 1, 0 )
++#define MPP49_TDM_CH0_RX_QL MPP( 49, 0x2, 0, 0, 0, 0, 0, 1, 1 )
++#define MPP49_PTP_CLK MPP( 49, 0x5, 0, 0, 0, 0, 0, 1, 0 )
++#define MPP49_PEX0_CLKREQ MPP( 49, 0xa, 0, 0, 0, 0, 0, 0, 1 )
+ #define MPP49_LCD_D17 MPP( 49, 0xb, 0, 0, 0, 0, 0, 0, 1 )
+
+ #define MPP_MAX 49
+diff --git a/arch/arm/mach-lpc32xx/include/mach/irqs.h b/arch/arm/mach-lpc32xx/include/mach/irqs.h
+index 2667f52..9e3b90d 100644
+--- a/arch/arm/mach-lpc32xx/include/mach/irqs.h
++++ b/arch/arm/mach-lpc32xx/include/mach/irqs.h
+@@ -61,7 +61,7 @@
+ */
+ #define IRQ_LPC32XX_JTAG_COMM_TX LPC32XX_SIC1_IRQ(1)
+ #define IRQ_LPC32XX_JTAG_COMM_RX LPC32XX_SIC1_IRQ(2)
+-#define IRQ_LPC32XX_GPI_11 LPC32XX_SIC1_IRQ(4)
++#define IRQ_LPC32XX_GPI_28 LPC32XX_SIC1_IRQ(4)
+ #define IRQ_LPC32XX_TS_P LPC32XX_SIC1_IRQ(6)
+ #define IRQ_LPC32XX_TS_IRQ LPC32XX_SIC1_IRQ(7)
+ #define IRQ_LPC32XX_TS_AUX LPC32XX_SIC1_IRQ(8)
+diff --git a/arch/arm/mach-lpc32xx/irq.c b/arch/arm/mach-lpc32xx/irq.c
+index 4eae566..c74de01 100644
+--- a/arch/arm/mach-lpc32xx/irq.c
++++ b/arch/arm/mach-lpc32xx/irq.c
+@@ -118,6 +118,10 @@ static const struct lpc32xx_event_info lpc32xx_events[NR_IRQS] = {
+ .event_group = &lpc32xx_event_pin_regs,
+ .mask = LPC32XX_CLKPWR_EXTSRC_GPI_06_BIT,
+ },
++ [IRQ_LPC32XX_GPI_28] = {
++ .event_group = &lpc32xx_event_pin_regs,
++ .mask = LPC32XX_CLKPWR_EXTSRC_GPI_28_BIT,
++ },
+ [IRQ_LPC32XX_GPIO_00] = {
+ .event_group = &lpc32xx_event_int_regs,
+ .mask = LPC32XX_CLKPWR_INTSRC_GPIO_00_BIT,
+@@ -305,9 +309,18 @@ static int lpc32xx_irq_wake(struct irq_data *d, unsigned int state)
+
+ if (state)
+ eventreg |= lpc32xx_events[d->irq].mask;
+- else
++ else {
+ eventreg &= ~lpc32xx_events[d->irq].mask;
+
++ /*
++ * When disabling the wakeup, clear the latched
++ * event
++ */
++ __raw_writel(lpc32xx_events[d->irq].mask,
++ lpc32xx_events[d->irq].
++ event_group->rawstat_reg);
++ }
++
+ __raw_writel(eventreg,
+ lpc32xx_events[d->irq].event_group->enab_reg);
+
+@@ -380,13 +393,15 @@ void __init lpc32xx_init_irq(void)
+
+ /* Setup SIC1 */
+ __raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC1_BASE));
+- __raw_writel(MIC_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC1_BASE));
+- __raw_writel(MIC_ATR_DEFAULT, LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC1_BASE));
++ __raw_writel(SIC1_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC1_BASE));
++ __raw_writel(SIC1_ATR_DEFAULT,
++ LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC1_BASE));
+
+ /* Setup SIC2 */
+ __raw_writel(0, LPC32XX_INTC_MASK(LPC32XX_SIC2_BASE));
+- __raw_writel(MIC_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC2_BASE));
+- __raw_writel(MIC_ATR_DEFAULT, LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC2_BASE));
++ __raw_writel(SIC2_APR_DEFAULT, LPC32XX_INTC_POLAR(LPC32XX_SIC2_BASE));
++ __raw_writel(SIC2_ATR_DEFAULT,
++ LPC32XX_INTC_ACT_TYPE(LPC32XX_SIC2_BASE));
+
+ /* Configure supported IRQ's */
+ for (i = 0; i < NR_IRQS; i++) {
+diff --git a/arch/arm/mach-lpc32xx/serial.c b/arch/arm/mach-lpc32xx/serial.c
+index 429cfdb..f273528 100644
+--- a/arch/arm/mach-lpc32xx/serial.c
++++ b/arch/arm/mach-lpc32xx/serial.c
+@@ -88,6 +88,7 @@ struct uartinit {
+ char *uart_ck_name;
+ u32 ck_mode_mask;
+ void __iomem *pdiv_clk_reg;
++ resource_size_t mapbase;
+ };
+
+ static struct uartinit uartinit_data[] __initdata = {
+@@ -97,6 +98,7 @@ static struct uartinit uartinit_data[] __initdata = {
+ .ck_mode_mask =
+ LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 5),
+ .pdiv_clk_reg = LPC32XX_CLKPWR_UART5_CLK_CTRL,
++ .mapbase = LPC32XX_UART5_BASE,
+ },
+ #endif
+ #ifdef CONFIG_ARCH_LPC32XX_UART3_SELECT
+@@ -105,6 +107,7 @@ static struct uartinit uartinit_data[] __initdata = {
+ .ck_mode_mask =
+ LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 3),
+ .pdiv_clk_reg = LPC32XX_CLKPWR_UART3_CLK_CTRL,
++ .mapbase = LPC32XX_UART3_BASE,
+ },
+ #endif
+ #ifdef CONFIG_ARCH_LPC32XX_UART4_SELECT
+@@ -113,6 +116,7 @@ static struct uartinit uartinit_data[] __initdata = {
+ .ck_mode_mask =
+ LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 4),
+ .pdiv_clk_reg = LPC32XX_CLKPWR_UART4_CLK_CTRL,
++ .mapbase = LPC32XX_UART4_BASE,
+ },
+ #endif
+ #ifdef CONFIG_ARCH_LPC32XX_UART6_SELECT
+@@ -121,6 +125,7 @@ static struct uartinit uartinit_data[] __initdata = {
+ .ck_mode_mask =
+ LPC32XX_UART_CLKMODE_LOAD(LPC32XX_UART_CLKMODE_ON, 6),
+ .pdiv_clk_reg = LPC32XX_CLKPWR_UART6_CLK_CTRL,
++ .mapbase = LPC32XX_UART6_BASE,
+ },
+ #endif
+ };
+@@ -165,11 +170,24 @@ void __init lpc32xx_serial_init(void)
+
+ /* pre-UART clock divider set to 1 */
+ __raw_writel(0x0101, uartinit_data[i].pdiv_clk_reg);
++
++ /*
++ * Force a flush of the RX FIFOs to work around a
++ * HW bug
++ */
++ puart = uartinit_data[i].mapbase;
++ __raw_writel(0xC1, LPC32XX_UART_IIR_FCR(puart));
++ __raw_writel(0x00, LPC32XX_UART_DLL_FIFO(puart));
++ j = LPC32XX_SUART_FIFO_SIZE;
++ while (j--)
++ tmp = __raw_readl(
++ LPC32XX_UART_DLL_FIFO(puart));
++ __raw_writel(0, LPC32XX_UART_IIR_FCR(puart));
+ }
+
+ /* This needs to be done after all UART clocks are setup */
+ __raw_writel(clkmodes, LPC32XX_UARTCTL_CLKMODE);
+- for (i = 0; i < ARRAY_SIZE(uartinit_data) - 1; i++) {
++ for (i = 0; i < ARRAY_SIZE(uartinit_data); i++) {
+ /* Force a flush of the RX FIFOs to work around a HW bug */
+ puart = serial_std_platform_data[i].mapbase;
+ __raw_writel(0xC1, LPC32XX_UART_IIR_FCR(puart));
+diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
+index 23d3980..d90e244 100644
+--- a/arch/arm/mach-mv78xx0/common.c
++++ b/arch/arm/mach-mv78xx0/common.c
+@@ -20,6 +20,7 @@
+ #include <mach/mv78xx0.h>
+ #include <mach/bridge-regs.h>
+ #include <plat/cache-feroceon-l2.h>
++#include <plat/ehci-orion.h>
+ #include <plat/orion_nand.h>
+ #include <plat/time.h>
+ #include <plat/common.h>
+@@ -170,7 +171,7 @@ void __init mv78xx0_map_io(void)
+ void __init mv78xx0_ehci0_init(void)
+ {
+ orion_ehci_init(&mv78xx0_mbus_dram_info,
+- USB0_PHYS_BASE, IRQ_MV78XX0_USB_0);
++ USB0_PHYS_BASE, IRQ_MV78XX0_USB_0, EHCI_PHY_NA);
+ }
+
+
+diff --git a/arch/arm/mach-mv78xx0/mpp.h b/arch/arm/mach-mv78xx0/mpp.h
+index b61b509..3752302 100644
+--- a/arch/arm/mach-mv78xx0/mpp.h
++++ b/arch/arm/mach-mv78xx0/mpp.h
+@@ -24,296 +24,296 @@
+ #define MPP_78100_A0_MASK MPP(0, 0x0, 0, 0, 1)
+
+ #define MPP0_GPIO MPP(0, 0x0, 1, 1, 1)
+-#define MPP0_GE0_COL MPP(0, 0x1, 1, 0, 1)
+-#define MPP0_GE1_TXCLK MPP(0, 0x2, 0, 1, 1)
++#define MPP0_GE0_COL MPP(0, 0x1, 0, 0, 1)
++#define MPP0_GE1_TXCLK MPP(0, 0x2, 0, 0, 1)
+ #define MPP0_UNUSED MPP(0, 0x3, 0, 0, 1)
+
+ #define MPP1_GPIO MPP(1, 0x0, 1, 1, 1)
+-#define MPP1_GE0_RXERR MPP(1, 0x1, 1, 0, 1)
+-#define MPP1_GE1_TXCTL MPP(1, 0x2, 0, 1, 1)
++#define MPP1_GE0_RXERR MPP(1, 0x1, 0, 0, 1)
++#define MPP1_GE1_TXCTL MPP(1, 0x2, 0, 0, 1)
+ #define MPP1_UNUSED MPP(1, 0x3, 0, 0, 1)
+
+ #define MPP2_GPIO MPP(2, 0x0, 1, 1, 1)
+-#define MPP2_GE0_CRS MPP(2, 0x1, 1, 0, 1)
+-#define MPP2_GE1_RXCTL MPP(2, 0x2, 1, 0, 1)
++#define MPP2_GE0_CRS MPP(2, 0x1, 0, 0, 1)
++#define MPP2_GE1_RXCTL MPP(2, 0x2, 0, 0, 1)
+ #define MPP2_UNUSED MPP(2, 0x3, 0, 0, 1)
+
+ #define MPP3_GPIO MPP(3, 0x0, 1, 1, 1)
+-#define MPP3_GE0_TXERR MPP(3, 0x1, 0, 1, 1)
+-#define MPP3_GE1_RXCLK MPP(3, 0x2, 1, 0, 1)
++#define MPP3_GE0_TXERR MPP(3, 0x1, 0, 0, 1)
++#define MPP3_GE1_RXCLK MPP(3, 0x2, 0, 0, 1)
+ #define MPP3_UNUSED MPP(3, 0x3, 0, 0, 1)
+
+ #define MPP4_GPIO MPP(4, 0x0, 1, 1, 1)
+-#define MPP4_GE0_TXD4 MPP(4, 0x1, 0, 1, 1)
+-#define MPP4_GE1_TXD0 MPP(4, 0x2, 0, 1, 1)
++#define MPP4_GE0_TXD4 MPP(4, 0x1, 0, 0, 1)
++#define MPP4_GE1_TXD0 MPP(4, 0x2, 0, 0, 1)
+ #define MPP4_UNUSED MPP(4, 0x3, 0, 0, 1)
+
+ #define MPP5_GPIO MPP(5, 0x0, 1, 1, 1)
+-#define MPP5_GE0_TXD5 MPP(5, 0x1, 0, 1, 1)
+-#define MPP5_GE1_TXD1 MPP(5, 0x2, 0, 1, 1)
++#define MPP5_GE0_TXD5 MPP(5, 0x1, 0, 0, 1)
++#define MPP5_GE1_TXD1 MPP(5, 0x2, 0, 0, 1)
+ #define MPP5_UNUSED MPP(5, 0x3, 0, 0, 1)
+
+ #define MPP6_GPIO MPP(6, 0x0, 1, 1, 1)
+-#define MPP6_GE0_TXD6 MPP(6, 0x1, 0, 1, 1)
+-#define MPP6_GE1_TXD2 MPP(6, 0x2, 0, 1, 1)
++#define MPP6_GE0_TXD6 MPP(6, 0x1, 0, 0, 1)
++#define MPP6_GE1_TXD2 MPP(6, 0x2, 0, 0, 1)
+ #define MPP6_UNUSED MPP(6, 0x3, 0, 0, 1)
+
+ #define MPP7_GPIO MPP(7, 0x0, 1, 1, 1)
+-#define MPP7_GE0_TXD7 MPP(7, 0x1, 0, 1, 1)
+-#define MPP7_GE1_TXD3 MPP(7, 0x2, 0, 1, 1)
++#define MPP7_GE0_TXD7 MPP(7, 0x1, 0, 0, 1)
++#define MPP7_GE1_TXD3 MPP(7, 0x2, 0, 0, 1)
+ #define MPP7_UNUSED MPP(7, 0x3, 0, 0, 1)
+
+ #define MPP8_GPIO MPP(8, 0x0, 1, 1, 1)
+-#define MPP8_GE0_RXD4 MPP(8, 0x1, 1, 0, 1)
+-#define MPP8_GE1_RXD0 MPP(8, 0x2, 1, 0, 1)
++#define MPP8_GE0_RXD4 MPP(8, 0x1, 0, 0, 1)
++#define MPP8_GE1_RXD0 MPP(8, 0x2, 0, 0, 1)
+ #define MPP8_UNUSED MPP(8, 0x3, 0, 0, 1)
+
+ #define MPP9_GPIO MPP(9, 0x0, 1, 1, 1)
+-#define MPP9_GE0_RXD5 MPP(9, 0x1, 1, 0, 1)
+-#define MPP9_GE1_RXD1 MPP(9, 0x2, 1, 0, 1)
++#define MPP9_GE0_RXD5 MPP(9, 0x1, 0, 0, 1)
++#define MPP9_GE1_RXD1 MPP(9, 0x2, 0, 0, 1)
+ #define MPP9_UNUSED MPP(9, 0x3, 0, 0, 1)
+
+ #define MPP10_GPIO MPP(10, 0x0, 1, 1, 1)
+-#define MPP10_GE0_RXD6 MPP(10, 0x1, 1, 0, 1)
+-#define MPP10_GE1_RXD2 MPP(10, 0x2, 1, 0, 1)
++#define MPP10_GE0_RXD6 MPP(10, 0x1, 0, 0, 1)
++#define MPP10_GE1_RXD2 MPP(10, 0x2, 0, 0, 1)
+ #define MPP10_UNUSED MPP(10, 0x3, 0, 0, 1)
+
+ #define MPP11_GPIO MPP(11, 0x0, 1, 1, 1)
+-#define MPP11_GE0_RXD7 MPP(11, 0x1, 1, 0, 1)
+-#define MPP11_GE1_RXD3 MPP(11, 0x2, 1, 0, 1)
++#define MPP11_GE0_RXD7 MPP(11, 0x1, 0, 0, 1)
++#define MPP11_GE1_RXD3 MPP(11, 0x2, 0, 0, 1)
+ #define MPP11_UNUSED MPP(11, 0x3, 0, 0, 1)
+
+ #define MPP12_GPIO MPP(12, 0x0, 1, 1, 1)
+-#define MPP12_M_BB MPP(12, 0x3, 1, 0, 1)
+-#define MPP12_UA0_CTSn MPP(12, 0x4, 1, 0, 1)
+-#define MPP12_NAND_FLASH_REn0 MPP(12, 0x5, 0, 1, 1)
+-#define MPP12_TDM0_SCSn MPP(12, 0X6, 0, 1, 1)
++#define MPP12_M_BB MPP(12, 0x3, 0, 0, 1)
++#define MPP12_UA0_CTSn MPP(12, 0x4, 0, 0, 1)
++#define MPP12_NAND_FLASH_REn0 MPP(12, 0x5, 0, 0, 1)
++#define MPP12_TDM0_SCSn MPP(12, 0X6, 0, 0, 1)
+ #define MPP12_UNUSED MPP(12, 0x1, 0, 0, 1)
+
+ #define MPP13_GPIO MPP(13, 0x0, 1, 1, 1)
+-#define MPP13_SYSRST_OUTn MPP(13, 0x3, 0, 1, 1)
+-#define MPP13_UA0_RTSn MPP(13, 0x4, 0, 1, 1)
+-#define MPP13_NAN_FLASH_WEn0 MPP(13, 0x5, 0, 1, 1)
+-#define MPP13_TDM_SCLK MPP(13, 0x6, 0, 1, 1)
++#define MPP13_SYSRST_OUTn MPP(13, 0x3, 0, 0, 1)
++#define MPP13_UA0_RTSn MPP(13, 0x4, 0, 0, 1)
++#define MPP13_NAN_FLASH_WEn0 MPP(13, 0x5, 0, 0, 1)
++#define MPP13_TDM_SCLK MPP(13, 0x6, 0, 0, 1)
+ #define MPP13_UNUSED MPP(13, 0x1, 0, 0, 1)
+
+ #define MPP14_GPIO MPP(14, 0x0, 1, 1, 1)
+-#define MPP14_SATA1_ACTn MPP(14, 0x3, 0, 1, 1)
+-#define MPP14_UA1_CTSn MPP(14, 0x4, 1, 0, 1)
+-#define MPP14_NAND_FLASH_REn1 MPP(14, 0x5, 0, 1, 1)
+-#define MPP14_TDM_SMOSI MPP(14, 0x6, 0, 1, 1)
++#define MPP14_SATA1_ACTn MPP(14, 0x3, 0, 0, 1)
++#define MPP14_UA1_CTSn MPP(14, 0x4, 0, 0, 1)
++#define MPP14_NAND_FLASH_REn1 MPP(14, 0x5, 0, 0, 1)
++#define MPP14_TDM_SMOSI MPP(14, 0x6, 0, 0, 1)
+ #define MPP14_UNUSED MPP(14, 0x1, 0, 0, 1)
+
+ #define MPP15_GPIO MPP(15, 0x0, 1, 1, 1)
+-#define MPP15_SATA0_ACTn MPP(15, 0x3, 0, 1, 1)
+-#define MPP15_UA1_RTSn MPP(15, 0x4, 0, 1, 1)
+-#define MPP15_NAND_FLASH_WEn1 MPP(15, 0x5, 0, 1, 1)
+-#define MPP15_TDM_SMISO MPP(15, 0x6, 1, 0, 1)
++#define MPP15_SATA0_ACTn MPP(15, 0x3, 0, 0, 1)
++#define MPP15_UA1_RTSn MPP(15, 0x4, 0, 0, 1)
++#define MPP15_NAND_FLASH_WEn1 MPP(15, 0x5, 0, 0, 1)
++#define MPP15_TDM_SMISO MPP(15, 0x6, 0, 0, 1)
+ #define MPP15_UNUSED MPP(15, 0x1, 0, 0, 1)
+
+ #define MPP16_GPIO MPP(16, 0x0, 1, 1, 1)
+-#define MPP16_SATA1_PRESENTn MPP(16, 0x3, 0, 1, 1)
+-#define MPP16_UA2_TXD MPP(16, 0x4, 0, 1, 1)
+-#define MPP16_NAND_FLASH_REn3 MPP(16, 0x5, 0, 1, 1)
+-#define MPP16_TDM_INTn MPP(16, 0x6, 1, 0, 1)
++#define MPP16_SATA1_PRESENTn MPP(16, 0x3, 0, 0, 1)
++#define MPP16_UA2_TXD MPP(16, 0x4, 0, 0, 1)
++#define MPP16_NAND_FLASH_REn3 MPP(16, 0x5, 0, 0, 1)
++#define MPP16_TDM_INTn MPP(16, 0x6, 0, 0, 1)
+ #define MPP16_UNUSED MPP(16, 0x1, 0, 0, 1)
+
+
+ #define MPP17_GPIO MPP(17, 0x0, 1, 1, 1)
+-#define MPP17_SATA0_PRESENTn MPP(17, 0x3, 0, 1, 1)
+-#define MPP17_UA2_RXD MPP(17, 0x4, 1, 0, 1)
+-#define MPP17_NAND_FLASH_WEn3 MPP(17, 0x5, 0, 1, 1)
+-#define MPP17_TDM_RSTn MPP(17, 0x6, 0, 1, 1)
++#define MPP17_SATA0_PRESENTn MPP(17, 0x3, 0, 0, 1)
++#define MPP17_UA2_RXD MPP(17, 0x4, 0, 0, 1)
++#define MPP17_NAND_FLASH_WEn3 MPP(17, 0x5, 0, 0, 1)
++#define MPP17_TDM_RSTn MPP(17, 0x6, 0, 0, 1)
+ #define MPP17_UNUSED MPP(17, 0x1, 0, 0, 1)
+
+
+ #define MPP18_GPIO MPP(18, 0x0, 1, 1, 1)
+-#define MPP18_UA0_CTSn MPP(18, 0x4, 1, 0, 1)
+-#define MPP18_BOOT_FLASH_REn MPP(18, 0x5, 0, 1, 1)
++#define MPP18_UA0_CTSn MPP(18, 0x4, 0, 0, 1)
++#define MPP18_BOOT_FLASH_REn MPP(18, 0x5, 0, 0, 1)
+ #define MPP18_UNUSED MPP(18, 0x1, 0, 0, 1)
+
+
+
+ #define MPP19_GPIO MPP(19, 0x0, 1, 1, 1)
+-#define MPP19_UA0_CTSn MPP(19, 0x4, 0, 1, 1)
+-#define MPP19_BOOT_FLASH_WEn MPP(19, 0x5, 0, 1, 1)
++#define MPP19_UA0_CTSn MPP(19, 0x4, 0, 0, 1)
++#define MPP19_BOOT_FLASH_WEn MPP(19, 0x5, 0, 0, 1)
+ #define MPP19_UNUSED MPP(19, 0x1, 0, 0, 1)
+
+
+ #define MPP20_GPIO MPP(20, 0x0, 1, 1, 1)
+-#define MPP20_UA1_CTSs MPP(20, 0x4, 1, 0, 1)
+-#define MPP20_TDM_PCLK MPP(20, 0x6, 1, 1, 0)
++#define MPP20_UA1_CTSs MPP(20, 0x4, 0, 0, 1)
++#define MPP20_TDM_PCLK MPP(20, 0x6, 0, 0, 0)
+ #define MPP20_UNUSED MPP(20, 0x1, 0, 0, 1)
+
+
+
+ #define MPP21_GPIO MPP(21, 0x0, 1, 1, 1)
+-#define MPP21_UA1_CTSs MPP(21, 0x4, 0, 1, 1)
+-#define MPP21_TDM_FSYNC MPP(21, 0x6, 1, 1, 0)
++#define MPP21_UA1_CTSs MPP(21, 0x4, 0, 0, 1)
++#define MPP21_TDM_FSYNC MPP(21, 0x6, 0, 0, 0)
+ #define MPP21_UNUSED MPP(21, 0x1, 0, 0, 1)
+
+
+
+ #define MPP22_GPIO MPP(22, 0x0, 1, 1, 1)
+-#define MPP22_UA3_TDX MPP(22, 0x4, 0, 1, 1)
+-#define MPP22_NAND_FLASH_REn2 MPP(22, 0x5, 0, 1, 1)
+-#define MPP22_TDM_DRX MPP(22, 0x6, 1, 0, 1)
++#define MPP22_UA3_TDX MPP(22, 0x4, 0, 0, 1)
++#define MPP22_NAND_FLASH_REn2 MPP(22, 0x5, 0, 0, 1)
++#define MPP22_TDM_DRX MPP(22, 0x6, 0, 0, 1)
+ #define MPP22_UNUSED MPP(22, 0x1, 0, 0, 1)
+
+
+
+ #define MPP23_GPIO MPP(23, 0x0, 1, 1, 1)
+-#define MPP23_UA3_RDX MPP(23, 0x4, 1, 0, 1)
+-#define MPP23_NAND_FLASH_WEn2 MPP(23, 0x5, 0, 1, 1)
+-#define MPP23_TDM_DTX MPP(23, 0x6, 0, 1, 1)
++#define MPP23_UA3_RDX MPP(23, 0x4, 0, 0, 1)
++#define MPP23_NAND_FLASH_WEn2 MPP(23, 0x5, 0, 0, 1)
++#define MPP23_TDM_DTX MPP(23, 0x6, 0, 0, 1)
+ #define MPP23_UNUSED MPP(23, 0x1, 0, 0, 1)
+
+
+ #define MPP24_GPIO MPP(24, 0x0, 1, 1, 1)
+-#define MPP24_UA2_TXD MPP(24, 0x4, 0, 1, 1)
+-#define MPP24_TDM_INTn MPP(24, 0x6, 1, 0, 1)
++#define MPP24_UA2_TXD MPP(24, 0x4, 0, 0, 1)
++#define MPP24_TDM_INTn MPP(24, 0x6, 0, 0, 1)
+ #define MPP24_UNUSED MPP(24, 0x1, 0, 0, 1)
+
+
+ #define MPP25_GPIO MPP(25, 0x0, 1, 1, 1)
+-#define MPP25_UA2_RXD MPP(25, 0x4, 1, 0, 1)
+-#define MPP25_TDM_RSTn MPP(25, 0x6, 0, 1, 1)
++#define MPP25_UA2_RXD MPP(25, 0x4, 0, 0, 1)
++#define MPP25_TDM_RSTn MPP(25, 0x6, 0, 0, 1)
+ #define MPP25_UNUSED MPP(25, 0x1, 0, 0, 1)
+
+
+ #define MPP26_GPIO MPP(26, 0x0, 1, 1, 1)
+-#define MPP26_UA2_CTSn MPP(26, 0x4, 1, 0, 1)
+-#define MPP26_TDM_PCLK MPP(26, 0x6, 1, 1, 1)
++#define MPP26_UA2_CTSn MPP(26, 0x4, 0, 0, 1)
++#define MPP26_TDM_PCLK MPP(26, 0x6, 0, 0, 1)
+ #define MPP26_UNUSED MPP(26, 0x1, 0, 0, 1)
+
+
+ #define MPP27_GPIO MPP(27, 0x0, 1, 1, 1)
+-#define MPP27_UA2_RTSn MPP(27, 0x4, 0, 1, 1)
+-#define MPP27_TDM_FSYNC MPP(27, 0x6, 1, 1, 1)
++#define MPP27_UA2_RTSn MPP(27, 0x4, 0, 0, 1)
++#define MPP27_TDM_FSYNC MPP(27, 0x6, 0, 0, 1)
+ #define MPP27_UNUSED MPP(27, 0x1, 0, 0, 1)
+
+
+ #define MPP28_GPIO MPP(28, 0x0, 1, 1, 1)
+-#define MPP28_UA3_TXD MPP(28, 0x4, 0, 1, 1)
+-#define MPP28_TDM_DRX MPP(28, 0x6, 1, 0, 1)
++#define MPP28_UA3_TXD MPP(28, 0x4, 0, 0, 1)
++#define MPP28_TDM_DRX MPP(28, 0x6, 0, 0, 1)
+ #define MPP28_UNUSED MPP(28, 0x1, 0, 0, 1)
+
+ #define MPP29_GPIO MPP(29, 0x0, 1, 1, 1)
+-#define MPP29_UA3_RXD MPP(29, 0x4, 1, 0, 1)
+-#define MPP29_SYSRST_OUTn MPP(29, 0x5, 0, 1, 1)
+-#define MPP29_TDM_DTX MPP(29, 0x6, 0, 1, 1)
++#define MPP29_UA3_RXD MPP(29, 0x4, 0, 0, 1)
++#define MPP29_SYSRST_OUTn MPP(29, 0x5, 0, 0, 1)
++#define MPP29_TDM_DTX MPP(29, 0x6, 0, 0, 1)
+ #define MPP29_UNUSED MPP(29, 0x1, 0, 0, 1)
+
+ #define MPP30_GPIO MPP(30, 0x0, 1, 1, 1)
+-#define MPP30_UA3_CTSn MPP(30, 0x4, 1, 0, 1)
++#define MPP30_UA3_CTSn MPP(30, 0x4, 0, 0, 1)
+ #define MPP30_UNUSED MPP(30, 0x1, 0, 0, 1)
+
+ #define MPP31_GPIO MPP(31, 0x0, 1, 1, 1)
+-#define MPP31_UA3_RTSn MPP(31, 0x4, 0, 1, 1)
+-#define MPP31_TDM1_SCSn MPP(31, 0x6, 0, 1, 1)
++#define MPP31_UA3_RTSn MPP(31, 0x4, 0, 0, 1)
++#define MPP31_TDM1_SCSn MPP(31, 0x6, 0, 0, 1)
+ #define MPP31_UNUSED MPP(31, 0x1, 0, 0, 1)
+
+
+ #define MPP32_GPIO MPP(32, 0x1, 1, 1, 1)
+-#define MPP32_UA3_TDX MPP(32, 0x4, 0, 1, 1)
+-#define MPP32_SYSRST_OUTn MPP(32, 0x5, 0, 1, 1)
+-#define MPP32_TDM0_RXQ MPP(32, 0x6, 0, 1, 1)
++#define MPP32_UA3_TDX MPP(32, 0x4, 0, 0, 1)
++#define MPP32_SYSRST_OUTn MPP(32, 0x5, 0, 0, 1)
++#define MPP32_TDM0_RXQ MPP(32, 0x6, 0, 0, 1)
+ #define MPP32_UNUSED MPP(32, 0x3, 0, 0, 1)
+
+
+ #define MPP33_GPIO MPP(33, 0x1, 1, 1, 1)
+-#define MPP33_UA3_RDX MPP(33, 0x4, 1, 0, 1)
+-#define MPP33_TDM0_TXQ MPP(33, 0x6, 0, 1, 1)
++#define MPP33_UA3_RDX MPP(33, 0x4, 0, 0, 1)
++#define MPP33_TDM0_TXQ MPP(33, 0x6, 0, 0, 1)
+ #define MPP33_UNUSED MPP(33, 0x3, 0, 0, 1)
+
+
+
+ #define MPP34_GPIO MPP(34, 0x1, 1, 1, 1)
+-#define MPP34_UA2_TDX MPP(34, 0x4, 0, 1, 1)
+-#define MPP34_TDM1_RXQ MPP(34, 0x6, 0, 1, 1)
++#define MPP34_UA2_TDX MPP(34, 0x4, 0, 0, 1)
++#define MPP34_TDM1_RXQ MPP(34, 0x6, 0, 0, 1)
+ #define MPP34_UNUSED MPP(34, 0x3, 0, 0, 1)
+
+
+
+ #define MPP35_GPIO MPP(35, 0x1, 1, 1, 1)
+-#define MPP35_UA2_RDX MPP(35, 0x4, 1, 0, 1)
+-#define MPP35_TDM1_TXQ MPP(35, 0x6, 0, 1, 1)
++#define MPP35_UA2_RDX MPP(35, 0x4, 0, 0, 1)
++#define MPP35_TDM1_TXQ MPP(35, 0x6, 0, 0, 1)
+ #define MPP35_UNUSED MPP(35, 0x3, 0, 0, 1)
+
+ #define MPP36_GPIO MPP(36, 0x1, 1, 1, 1)
+-#define MPP36_UA0_CTSn MPP(36, 0x2, 1, 0, 1)
+-#define MPP36_UA2_TDX MPP(36, 0x4, 0, 1, 1)
+-#define MPP36_TDM0_SCSn MPP(36, 0x6, 0, 1, 1)
++#define MPP36_UA0_CTSn MPP(36, 0x2, 0, 0, 1)
++#define MPP36_UA2_TDX MPP(36, 0x4, 0, 0, 1)
++#define MPP36_TDM0_SCSn MPP(36, 0x6, 0, 0, 1)
+ #define MPP36_UNUSED MPP(36, 0x3, 0, 0, 1)
+
+
+ #define MPP37_GPIO MPP(37, 0x1, 1, 1, 1)
+-#define MPP37_UA0_RTSn MPP(37, 0x2, 0, 1, 1)
+-#define MPP37_UA2_RXD MPP(37, 0x4, 1, 0, 1)
+-#define MPP37_SYSRST_OUTn MPP(37, 0x5, 0, 1, 1)
+-#define MPP37_TDM_SCLK MPP(37, 0x6, 0, 1, 1)
++#define MPP37_UA0_RTSn MPP(37, 0x2, 0, 0, 1)
++#define MPP37_UA2_RXD MPP(37, 0x4, 0, 0, 1)
++#define MPP37_SYSRST_OUTn MPP(37, 0x5, 0, 0, 1)
++#define MPP37_TDM_SCLK MPP(37, 0x6, 0, 0, 1)
+ #define MPP37_UNUSED MPP(37, 0x3, 0, 0, 1)
+
+
+
+
+ #define MPP38_GPIO MPP(38, 0x1, 1, 1, 1)
+-#define MPP38_UA1_CTSn MPP(38, 0x2, 1, 0, 1)
+-#define MPP38_UA3_TXD MPP(38, 0x4, 0, 1, 1)
+-#define MPP38_SYSRST_OUTn MPP(38, 0x5, 0, 1, 1)
+-#define MPP38_TDM_SMOSI MPP(38, 0x6, 0, 1, 1)
++#define MPP38_UA1_CTSn MPP(38, 0x2, 0, 0, 1)
++#define MPP38_UA3_TXD MPP(38, 0x4, 0, 0, 1)
++#define MPP38_SYSRST_OUTn MPP(38, 0x5, 0, 0, 1)
++#define MPP38_TDM_SMOSI MPP(38, 0x6, 0, 0, 1)
+ #define MPP38_UNUSED MPP(38, 0x3, 0, 0, 1)
+
+
+
+
+ #define MPP39_GPIO MPP(39, 0x1, 1, 1, 1)
+-#define MPP39_UA1_RTSn MPP(39, 0x2, 0, 1, 1)
+-#define MPP39_UA3_RXD MPP(39, 0x4, 1, 0, 1)
+-#define MPP39_SYSRST_OUTn MPP(39, 0x5, 0, 1, 1)
+-#define MPP39_TDM_SMISO MPP(39, 0x6, 1, 0, 1)
++#define MPP39_UA1_RTSn MPP(39, 0x2, 0, 0, 1)
++#define MPP39_UA3_RXD MPP(39, 0x4, 0, 0, 1)
++#define MPP39_SYSRST_OUTn MPP(39, 0x5, 0, 0, 1)
++#define MPP39_TDM_SMISO MPP(39, 0x6, 0, 0, 1)
+ #define MPP39_UNUSED MPP(39, 0x3, 0, 0, 1)
+
+
+
+ #define MPP40_GPIO MPP(40, 0x1, 1, 1, 1)
+-#define MPP40_TDM_INTn MPP(40, 0x6, 1, 0, 1)
++#define MPP40_TDM_INTn MPP(40, 0x6, 0, 0, 1)
+ #define MPP40_UNUSED MPP(40, 0x0, 0, 0, 1)
+
+
+
+ #define MPP41_GPIO MPP(41, 0x1, 1, 1, 1)
+-#define MPP41_TDM_RSTn MPP(41, 0x6, 0, 1, 1)
++#define MPP41_TDM_RSTn MPP(41, 0x6, 0, 0, 1)
+ #define MPP41_UNUSED MPP(41, 0x0, 0, 0, 1)
+
+
+
+ #define MPP42_GPIO MPP(42, 0x1, 1, 1, 1)
+-#define MPP42_TDM_PCLK MPP(42, 0x6, 1, 1, 1)
++#define MPP42_TDM_PCLK MPP(42, 0x6, 0, 0, 1)
+ #define MPP42_UNUSED MPP(42, 0x0, 0, 0, 1)
+
+
+
+ #define MPP43_GPIO MPP(43, 0x1, 1, 1, 1)
+-#define MPP43_TDM_FSYNC MPP(43, 0x6, 1, 1, 1)
++#define MPP43_TDM_FSYNC MPP(43, 0x6, 0, 0, 1)
+ #define MPP43_UNUSED MPP(43, 0x0, 0, 0, 1)
+
+
+
+ #define MPP44_GPIO MPP(44, 0x1, 1, 1, 1)
+-#define MPP44_TDM_DRX MPP(44, 0x6, 1, 0, 1)
++#define MPP44_TDM_DRX MPP(44, 0x6, 0, 0, 1)
+ #define MPP44_UNUSED MPP(44, 0x0, 0, 0, 1)
+
+
+
+ #define MPP45_GPIO MPP(45, 0x1, 1, 1, 1)
+-#define MPP45_SATA0_ACTn MPP(45, 0x3, 0, 1, 1)
+-#define MPP45_TDM_DRX MPP(45, 0x6, 0, 1, 1)
++#define MPP45_SATA0_ACTn MPP(45, 0x3, 0, 0, 1)
++#define MPP45_TDM_DRX MPP(45, 0x6, 0, 0, 1)
+ #define MPP45_UNUSED MPP(45, 0x0, 0, 0, 1)
+
+
+ #define MPP46_GPIO MPP(46, 0x1, 1, 1, 1)
+-#define MPP46_TDM_SCSn MPP(46, 0x6, 0, 1, 1)
++#define MPP46_TDM_SCSn MPP(46, 0x6, 0, 0, 1)
+ #define MPP46_UNUSED MPP(46, 0x0, 0, 0, 1)
+
+
+@@ -323,14 +323,14 @@
+
+
+ #define MPP48_GPIO MPP(48, 0x1, 1, 1, 1)
+-#define MPP48_SATA1_ACTn MPP(48, 0x3, 0, 1, 1)
++#define MPP48_SATA1_ACTn MPP(48, 0x3, 0, 0, 1)
+ #define MPP48_UNUSED MPP(48, 0x2, 0, 0, 1)
+
+
+
+ #define MPP49_GPIO MPP(49, 0x1, 1, 1, 1)
+-#define MPP49_SATA0_ACTn MPP(49, 0x3, 0, 1, 1)
+-#define MPP49_M_BB MPP(49, 0x4, 1, 0, 1)
++#define MPP49_SATA0_ACTn MPP(49, 0x3, 0, 0, 1)
++#define MPP49_M_BB MPP(49, 0x4, 0, 0, 1)
+ #define MPP49_UNUSED MPP(49, 0x2, 0, 0, 1)
+
+
+diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
+index 5156468..02cd29a 100644
+--- a/arch/arm/mach-omap2/board-4430sdp.c
++++ b/arch/arm/mach-omap2/board-4430sdp.c
+@@ -52,8 +52,9 @@
+ #define ETH_KS8851_QUART 138
+ #define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO 184
+ #define OMAP4_SFH7741_ENABLE_GPIO 188
+-#define HDMI_GPIO_HPD 60 /* Hot plug pin for HDMI */
++#define HDMI_GPIO_CT_CP_HPD 60 /* HPD mode enable/disable */
+ #define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */
++#define HDMI_GPIO_HPD 63 /* Hotplug detect */
+ #define DISPLAY_SEL_GPIO 59 /* LCD2/PicoDLP switch */
+ #define DLP_POWER_ON_GPIO 40
+
+@@ -597,12 +598,8 @@ static void __init omap_sfh7741prox_init(void)
+
+ static void sdp4430_hdmi_mux_init(void)
+ {
+- /* PAD0_HDMI_HPD_PAD1_HDMI_CEC */
+- omap_mux_init_signal("hdmi_hpd",
+- OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("hdmi_cec",
+ OMAP_PIN_INPUT_PULLUP);
+- /* PAD0_HDMI_DDC_SCL_PAD1_HDMI_DDC_SDA */
+ omap_mux_init_signal("hdmi_ddc_scl",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("hdmi_ddc_sda",
+@@ -610,8 +607,9 @@ static void sdp4430_hdmi_mux_init(void)
+ }
+
+ static struct gpio sdp4430_hdmi_gpios[] = {
+- { HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd" },
++ { HDMI_GPIO_CT_CP_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ct_cp_hpd" },
+ { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" },
++ { HDMI_GPIO_HPD, GPIOF_DIR_IN, "hdmi_gpio_hpd" },
+ };
+
+ static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev)
+@@ -628,8 +626,7 @@ static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev)
+
+ static void sdp4430_panel_disable_hdmi(struct omap_dss_device *dssdev)
+ {
+- gpio_free(HDMI_GPIO_LS_OE);
+- gpio_free(HDMI_GPIO_HPD);
++ gpio_free_array(sdp4430_hdmi_gpios, ARRAY_SIZE(sdp4430_hdmi_gpios));
+ }
+
+ static struct nokia_dsi_panel_data dsi1_panel = {
+@@ -745,6 +742,10 @@ static void sdp4430_lcd_init(void)
+ pr_err("%s: Could not get lcd2_reset_gpio\n", __func__);
+ }
+
++static struct omap_dss_hdmi_data sdp4430_hdmi_data = {
++ .hpd_gpio = HDMI_GPIO_HPD,
++};
++
+ static struct omap_dss_device sdp4430_hdmi_device = {
+ .name = "hdmi",
+ .driver_name = "hdmi_panel",
+@@ -752,6 +753,7 @@ static struct omap_dss_device sdp4430_hdmi_device = {
+ .platform_enable = sdp4430_panel_enable_hdmi,
+ .platform_disable = sdp4430_panel_disable_hdmi,
+ .channel = OMAP_DSS_CHANNEL_DIGIT,
++ .data = &sdp4430_hdmi_data,
+ };
+
+ static struct picodlp_panel_data sdp4430_picodlp_pdata = {
+@@ -829,6 +831,10 @@ static void omap_4430sdp_display_init(void)
+ sdp4430_hdmi_mux_init();
+ sdp4430_picodlp_init();
+ omap_display_init(&sdp4430_dss_data);
++
++ omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT);
++ omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT);
++ omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN);
+ }
+
+ #ifdef CONFIG_OMAP_MUX
+diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
+index a8c2c42..51b1c93 100644
+--- a/arch/arm/mach-omap2/board-omap4panda.c
++++ b/arch/arm/mach-omap2/board-omap4panda.c
+@@ -51,8 +51,9 @@
+ #define GPIO_HUB_NRESET 62
+ #define GPIO_WIFI_PMENA 43
+ #define GPIO_WIFI_IRQ 53
+-#define HDMI_GPIO_HPD 60 /* Hot plug pin for HDMI */
++#define HDMI_GPIO_CT_CP_HPD 60 /* HPD mode enable/disable */
+ #define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */
++#define HDMI_GPIO_HPD 63 /* Hotplug detect */
+
+ /* wl127x BT, FM, GPS connectivity chip */
+ static int wl1271_gpios[] = {46, -1, -1};
+@@ -481,12 +482,8 @@ int __init omap4_panda_dvi_init(void)
+
+ static void omap4_panda_hdmi_mux_init(void)
+ {
+- /* PAD0_HDMI_HPD_PAD1_HDMI_CEC */
+- omap_mux_init_signal("hdmi_hpd",
+- OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("hdmi_cec",
+ OMAP_PIN_INPUT_PULLUP);
+- /* PAD0_HDMI_DDC_SCL_PAD1_HDMI_DDC_SDA */
+ omap_mux_init_signal("hdmi_ddc_scl",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("hdmi_ddc_sda",
+@@ -494,8 +491,9 @@ static void omap4_panda_hdmi_mux_init(void)
+ }
+
+ static struct gpio panda_hdmi_gpios[] = {
+- { HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd" },
++ { HDMI_GPIO_CT_CP_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ct_cp_hpd" },
+ { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" },
++ { HDMI_GPIO_HPD, GPIOF_DIR_IN, "hdmi_gpio_hpd" },
+ };
+
+ static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev)
+@@ -512,10 +510,13 @@ static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev)
+
+ static void omap4_panda_panel_disable_hdmi(struct omap_dss_device *dssdev)
+ {
+- gpio_free(HDMI_GPIO_LS_OE);
+- gpio_free(HDMI_GPIO_HPD);
++ gpio_free_array(panda_hdmi_gpios, ARRAY_SIZE(panda_hdmi_gpios));
+ }
+
++static struct omap_dss_hdmi_data omap4_panda_hdmi_data = {
++ .hpd_gpio = HDMI_GPIO_HPD,
++};
++
+ static struct omap_dss_device omap4_panda_hdmi_device = {
+ .name = "hdmi",
+ .driver_name = "hdmi_panel",
+@@ -523,6 +524,7 @@ static struct omap_dss_device omap4_panda_hdmi_device = {
+ .platform_enable = omap4_panda_panel_enable_hdmi,
+ .platform_disable = omap4_panda_panel_disable_hdmi,
+ .channel = OMAP_DSS_CHANNEL_DIGIT,
++ .data = &omap4_panda_hdmi_data,
+ };
+
+ static struct omap_dss_device *omap4_panda_dss_devices[] = {
+@@ -546,6 +548,10 @@ void omap4_panda_display_init(void)
+
+ omap4_panda_hdmi_mux_init();
+ omap_display_init(&omap4_panda_dss_data);
++
++ omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT);
++ omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT);
++ omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN);
+ }
+
+ static void __init omap4_panda_init(void)
+diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
+index b882204..ac49384 100644
+--- a/arch/arm/mach-omap2/omap-iommu.c
++++ b/arch/arm/mach-omap2/omap-iommu.c
+@@ -150,7 +150,8 @@ err_out:
+ platform_device_put(omap_iommu_pdev[i]);
+ return err;
+ }
+-module_init(omap_iommu_init);
++/* must be ready before omap3isp is probed */
++subsys_initcall(omap_iommu_init);
+
+ static void __exit omap_iommu_exit(void)
+ {
+diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
+index 22ace0b..53b68b8 100644
+--- a/arch/arm/mach-orion5x/common.c
++++ b/arch/arm/mach-orion5x/common.c
+@@ -29,6 +29,7 @@
+ #include <mach/hardware.h>
+ #include <mach/orion5x.h>
+ #include <plat/orion_nand.h>
++#include <plat/ehci-orion.h>
+ #include <plat/time.h>
+ #include <plat/common.h>
+ #include "common.h"
+@@ -72,7 +73,8 @@ void __init orion5x_map_io(void)
+ void __init orion5x_ehci0_init(void)
+ {
+ orion_ehci_init(&orion5x_mbus_dram_info,
+- ORION5X_USB0_PHYS_BASE, IRQ_ORION5X_USB0_CTRL);
++ ORION5X_USB0_PHYS_BASE, IRQ_ORION5X_USB0_CTRL,
++ EHCI_PHY_ORION);
+ }
+
+
+diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
+index 40cc7aa..82ef81d 100644
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -352,9 +352,7 @@ __v7_setup:
+ mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
+ #endif
+ #ifdef CONFIG_ARM_ERRATA_743622
+- teq r6, #0x20 @ present in r2p0
+- teqne r6, #0x21 @ present in r2p1
+- teqne r6, #0x22 @ present in r2p2
++ teq r5, #0x00200000 @ only present in r2p*
+ mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orreq r10, r10, #1 << 6 @ set bit #6
+ mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
+diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
+index 9e5451b..11dce87 100644
+--- a/arch/arm/plat-orion/common.c
++++ b/arch/arm/plat-orion/common.c
+@@ -806,10 +806,7 @@ void __init orion_xor1_init(unsigned long mapbase_low,
+ /*****************************************************************************
+ * EHCI
+ ****************************************************************************/
+-static struct orion_ehci_data orion_ehci_data = {
+- .phy_version = EHCI_PHY_NA,
+-};
+-
++static struct orion_ehci_data orion_ehci_data;
+ static u64 ehci_dmamask = DMA_BIT_MASK(32);
+
+
+@@ -830,9 +827,11 @@ static struct platform_device orion_ehci = {
+
+ void __init orion_ehci_init(struct mbus_dram_target_info *mbus_dram_info,
+ unsigned long mapbase,
+- unsigned long irq)
++ unsigned long irq,
++ enum orion_ehci_phy_ver phy_version)
+ {
+ orion_ehci_data.dram = mbus_dram_info;
++ orion_ehci_data.phy_version = phy_version;
+ fill_resources(&orion_ehci, orion_ehci_resources, mapbase, SZ_4K - 1,
+ irq);
+
+diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
+index a63c357..a2c0e31 100644
+--- a/arch/arm/plat-orion/include/plat/common.h
++++ b/arch/arm/plat-orion/include/plat/common.h
+@@ -95,7 +95,8 @@ void __init orion_xor1_init(unsigned long mapbase_low,
+
+ void __init orion_ehci_init(struct mbus_dram_target_info *mbus_dram_info,
+ unsigned long mapbase,
+- unsigned long irq);
++ unsigned long irq,
++ enum orion_ehci_phy_ver phy_version);
+
+ void __init orion_ehci_1_init(struct mbus_dram_target_info *mbus_dram_info,
+ unsigned long mapbase,
+diff --git a/arch/arm/plat-orion/mpp.c b/arch/arm/plat-orion/mpp.c
+index 9155343..3b1e17b 100644
+--- a/arch/arm/plat-orion/mpp.c
++++ b/arch/arm/plat-orion/mpp.c
+@@ -64,8 +64,7 @@ void __init orion_mpp_conf(unsigned int *mpp_list, unsigned int variant_mask,
+ gpio_mode |= GPIO_INPUT_OK;
+ if (*mpp_list & MPP_OUTPUT_MASK)
+ gpio_mode |= GPIO_OUTPUT_OK;
+- if (sel != 0)
+- gpio_mode = 0;
++
+ orion_gpio_set_valid(num, gpio_mode);
+ }
+
+diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
+index 53754bc..8a90b6a 100644
+--- a/arch/arm/plat-s3c24xx/dma.c
++++ b/arch/arm/plat-s3c24xx/dma.c
+@@ -1249,7 +1249,7 @@ static void s3c2410_dma_resume(void)
+ struct s3c2410_dma_chan *cp = s3c2410_chans + dma_channels - 1;
+ int channel;
+
+- for (channel = dma_channels - 1; channel >= 0; cp++, channel--)
++ for (channel = dma_channels - 1; channel >= 0; cp--, channel--)
+ s3c2410_dma_resume_chan(cp);
+ }
+
+diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
+index 197e96f..3dea7231 100644
+--- a/arch/avr32/Kconfig
++++ b/arch/avr32/Kconfig
+@@ -8,6 +8,7 @@ config AVR32
+ select HAVE_KPROBES
+ select HAVE_GENERIC_HARDIRQS
+ select GENERIC_IRQ_PROBE
++ select GENERIC_ATOMIC64
+ select HARDIRQS_SW_RESEND
+ select GENERIC_IRQ_SHOW
+ select ARCH_HAVE_NMI_SAFE_CMPXCHG
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 373679b..f929db9 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -230,6 +230,9 @@ config COMPAT
+ config SYSVIPC_COMPAT
+ def_bool y if COMPAT && SYSVIPC
+
++config KEYS_COMPAT
++ def_bool y if COMPAT && KEYS
++
+ config AUDIT_ARCH
+ def_bool y
+
+diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
+index 2e49748..234f1d8 100644
+--- a/arch/s390/include/asm/compat.h
++++ b/arch/s390/include/asm/compat.h
+@@ -172,13 +172,6 @@ static inline int is_compat_task(void)
+ return is_32bit_task();
+ }
+
+-#else
+-
+-static inline int is_compat_task(void)
+-{
+- return 0;
+-}
+-
+ #endif
+
+ static inline void __user *arch_compat_alloc_user_space(long len)
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index 9451b21..53088e2 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -29,7 +29,6 @@
+ #include <asm/irq.h>
+ #include <asm/timer.h>
+ #include <asm/nmi.h>
+-#include <asm/compat.h>
+ #include <asm/smp.h>
+ #include "entry.h"
+
+diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
+index 573bc29..afe82bc 100644
+--- a/arch/s390/kernel/ptrace.c
++++ b/arch/s390/kernel/ptrace.c
+@@ -20,8 +20,8 @@
+ #include <linux/regset.h>
+ #include <linux/tracehook.h>
+ #include <linux/seccomp.h>
++#include <linux/compat.h>
+ #include <trace/syscall.h>
+-#include <asm/compat.h>
+ #include <asm/segment.h>
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index e54c4ff..773f55e 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -45,6 +45,7 @@
+ #include <linux/kexec.h>
+ #include <linux/crash_dump.h>
+ #include <linux/memory.h>
++#include <linux/compat.h>
+
+ #include <asm/ipl.h>
+ #include <asm/uaccess.h>
+@@ -58,7 +59,6 @@
+ #include <asm/ptrace.h>
+ #include <asm/sections.h>
+ #include <asm/ebcdic.h>
+-#include <asm/compat.h>
+ #include <asm/kvm_virtio.h>
+ #include <asm/diag.h>
+
+diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
+index 7f6f9f3..5086553 100644
+--- a/arch/s390/kernel/signal.c
++++ b/arch/s390/kernel/signal.c
+@@ -30,7 +30,6 @@
+ #include <asm/ucontext.h>
+ #include <asm/uaccess.h>
+ #include <asm/lowcore.h>
+-#include <asm/compat.h>
+ #include "entry.h"
+
+ #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index a9a3018..c7f0fbc 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -36,7 +36,6 @@
+ #include <asm/pgtable.h>
+ #include <asm/irq.h>
+ #include <asm/mmu_context.h>
+-#include <asm/compat.h>
+ #include "../kernel/entry.h"
+
+ #ifndef CONFIG_64BIT
+diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
+index f09c748..a0155c0 100644
+--- a/arch/s390/mm/mmap.c
++++ b/arch/s390/mm/mmap.c
+@@ -29,8 +29,8 @@
+ #include <linux/mman.h>
+ #include <linux/module.h>
+ #include <linux/random.h>
++#include <linux/compat.h>
+ #include <asm/pgalloc.h>
+-#include <asm/compat.h>
+
+ static unsigned long stack_maxrandom_size(void)
+ {
+diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
+index f61c62f..50d7ff2 100644
+--- a/arch/x86/include/asm/perf_event.h
++++ b/arch/x86/include/asm/perf_event.h
+@@ -212,4 +212,12 @@ static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
+ static inline void perf_events_lapic_init(void) { }
+ #endif
+
++#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
++ extern void amd_pmu_enable_virt(void);
++ extern void amd_pmu_disable_virt(void);
++#else
++ static inline void amd_pmu_enable_virt(void) { }
++ static inline void amd_pmu_disable_virt(void) { }
++#endif
++
+ #endif /* _ASM_X86_PERF_EVENT_H */
+diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
+index b9698d4..02e0295 100644
+--- a/arch/x86/kernel/cpu/perf_event.h
++++ b/arch/x86/kernel/cpu/perf_event.h
+@@ -146,7 +146,9 @@ struct cpu_hw_events {
+ /*
+ * AMD specific bits
+ */
+- struct amd_nb *amd_nb;
++ struct amd_nb *amd_nb;
++ /* Inverted mask of bits to clear in the perf_ctr ctrl registers */
++ u64 perf_ctr_virt_mask;
+
+ void *kfree_on_online;
+ };
+@@ -372,9 +374,11 @@ void x86_pmu_disable_all(void);
+ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
+ u64 enable_mask)
+ {
++ u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
++
+ if (hwc->extra_reg.reg)
+ wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
+- wrmsrl(hwc->config_base, hwc->config | enable_mask);
++ wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
+ }
+
+ void x86_pmu_enable_all(int added);
+diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
+index aeefd45..f64a039 100644
+--- a/arch/x86/kernel/cpu/perf_event_amd.c
++++ b/arch/x86/kernel/cpu/perf_event_amd.c
+@@ -1,4 +1,5 @@
+ #include <linux/perf_event.h>
++#include <linux/export.h>
+ #include <linux/types.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
+@@ -357,7 +358,9 @@ static void amd_pmu_cpu_starting(int cpu)
+ struct amd_nb *nb;
+ int i, nb_id;
+
+- if (boot_cpu_data.x86_max_cores < 2)
++ cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
++
++ if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
+ return;
+
+ nb_id = amd_get_nb_id(cpu);
+@@ -587,9 +590,9 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
+ .put_event_constraints = amd_put_event_constraints,
+
+ .cpu_prepare = amd_pmu_cpu_prepare,
+- .cpu_starting = amd_pmu_cpu_starting,
+ .cpu_dead = amd_pmu_cpu_dead,
+ #endif
++ .cpu_starting = amd_pmu_cpu_starting,
+ };
+
+ __init int amd_pmu_init(void)
+@@ -621,3 +624,33 @@ __init int amd_pmu_init(void)
+
+ return 0;
+ }
++
++void amd_pmu_enable_virt(void)
++{
++ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
++
++ cpuc->perf_ctr_virt_mask = 0;
++
++ /* Reload all events */
++ x86_pmu_disable_all();
++ x86_pmu_enable_all(0);
++}
++EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
++
++void amd_pmu_disable_virt(void)
++{
++ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
++
++ /*
++ * We only mask out the Host-only bit so that host-only counting works
++ * when SVM is disabled. If someone sets up a guest-only counter when
++ * SVM is disabled the Guest-only bits still gets set and the counter
++ * will not count anything.
++ */
++ cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
++
++ /* Reload all events */
++ x86_pmu_disable_all();
++ x86_pmu_enable_all(0);
++}
++EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index e32243e..94a4672 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -29,6 +29,7 @@
+ #include <linux/ftrace_event.h>
+ #include <linux/slab.h>
+
++#include <asm/perf_event.h>
+ #include <asm/tlbflush.h>
+ #include <asm/desc.h>
+ #include <asm/kvm_para.h>
+@@ -575,6 +576,8 @@ static void svm_hardware_disable(void *garbage)
+ wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
+
+ cpu_svm_disable();
++
++ amd_pmu_disable_virt();
+ }
+
+ static int svm_hardware_enable(void *garbage)
+@@ -622,6 +625,8 @@ static int svm_hardware_enable(void *garbage)
+
+ svm_init_erratum_383();
+
++ amd_pmu_enable_virt();
++
+ return 0;
+ }
+
+diff --git a/block/bsg.c b/block/bsg.c
+index 702f131..c0ab25c 100644
+--- a/block/bsg.c
++++ b/block/bsg.c
+@@ -985,7 +985,8 @@ void bsg_unregister_queue(struct request_queue *q)
+
+ mutex_lock(&bsg_mutex);
+ idr_remove(&bsg_minor_idr, bcd->minor);
+- sysfs_remove_link(&q->kobj, "bsg");
++ if (q->kobj.sd)
++ sysfs_remove_link(&q->kobj, "bsg");
+ device_unregister(bcd->class_dev);
+ bcd->class_dev = NULL;
+ kref_put(&bcd->ref, bsg_kref_release_function);
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 6d9a3ab..0a7ed69 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -476,6 +476,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
+ },
+ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Asus K54C",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Asus K54HR",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
++ },
++ },
+ {},
+ };
+ #endif /* CONFIG_SUSPEND */
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index dcd8bab..fe79635 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -714,6 +714,7 @@ static int mv_hash_final(struct ahash_request *req)
+ {
+ struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+
++ ahash_request_set_crypt(req, NULL, req->result, 0);
+ mv_update_hash_req_ctx(ctx, 1, 0);
+ return mv_handle_req(&req->base);
+ }
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index a26d5b0..1608d2a 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2886,6 +2886,20 @@
+ #define DISP_TILE_SURFACE_SWIZZLING (1<<13)
+ #define DISP_FBC_WM_DIS (1<<15)
+
++/* GEN7 chicken */
++#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
++# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
++
++#define GEN7_L3CNTLREG1 0xB01C
++#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
++
++#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
++#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
++
++/* WaCatErrorRejectionIssue */
++#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
++#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
++
+ /* PCH */
+
+ /* south display engine interrupt */
+@@ -3476,6 +3490,7 @@
+ #define GT_FIFO_NUM_RESERVED_ENTRIES 20
+
+ #define GEN6_UCGCTL2 0x9404
++# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
+ # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
+ # define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
+
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index daa5743..9ec9755 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -5876,14 +5876,14 @@ static void ironlake_write_eld(struct drm_connector *connector,
+ int aud_cntl_st;
+ int aud_cntrl_st2;
+
+- if (IS_IVYBRIDGE(connector->dev)) {
+- hdmiw_hdmiedid = GEN7_HDMIW_HDMIEDID_A;
+- aud_cntl_st = GEN7_AUD_CNTRL_ST_A;
+- aud_cntrl_st2 = GEN7_AUD_CNTRL_ST2;
+- } else {
++ if (HAS_PCH_IBX(connector->dev)) {
+ hdmiw_hdmiedid = GEN5_HDMIW_HDMIEDID_A;
+ aud_cntl_st = GEN5_AUD_CNTL_ST_A;
+ aud_cntrl_st2 = GEN5_AUD_CNTL_ST2;
++ } else {
++ hdmiw_hdmiedid = GEN7_HDMIW_HDMIEDID_A;
++ aud_cntl_st = GEN7_AUD_CNTRL_ST_A;
++ aud_cntrl_st2 = GEN7_AUD_CNTRL_ST2;
+ }
+
+ i = to_intel_crtc(crtc)->pipe;
+@@ -5965,7 +5965,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
+ int i;
+
+ /* The clocks have to be on to load the palette. */
+- if (!crtc->enabled)
++ if (!crtc->enabled || !intel_crtc->active)
+ return;
+
+ /* use legacy palette for Ironlake */
+@@ -8248,8 +8248,28 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
++ /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
++ * This implements the WaDisableRCZUnitClockGating workaround.
++ */
++ I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
++
+ I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+
++ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
++ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
++ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
++
++ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
++ I915_WRITE(GEN7_L3CNTLREG1,
++ GEN7_WA_FOR_GEN7_L3_CONTROL);
++ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
++ GEN7_WA_L3_CHICKEN_MODE);
++
++ /* This is required by WaCatErrorRejectionIssue */
++ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
++ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
++ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
++
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
+index 2d1f6c5..73e2c7c 100644
+--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
++++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
+@@ -314,6 +314,10 @@ const u32 r6xx_default_state[] =
+ 0x00000000, /* VGT_VTX_CNT_EN */
+
+ 0xc0016900,
++ 0x000000d4,
++ 0x00000000, /* SX_MISC */
++
++ 0xc0016900,
+ 0x000002c8,
+ 0x00000000, /* VGT_STRMOUT_BUFFER_EN */
+
+@@ -626,6 +630,10 @@ const u32 r7xx_default_state[] =
+ 0x00000000, /* VGT_VTX_CNT_EN */
+
+ 0xc0016900,
++ 0x000000d4,
++ 0x00000000, /* SX_MISC */
++
++ 0xc0016900,
+ 0x000002c8,
+ 0x00000000, /* VGT_STRMOUT_BUFFER_EN */
+
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 00cabb3..3c3daec 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -59,6 +59,9 @@
+ #define USB_VENDOR_ID_AIRCABLE 0x16CA
+ #define USB_DEVICE_ID_AIRCABLE1 0x1502
+
++#define USB_VENDOR_ID_AIREN 0x1a2c
++#define USB_DEVICE_ID_AIREN_SLIMPLUS 0x0002
++
+ #define USB_VENDOR_ID_ALCOR 0x058f
+ #define USB_DEVICE_ID_ALCOR_USBRS232 0x9720
+
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 5028d60..1fe6b80 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -53,6 +53,7 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
+
++ { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index 91be41f..83e3e9d 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -497,8 +497,9 @@ config SENSORS_JC42
+ If you say yes here, you get support for JEDEC JC42.4 compliant
+ temperature sensors, which are used on many DDR3 memory modules for
+ mobile devices and servers. Support will include, but not be limited
+- to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
+- MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3.
++ to, ADT7408, AT30TS00, CAT34TS02, CAT6095, MAX6604, MCP9804, MCP9805,
++ MCP98242, MCP98243, MCP9843, SE97, SE98, STTS424(E), STTS2002,
++ STTS3000, TSE2002B3, TSE2002GB2, TS3000B3, and TS3000GB2.
+
+ This driver can also be built as a module. If so, the module
+ will be called jc42.
+diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
+index 2d3d728..0274a05 100644
+--- a/drivers/hwmon/jc42.c
++++ b/drivers/hwmon/jc42.c
+@@ -64,6 +64,7 @@ static const unsigned short normal_i2c[] = {
+
+ /* Manufacturer IDs */
+ #define ADT_MANID 0x11d4 /* Analog Devices */
++#define ATMEL_MANID 0x001f /* Atmel */
+ #define MAX_MANID 0x004d /* Maxim */
+ #define IDT_MANID 0x00b3 /* IDT */
+ #define MCP_MANID 0x0054 /* Microchip */
+@@ -77,15 +78,25 @@ static const unsigned short normal_i2c[] = {
+ #define ADT7408_DEVID 0x0801
+ #define ADT7408_DEVID_MASK 0xffff
+
++/* Atmel */
++#define AT30TS00_DEVID 0x8201
++#define AT30TS00_DEVID_MASK 0xffff
++
+ /* IDT */
+ #define TS3000B3_DEVID 0x2903 /* Also matches TSE2002B3 */
+ #define TS3000B3_DEVID_MASK 0xffff
+
++#define TS3000GB2_DEVID 0x2912 /* Also matches TSE2002GB2 */
++#define TS3000GB2_DEVID_MASK 0xffff
++
+ /* Maxim */
+ #define MAX6604_DEVID 0x3e00
+ #define MAX6604_DEVID_MASK 0xffff
+
+ /* Microchip */
++#define MCP9804_DEVID 0x0200
++#define MCP9804_DEVID_MASK 0xfffc
++
+ #define MCP98242_DEVID 0x2000
+ #define MCP98242_DEVID_MASK 0xfffc
+
+@@ -113,6 +124,12 @@ static const unsigned short normal_i2c[] = {
+ #define STTS424E_DEVID 0x0000
+ #define STTS424E_DEVID_MASK 0xfffe
+
++#define STTS2002_DEVID 0x0300
++#define STTS2002_DEVID_MASK 0xffff
++
++#define STTS3000_DEVID 0x0200
++#define STTS3000_DEVID_MASK 0xffff
++
+ static u16 jc42_hysteresis[] = { 0, 1500, 3000, 6000 };
+
+ struct jc42_chips {
+@@ -123,8 +140,11 @@ struct jc42_chips {
+
+ static struct jc42_chips jc42_chips[] = {
+ { ADT_MANID, ADT7408_DEVID, ADT7408_DEVID_MASK },
++ { ATMEL_MANID, AT30TS00_DEVID, AT30TS00_DEVID_MASK },
+ { IDT_MANID, TS3000B3_DEVID, TS3000B3_DEVID_MASK },
++ { IDT_MANID, TS3000GB2_DEVID, TS3000GB2_DEVID_MASK },
+ { MAX_MANID, MAX6604_DEVID, MAX6604_DEVID_MASK },
++ { MCP_MANID, MCP9804_DEVID, MCP9804_DEVID_MASK },
+ { MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK },
+ { MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK },
+ { MCP_MANID, MCP9843_DEVID, MCP9843_DEVID_MASK },
+@@ -133,6 +153,8 @@ static struct jc42_chips jc42_chips[] = {
+ { NXP_MANID, SE98_DEVID, SE98_DEVID_MASK },
+ { STM_MANID, STTS424_DEVID, STTS424_DEVID_MASK },
+ { STM_MANID, STTS424E_DEVID, STTS424E_DEVID_MASK },
++ { STM_MANID, STTS2002_DEVID, STTS2002_DEVID_MASK },
++ { STM_MANID, STTS3000_DEVID, STTS3000_DEVID_MASK },
+ };
+
+ /* Each client has this additional data */
+@@ -159,10 +181,12 @@ static struct jc42_data *jc42_update_device(struct device *dev);
+
+ static const struct i2c_device_id jc42_id[] = {
+ { "adt7408", 0 },
++ { "at30ts00", 0 },
+ { "cat94ts02", 0 },
+ { "cat6095", 0 },
+ { "jc42", 0 },
+ { "max6604", 0 },
++ { "mcp9804", 0 },
+ { "mcp9805", 0 },
+ { "mcp98242", 0 },
+ { "mcp98243", 0 },
+@@ -171,8 +195,10 @@ static const struct i2c_device_id jc42_id[] = {
+ { "se97b", 0 },
+ { "se98", 0 },
+ { "stts424", 0 },
+- { "tse2002b3", 0 },
+- { "ts3000b3", 0 },
++ { "stts2002", 0 },
++ { "stts3000", 0 },
++ { "tse2002", 0 },
++ { "ts3000", 0 },
+ { }
+ };
+ MODULE_DEVICE_TABLE(i2c, jc42_id);
+diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
+index 00460d8..d89b339 100644
+--- a/drivers/hwmon/pmbus/pmbus_core.c
++++ b/drivers/hwmon/pmbus/pmbus_core.c
+@@ -54,7 +54,8 @@
+ lcrit_alarm, crit_alarm */
+ #define PMBUS_IOUT_BOOLEANS_PER_PAGE 3 /* alarm, lcrit_alarm,
+ crit_alarm */
+-#define PMBUS_POUT_BOOLEANS_PER_PAGE 2 /* alarm, crit_alarm */
++#define PMBUS_POUT_BOOLEANS_PER_PAGE 3 /* cap_alarm, alarm, crit_alarm
++ */
+ #define PMBUS_MAX_BOOLEANS_PER_FAN 2 /* alarm, fault */
+ #define PMBUS_MAX_BOOLEANS_PER_TEMP 4 /* min_alarm, max_alarm,
+ lcrit_alarm, crit_alarm */
+diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
+index 2bc9800..ba296fd 100644
+--- a/drivers/hwmon/pmbus/zl6100.c
++++ b/drivers/hwmon/pmbus/zl6100.c
+@@ -33,6 +33,7 @@ enum chips { zl2004, zl2006, zl2008, zl2105, zl2106, zl6100, zl6105 };
+ struct zl6100_data {
+ int id;
+ ktime_t access; /* chip access time */
++ int delay; /* Delay between chip accesses in uS */
+ struct pmbus_driver_info info;
+ };
+
+@@ -49,10 +50,10 @@ MODULE_PARM_DESC(delay, "Delay between chip accesses in uS");
+ /* Some chips need a delay between accesses */
+ static inline void zl6100_wait(const struct zl6100_data *data)
+ {
+- if (delay) {
++ if (data->delay) {
+ s64 delta = ktime_us_delta(ktime_get(), data->access);
+- if (delta < delay)
+- udelay(delay - delta);
++ if (delta < data->delay)
++ udelay(data->delay - delta);
+ }
+ }
+
+@@ -184,8 +185,9 @@ static int zl6100_probe(struct i2c_client *client,
+ * can be cleared later for additional chips if tests show that it
+ * is not needed (in other words, better be safe than sorry).
+ */
++ data->delay = delay;
+ if (data->id == zl2004 || data->id == zl6105)
+- delay = 0;
++ data->delay = 0;
+
+ /*
+ * Since there was a direct I2C device access above, wait before
+diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
+index 7e78f7c..3d471d5 100644
+--- a/drivers/i2c/busses/i2c-mxs.c
++++ b/drivers/i2c/busses/i2c-mxs.c
+@@ -72,6 +72,7 @@
+
+ #define MXS_I2C_QUEUESTAT (0x70)
+ #define MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY 0x00002000
++#define MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK 0x0000001F
+
+ #define MXS_I2C_QUEUECMD (0x80)
+
+@@ -219,14 +220,14 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
+ int ret;
+ int flags;
+
+- init_completion(&i2c->cmd_complete);
+-
+ dev_dbg(i2c->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
+ msg->addr, msg->len, msg->flags, stop);
+
+ if (msg->len == 0)
+ return -EINVAL;
+
++ init_completion(&i2c->cmd_complete);
++
+ flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
+
+ if (msg->flags & I2C_M_RD)
+@@ -286,6 +287,7 @@ static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
+ {
+ struct mxs_i2c_dev *i2c = dev_id;
+ u32 stat = readl(i2c->regs + MXS_I2C_CTRL1) & MXS_I2C_IRQ_MASK;
++ bool is_last_cmd;
+
+ if (!stat)
+ return IRQ_NONE;
+@@ -300,9 +302,14 @@ static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
+ else
+ i2c->cmd_err = 0;
+
+- complete(&i2c->cmd_complete);
++ is_last_cmd = (readl(i2c->regs + MXS_I2C_QUEUESTAT) &
++ MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK) == 0;
++
++ if (is_last_cmd || i2c->cmd_err)
++ complete(&i2c->cmd_complete);
+
+ writel(stat, i2c->regs + MXS_I2C_CTRL1_CLR);
++
+ return IRQ_HANDLED;
+ }
+
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index 003587c..9c40c11 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -421,7 +421,9 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int
+
+ /*
+ * First try "E6 report".
+- * ALPS should return 0,0,10 or 0,0,100
++ * ALPS should return 0,0,10 or 0,0,100 if no buttons are pressed.
++ * The bits 0-2 of the first byte will be 1s if some buttons are
++ * pressed.
+ */
+ param[0] = 0;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||
+@@ -437,7 +439,8 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int
+ psmouse_dbg(psmouse, "E6 report: %2.2x %2.2x %2.2x",
+ param[0], param[1], param[2]);
+
+- if (param[0] != 0 || param[1] != 0 || (param[2] != 10 && param[2] != 100))
++ if ((param[0] & 0xf8) != 0 || param[1] != 0 ||
++ (param[2] != 10 && param[2] != 100))
+ return NULL;
+
+ /*
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 82d2410..5c74179 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -268,7 +268,7 @@ static void iommu_set_exclusion_range(struct amd_iommu *iommu)
+ }
+
+ /* Programs the physical address of the device table into the IOMMU hardware */
+-static void __init iommu_set_device_table(struct amd_iommu *iommu)
++static void iommu_set_device_table(struct amd_iommu *iommu)
+ {
+ u64 entry;
+
+diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
+index 8f32b2b..aba706c 100644
+--- a/drivers/iommu/omap-iommu.c
++++ b/drivers/iommu/omap-iommu.c
+@@ -1229,7 +1229,8 @@ static int __init omap_iommu_init(void)
+
+ return platform_driver_register(&omap_iommu_driver);
+ }
+-module_init(omap_iommu_init);
++/* must be ready before omap3isp is probed */
++subsys_initcall(omap_iommu_init);
+
+ static void __exit omap_iommu_exit(void)
+ {
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index 9fb18c1..b280c43 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -323,7 +323,7 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
+ * Corrupt successful READs while in down state.
+ * If flags were specified, only corrupt those that match.
+ */
+- if (!error && bio_submitted_while_down &&
++ if (fc->corrupt_bio_byte && !error && bio_submitted_while_down &&
+ (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
+ all_corrupt_bio_flags_match(bio, fc))
+ corrupt_bio_data(bio, fc);
+diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
+index ad2eba4..ea5dd28 100644
+--- a/drivers/md/dm-io.c
++++ b/drivers/md/dm-io.c
+@@ -296,6 +296,8 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
+ unsigned offset;
+ unsigned num_bvecs;
+ sector_t remaining = where->count;
++ struct request_queue *q = bdev_get_queue(where->bdev);
++ sector_t discard_sectors;
+
+ /*
+ * where->count may be zero if rw holds a flush and we need to
+@@ -305,9 +307,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
+ /*
+ * Allocate a suitably sized-bio.
+ */
+- num_bvecs = dm_sector_div_up(remaining,
+- (PAGE_SIZE >> SECTOR_SHIFT));
+- num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
++ if (rw & REQ_DISCARD)
++ num_bvecs = 1;
++ else
++ num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
++ dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
++
+ bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
+ bio->bi_sector = where->sector + (where->count - remaining);
+ bio->bi_bdev = where->bdev;
+@@ -315,10 +320,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
+ bio->bi_destructor = dm_bio_destructor;
+ store_io_and_region_in_bio(bio, io, region);
+
+- /*
+- * Try and add as many pages as possible.
+- */
+- while (remaining) {
++ if (rw & REQ_DISCARD) {
++ discard_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
++ bio->bi_size = discard_sectors << SECTOR_SHIFT;
++ remaining -= discard_sectors;
++ } else while (remaining) {
++ /*
++ * Try and add as many pages as possible.
++ */
+ dp->get_page(dp, &page, &len, &offset);
+ len = min(len, to_bytes(remaining));
+ if (!bio_add_page(bio, page, len, offset))
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 31c2dc2..1ce84ed 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1437,7 +1437,7 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
+
+ if (!argc) {
+ DMWARN("Empty message received.");
+- goto out;
++ goto out_argv;
+ }
+
+ table = dm_get_live_table(md);
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index c2907d8..d2a3223 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -667,7 +667,14 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
+ return ret;
+
+ sb = page_address(rdev->sb_page);
+- if (sb->magic != cpu_to_le32(DM_RAID_MAGIC)) {
++
++ /*
++ * Two cases that we want to write new superblocks and rebuild:
++ * 1) New device (no matching magic number)
++ * 2) Device specified for rebuild (!In_sync w/ offset == 0)
++ */
++ if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
++ (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
+ super_sync(rdev->mddev, rdev);
+
+ set_bit(FirstUse, &rdev->flags);
+@@ -744,11 +751,8 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
+ */
+ rdev_for_each(r, t, mddev) {
+ if (!test_bit(In_sync, &r->flags)) {
+- if (!test_bit(FirstUse, &r->flags))
+- DMERR("Superblock area of "
+- "rebuild device %d should have been "
+- "cleared.", r->raid_disk);
+- set_bit(FirstUse, &r->flags);
++ DMINFO("Device %d specified for rebuild: "
++ "Clearing superblock", r->raid_disk);
+ rebuilds++;
+ } else if (test_bit(FirstUse, &r->flags))
+ new_devs++;
+@@ -970,6 +974,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
+
+ INIT_WORK(&rs->md.event_work, do_table_event);
+ ti->private = rs;
++ ti->num_flush_requests = 1;
+
+ mutex_lock(&rs->md.reconfig_mutex);
+ ret = md_run(&rs->md);
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 59c4f04..237571a 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -385,6 +385,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
+ data_sm = dm_sm_disk_create(tm, nr_blocks);
+ if (IS_ERR(data_sm)) {
+ DMERR("sm_disk_create failed");
++ dm_tm_unlock(tm, sblock);
+ r = PTR_ERR(data_sm);
+ goto bad;
+ }
+@@ -789,6 +790,11 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
+ return 0;
+ }
+
++/*
++ * __open_device: Returns @td corresponding to device with id @dev,
++ * creating it if @create is set and incrementing @td->open_count.
++ * On failure, @td is undefined.
++ */
+ static int __open_device(struct dm_pool_metadata *pmd,
+ dm_thin_id dev, int create,
+ struct dm_thin_device **td)
+@@ -799,10 +805,16 @@ static int __open_device(struct dm_pool_metadata *pmd,
+ struct disk_device_details details_le;
+
+ /*
+- * Check the device isn't already open.
++ * If the device is already open, return it.
+ */
+ list_for_each_entry(td2, &pmd->thin_devices, list)
+ if (td2->id == dev) {
++ /*
++ * May not create an already-open device.
++ */
++ if (create)
++ return -EEXIST;
++
+ td2->open_count++;
+ *td = td2;
+ return 0;
+@@ -817,6 +829,9 @@ static int __open_device(struct dm_pool_metadata *pmd,
+ if (r != -ENODATA || !create)
+ return r;
+
++ /*
++ * Create new device.
++ */
+ changed = 1;
+ details_le.mapped_blocks = 0;
+ details_le.transaction_id = cpu_to_le64(pmd->trans_id);
+@@ -882,12 +897,10 @@ static int __create_thin(struct dm_pool_metadata *pmd,
+
+ r = __open_device(pmd, dev, 1, &td);
+ if (r) {
+- __close_device(td);
+ dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
+ dm_btree_del(&pmd->bl_info, dev_root);
+ return r;
+ }
+- td->changed = 1;
+ __close_device(td);
+
+ return r;
+@@ -967,14 +980,14 @@ static int __create_snap(struct dm_pool_metadata *pmd,
+ goto bad;
+
+ r = __set_snapshot_details(pmd, td, origin, pmd->time);
++ __close_device(td);
++
+ if (r)
+ goto bad;
+
+- __close_device(td);
+ return 0;
+
+ bad:
+- __close_device(td);
+ dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
+ dm_btree_remove(&pmd->details_info, pmd->details_root,
+ &key, &pmd->details_root);
+@@ -1211,6 +1224,8 @@ static int __remove(struct dm_thin_device *td, dm_block_t block)
+ if (r)
+ return r;
+
++ td->mapped_blocks--;
++ td->changed = 1;
+ pmd->need_commit = 1;
+
+ return 0;
+diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
+index 155fa04..e488a78 100644
+--- a/drivers/mfd/cs5535-mfd.c
++++ b/drivers/mfd/cs5535-mfd.c
+@@ -179,7 +179,7 @@ static struct pci_device_id cs5535_mfd_pci_tbl[] = {
+ };
+ MODULE_DEVICE_TABLE(pci, cs5535_mfd_pci_tbl);
+
+-static struct pci_driver cs5535_mfd_drv = {
++static struct pci_driver cs5535_mfd_driver = {
+ .name = DRV_NAME,
+ .id_table = cs5535_mfd_pci_tbl,
+ .probe = cs5535_mfd_probe,
+@@ -188,12 +188,12 @@ static struct pci_driver cs5535_mfd_drv = {
+
+ static int __init cs5535_mfd_init(void)
+ {
+- return pci_register_driver(&cs5535_mfd_drv);
++ return pci_register_driver(&cs5535_mfd_driver);
+ }
+
+ static void __exit cs5535_mfd_exit(void)
+ {
+- pci_unregister_driver(&cs5535_mfd_drv);
++ pci_unregister_driver(&cs5535_mfd_driver);
+ }
+
+ module_init(cs5535_mfd_init);
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index 0f59228..411f523 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -123,7 +123,7 @@ static int mfd_add_device(struct device *parent, int id,
+ }
+
+ if (!cell->ignore_resource_conflicts) {
+- ret = acpi_check_resource_conflict(res);
++ ret = acpi_check_resource_conflict(&res[r]);
+ if (ret)
+ goto fail_res;
+ }
+diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
+index 61894fc..9302d21 100644
+--- a/drivers/mfd/wm8994-core.c
++++ b/drivers/mfd/wm8994-core.c
+@@ -252,6 +252,20 @@ static int wm8994_suspend(struct device *dev)
+ break;
+ }
+
++ switch (wm8994->type) {
++ case WM1811:
++ ret = wm8994_reg_read(wm8994, WM8994_ANTIPOP_2);
++ if (ret < 0) {
++ dev_err(dev, "Failed to read jackdet: %d\n", ret);
++ } else if (ret & WM1811_JACKDET_MODE_MASK) {
++ dev_dbg(dev, "CODEC still active, ignoring suspend\n");
++ return 0;
++ }
++ break;
++ default:
++ break;
++ }
++
+ /* Disable LDO pulldowns while the device is suspended if we
+ * don't know that something will be driving them. */
+ if (!wm8994->ldo_ena_always_driven)
+diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
+index bc685bf..87a390d 100644
+--- a/drivers/misc/cs5535-mfgpt.c
++++ b/drivers/misc/cs5535-mfgpt.c
+@@ -262,7 +262,7 @@ static void __init reset_all_timers(void)
+ * In other cases (such as with VSAless OpenFirmware), the system firmware
+ * leaves timers available for us to use.
+ */
+-static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt)
++static int __devinit scan_timers(struct cs5535_mfgpt_chip *mfgpt)
+ {
+ struct cs5535_mfgpt_timer timer = { .chip = mfgpt };
+ unsigned long flags;
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index 72bc756..9896933 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -1944,12 +1944,12 @@ static bool atmci_filter(struct dma_chan *chan, void *slave)
+ }
+ }
+
+-static void atmci_configure_dma(struct atmel_mci *host)
++static bool atmci_configure_dma(struct atmel_mci *host)
+ {
+ struct mci_platform_data *pdata;
+
+ if (host == NULL)
+- return;
++ return false;
+
+ pdata = host->pdev->dev.platform_data;
+
+@@ -1966,12 +1966,15 @@ static void atmci_configure_dma(struct atmel_mci *host)
+ host->dma.chan =
+ dma_request_channel(mask, atmci_filter, pdata->dma_slave);
+ }
+- if (!host->dma.chan)
+- dev_notice(&host->pdev->dev, "DMA not available, using PIO\n");
+- else
++ if (!host->dma.chan) {
++ dev_warn(&host->pdev->dev, "no DMA channel available\n");
++ return false;
++ } else {
+ dev_info(&host->pdev->dev,
+ "Using %s for DMA transfers\n",
+ dma_chan_name(host->dma.chan));
++ return true;
++ }
+ }
+
+ static inline unsigned int atmci_get_version(struct atmel_mci *host)
+@@ -2081,8 +2084,7 @@ static int __init atmci_probe(struct platform_device *pdev)
+
+ /* Get MCI capabilities and set operations according to it */
+ atmci_get_cap(host);
+- if (host->caps.has_dma) {
+- dev_info(&pdev->dev, "using DMA\n");
++ if (host->caps.has_dma && atmci_configure_dma(host)) {
+ host->prepare_data = &atmci_prepare_data_dma;
+ host->submit_data = &atmci_submit_data_dma;
+ host->stop_transfer = &atmci_stop_transfer_dma;
+@@ -2092,15 +2094,12 @@ static int __init atmci_probe(struct platform_device *pdev)
+ host->submit_data = &atmci_submit_data_pdc;
+ host->stop_transfer = &atmci_stop_transfer_pdc;
+ } else {
+- dev_info(&pdev->dev, "no DMA, no PDC\n");
++ dev_info(&pdev->dev, "using PIO\n");
+ host->prepare_data = &atmci_prepare_data;
+ host->submit_data = &atmci_submit_data;
+ host->stop_transfer = &atmci_stop_transfer;
+ }
+
+- if (host->caps.has_dma)
+- atmci_configure_dma(host);
+-
+ platform_set_drvdata(pdev, host);
+
+ /* We need at least one slot to succeed */
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index 38ebc4e..4540e37 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -269,8 +269,9 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
+ imx_data->scratchpad = val;
+ return;
+ case SDHCI_COMMAND:
+- if ((host->cmd->opcode == MMC_STOP_TRANSMISSION)
+- && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
++ if ((host->cmd->opcode == MMC_STOP_TRANSMISSION ||
++ host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
++ (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
+ val |= SDHCI_CMD_ABORTCMD;
+
+ if (is_imx6q_usdhc(imx_data)) {
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index 99ed6eb..4fd4144 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -570,6 +570,13 @@ static const struct usb_device_id products [] = {
+ .driver_info = 0,
+ },
+
++/* Logitech Harmony 900 - uses the pseudo-MDLM (BLAN) driver */
++{
++ USB_DEVICE_AND_INTERFACE_INFO(0x046d, 0xc11f, USB_CLASS_COMM,
++ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
++ .driver_info = 0,
++},
++
+ /*
+ * WHITELIST!!!
+ *
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index fae0fbd..81b96e3 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -589,6 +589,7 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
+ entry = (struct skb_data *) skb->cb;
+ urb = entry->urb;
+
++ spin_unlock_irqrestore(&q->lock, flags);
+ // during some PM-driven resume scenarios,
+ // these (async) unlinks complete immediately
+ retval = usb_unlink_urb (urb);
+@@ -596,6 +597,7 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
+ netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
+ else
+ count++;
++ spin_lock_irqsave(&q->lock, flags);
+ }
+ spin_unlock_irqrestore (&q->lock, flags);
+ return count;
+diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
+index 1a2234c..246b3bb 100644
+--- a/drivers/net/usb/zaurus.c
++++ b/drivers/net/usb/zaurus.c
+@@ -349,6 +349,13 @@ static const struct usb_device_id products [] = {
+ ZAURUS_MASTER_INTERFACE,
+ .driver_info = OLYMPUS_MXL_INFO,
+ },
++
++/* Logitech Harmony 900 - uses the pseudo-MDLM (BLAN) driver */
++{
++ USB_DEVICE_AND_INTERFACE_INFO(0x046d, 0xc11f, USB_CLASS_COMM,
++ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
++ .driver_info = (unsigned long) &bogus_mdlm_info,
++},
+ { }, // END
+ };
+ MODULE_DEVICE_TABLE(usb, products);
+diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+index f199e9e..0a3c7c8 100644
+--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
++++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+@@ -489,8 +489,6 @@ static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
+ ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
+ ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
+- ATH_ALLOC_BANK(ah->addac5416_21,
+- ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
+ ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
+
+ return 0;
+@@ -519,7 +517,6 @@ static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah)
+ ATH_FREE_BANK(ah->analogBank6Data);
+ ATH_FREE_BANK(ah->analogBank6TPCData);
+ ATH_FREE_BANK(ah->analogBank7Data);
+- ATH_FREE_BANK(ah->addac5416_21);
+ ATH_FREE_BANK(ah->bank6Temp);
+
+ #undef ATH_FREE_BANK
+@@ -805,27 +802,7 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
+ if (ah->eep_ops->set_addac)
+ ah->eep_ops->set_addac(ah, chan);
+
+- if (AR_SREV_5416_22_OR_LATER(ah)) {
+- REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
+- } else {
+- struct ar5416IniArray temp;
+- u32 addacSize =
+- sizeof(u32) * ah->iniAddac.ia_rows *
+- ah->iniAddac.ia_columns;
+-
+- /* For AR5416 2.0/2.1 */
+- memcpy(ah->addac5416_21,
+- ah->iniAddac.ia_array, addacSize);
+-
+- /* override CLKDRV value at [row, column] = [31, 1] */
+- (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;
+-
+- temp.ia_array = ah->addac5416_21;
+- temp.ia_columns = ah->iniAddac.ia_columns;
+- temp.ia_rows = ah->iniAddac.ia_rows;
+- REG_WRITE_ARRAY(&temp, 1, regWrites);
+- }
+-
++ REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
+ REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+index 11f192a..d190411 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
++++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+@@ -180,6 +180,25 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
+ ARRAY_SIZE(ar5416Addac), 2);
+ }
++
++ /* iniAddac needs to be modified for these chips */
++ if (AR_SREV_9160(ah) || !AR_SREV_5416_22_OR_LATER(ah)) {
++ struct ar5416IniArray *addac = &ah->iniAddac;
++ u32 size = sizeof(u32) * addac->ia_rows * addac->ia_columns;
++ u32 *data;
++
++ data = kmalloc(size, GFP_KERNEL);
++ if (!data)
++ return;
++
++ memcpy(data, addac->ia_array, size);
++ addac->ia_array = data;
++
++ if (!AR_SREV_5416_22_OR_LATER(ah)) {
++ /* override CLKDRV value */
++ INI_RA(addac, 31,1) = 0;
++ }
++ }
+ }
+
+ /* Support for Japan ch.14 (2484) spread */
+diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
+index f389b3c..1bd8edf 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.h
++++ b/drivers/net/wireless/ath/ath9k/hw.h
+@@ -772,7 +772,6 @@ struct ath_hw {
+ u32 *analogBank6Data;
+ u32 *analogBank6TPCData;
+ u32 *analogBank7Data;
+- u32 *addac5416_21;
+ u32 *bank6Temp;
+
+ u8 txpower_limit;
+diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
+index 59472e1..f6384af 100644
+--- a/drivers/net/wireless/ath/carl9170/tx.c
++++ b/drivers/net/wireless/ath/carl9170/tx.c
+@@ -1234,6 +1234,7 @@ static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
+ {
+ struct ieee80211_sta *sta;
+ struct carl9170_sta_info *sta_info;
++ struct ieee80211_tx_info *tx_info;
+
+ rcu_read_lock();
+ sta = __carl9170_get_tx_sta(ar, skb);
+@@ -1241,16 +1242,18 @@ static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
+ goto out_rcu;
+
+ sta_info = (void *) sta->drv_priv;
+- if (unlikely(sta_info->sleeping)) {
+- struct ieee80211_tx_info *tx_info;
++ tx_info = IEEE80211_SKB_CB(skb);
+
++ if (unlikely(sta_info->sleeping) &&
++ !(tx_info->flags & (IEEE80211_TX_CTL_POLL_RESPONSE |
++ IEEE80211_TX_CTL_CLEAR_PS_FILT))) {
+ rcu_read_unlock();
+
+- tx_info = IEEE80211_SKB_CB(skb);
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
+ atomic_dec(&ar->tx_ampdu_upload);
+
+ tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
++ carl9170_release_dev_space(ar, skb);
+ carl9170_tx_status(ar, skb, false);
+ return true;
+ }
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+index 4b2aa1d..5cfb3d1 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+@@ -1211,6 +1211,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
+ unsigned long flags;
+ struct iwl_addsta_cmd sta_cmd;
+ u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
++ __le16 key_flags;
+
+ /* if station isn't there, neither is the key */
+ if (sta_id == IWL_INVALID_STATION)
+@@ -1236,7 +1237,14 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
+ IWL_ERR(priv, "offset %d not used in uCode key table.\n",
+ keyconf->hw_key_idx);
+
+- sta_cmd.key.key_flags = STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
++ key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
++ key_flags |= STA_KEY_FLG_MAP_KEY_MSK | STA_KEY_FLG_NO_ENC |
++ STA_KEY_FLG_INVALID;
++
++ if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
++ key_flags |= STA_KEY_MULTICAST_MSK;
++
++ sta_cmd.key.key_flags = key_flags;
+ sta_cmd.key.key_offset = WEP_INVALID_OFFSET;
+ sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index c244f2f..94a3e17 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -275,6 +275,8 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
+
+ /****** 8188CU ********/
++ /* RTL8188CTV */
++ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x018a, rtl92cu_hal_cfg)},
+ /* 8188CE-VAU USB minCard */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
+ /* 8188cu 1*1 dongle */
+@@ -291,14 +293,14 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
+ /* 8188RU in Alfa AWUS036NHR */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
++ /* RTL8188CUS-VL */
++ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
+ /* 8188 Combo for BC4 */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
+
+ /****** 8192CU ********/
+- /* 8191cu 1*2 */
+- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
+ /* 8192cu 2*2 */
+- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
++ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8178, rtl92cu_hal_cfg)},
+ /* 8192CE-VAU USB minCard */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
+
+@@ -309,13 +311,17 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+ {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+- {RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
++ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
++ {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+ /* HP - Lite-On ,8188CUS Slim Combo */
+ {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
+ {RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
++ {RTL_USB_DEVICE(0x2019, 0x4902, rtl92cu_hal_cfg)}, /*Planex - Etop*/
+ {RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
++ /*SW-WF02-AD15 -Abocom*/
++ {RTL_USB_DEVICE(0x2019, 0xab2e, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
+ {RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
+ {RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
+@@ -326,14 +332,36 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x4855, 0x0091, rtl92cu_hal_cfg)}, /* NetweeN-Feixun */
+ {RTL_USB_DEVICE(0x9846, 0x9041, rtl92cu_hal_cfg)}, /* Netgear Cameo */
+
++ /****** 8188 RU ********/
++ /* Netcore */
++ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x317f, rtl92cu_hal_cfg)},
++
++ /****** 8188CUS Slim Solo********/
++ {RTL_USB_DEVICE(0x04f2, 0xaff7, rtl92cu_hal_cfg)}, /*Xavi*/
++ {RTL_USB_DEVICE(0x04f2, 0xaff9, rtl92cu_hal_cfg)}, /*Xavi*/
++ {RTL_USB_DEVICE(0x04f2, 0xaffa, rtl92cu_hal_cfg)}, /*Xavi*/
++
++ /****** 8188CUS Slim Combo ********/
++ {RTL_USB_DEVICE(0x04f2, 0xaff8, rtl92cu_hal_cfg)}, /*Xavi*/
++ {RTL_USB_DEVICE(0x04f2, 0xaffb, rtl92cu_hal_cfg)}, /*Xavi*/
++ {RTL_USB_DEVICE(0x04f2, 0xaffc, rtl92cu_hal_cfg)}, /*Xavi*/
++ {RTL_USB_DEVICE(0x2019, 0x1201, rtl92cu_hal_cfg)}, /*Planex-Vencer*/
++
+ /****** 8192CU ********/
++ {RTL_USB_DEVICE(0x050d, 0x2102, rtl92cu_hal_cfg)}, /*Belcom-Sercomm*/
++ {RTL_USB_DEVICE(0x050d, 0x2103, rtl92cu_hal_cfg)}, /*Belcom-Edimax*/
+ {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
+ {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
+ {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
++ {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
++ {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
++ {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
++ {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
+ {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
+ {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
++ {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
+ {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
+ {}
+ };
+diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
+index 691b1ab..30d2072 100644
+--- a/drivers/rapidio/devices/tsi721.c
++++ b/drivers/rapidio/devices/tsi721.c
+@@ -410,13 +410,14 @@ static void tsi721_db_dpc(struct work_struct *work)
+ */
+ mport = priv->mport;
+
+- wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE));
+- rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
++ wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
++ rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE;
+
+ while (wr_ptr != rd_ptr) {
+ idb_entry = (u64 *)(priv->idb_base +
+ (TSI721_IDB_ENTRY_SIZE * rd_ptr));
+ rd_ptr++;
++ rd_ptr %= IDB_QSIZE;
+ idb.msg = *idb_entry;
+ *idb_entry = 0;
+
+diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
+index ca0d608..1cead1d 100644
+--- a/drivers/regulator/88pm8607.c
++++ b/drivers/regulator/88pm8607.c
+@@ -196,7 +196,7 @@ static const unsigned int LDO12_suspend_table[] = {
+ };
+
+ static const unsigned int LDO13_table[] = {
+- 1300000, 1800000, 2000000, 2500000, 2800000, 3000000, 0, 0,
++ 1200000, 1300000, 1800000, 2000000, 2500000, 2800000, 3000000, 0,
+ };
+
+ static const unsigned int LDO13_suspend_table[] = {
+@@ -389,10 +389,10 @@ static struct pm8607_regulator_info pm8607_regulator_info[] = {
+ PM8607_LDO( 7, LDO7, 0, 3, SUPPLIES_EN12, 1),
+ PM8607_LDO( 8, LDO8, 0, 3, SUPPLIES_EN12, 2),
+ PM8607_LDO( 9, LDO9, 0, 3, SUPPLIES_EN12, 3),
+- PM8607_LDO(10, LDO10, 0, 3, SUPPLIES_EN12, 4),
++ PM8607_LDO(10, LDO10, 0, 4, SUPPLIES_EN12, 4),
+ PM8607_LDO(12, LDO12, 0, 4, SUPPLIES_EN12, 5),
+ PM8607_LDO(13, VIBRATOR_SET, 1, 3, VIBRATOR_SET, 0),
+- PM8607_LDO(14, LDO14, 0, 4, SUPPLIES_EN12, 6),
++ PM8607_LDO(14, LDO14, 0, 3, SUPPLIES_EN12, 6),
+ };
+
+ static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
+diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
+index 6ab2968..fe9dacc 100644
+--- a/drivers/s390/block/dasd_eckd.c
++++ b/drivers/s390/block/dasd_eckd.c
+@@ -18,12 +18,12 @@
+ #include <linux/hdreg.h> /* HDIO_GETGEO */
+ #include <linux/bio.h>
+ #include <linux/module.h>
++#include <linux/compat.h>
+ #include <linux/init.h>
+
+ #include <asm/debug.h>
+ #include <asm/idals.h>
+ #include <asm/ebcdic.h>
+-#include <asm/compat.h>
+ #include <asm/io.h>
+ #include <asm/uaccess.h>
+ #include <asm/cio.h>
+diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
+index f1a2016..792c69e 100644
+--- a/drivers/s390/block/dasd_ioctl.c
++++ b/drivers/s390/block/dasd_ioctl.c
+@@ -13,6 +13,7 @@
+ #define KMSG_COMPONENT "dasd"
+
+ #include <linux/interrupt.h>
++#include <linux/compat.h>
+ #include <linux/major.h>
+ #include <linux/fs.h>
+ #include <linux/blkpg.h>
+diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
+index e712981..9117045 100644
+--- a/drivers/s390/char/fs3270.c
++++ b/drivers/s390/char/fs3270.c
+@@ -11,6 +11,7 @@
+ #include <linux/console.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
++#include <linux/compat.h>
+ #include <linux/module.h>
+ #include <linux/list.h>
+ #include <linux/slab.h>
+diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
+index 75bde6a..89c03e6 100644
+--- a/drivers/s390/char/vmcp.c
++++ b/drivers/s390/char/vmcp.c
+@@ -13,6 +13,7 @@
+
+ #include <linux/fs.h>
+ #include <linux/init.h>
++#include <linux/compat.h>
+ #include <linux/kernel.h>
+ #include <linux/miscdevice.h>
+ #include <linux/slab.h>
+diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
+index 0c87b0f..8f9a1a3 100644
+--- a/drivers/s390/cio/chsc_sch.c
++++ b/drivers/s390/cio/chsc_sch.c
+@@ -8,6 +8,7 @@
+ */
+
+ #include <linux/slab.h>
++#include <linux/compat.h>
+ #include <linux/device.h>
+ #include <linux/module.h>
+ #include <linux/uaccess.h>
+diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
+index 3ef8d07..770a740 100644
+--- a/drivers/s390/cio/qdio_main.c
++++ b/drivers/s390/cio/qdio_main.c
+@@ -167,7 +167,7 @@ again:
+ DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
+ DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
+- 0, -1, -1, q->irq_ptr->int_parm);
++ q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
+ return 0;
+ }
+
+@@ -215,7 +215,7 @@ again:
+ DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
+ DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+ q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
+- 0, -1, -1, q->irq_ptr->int_parm);
++ q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
+ return 0;
+ }
+
+diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
+index 303dde0..fab2c25 100644
+--- a/drivers/s390/scsi/zfcp_cfdc.c
++++ b/drivers/s390/scsi/zfcp_cfdc.c
+@@ -11,6 +11,7 @@
+ #define KMSG_COMPONENT "zfcp"
+ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
++#include <linux/compat.h>
+ #include <linux/slab.h>
+ #include <linux/types.h>
+ #include <linux/miscdevice.h>
+diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
+index b31a8e3..d4ed9eb 100644
+--- a/drivers/scsi/osd/osd_uld.c
++++ b/drivers/scsi/osd/osd_uld.c
+@@ -69,10 +69,10 @@
+ #ifndef SCSI_OSD_MAJOR
+ # define SCSI_OSD_MAJOR 260
+ #endif
+-#define SCSI_OSD_MAX_MINOR 64
++#define SCSI_OSD_MAX_MINOR MINORMASK
+
+ static const char osd_name[] = "osd";
+-static const char *osd_version_string = "open-osd 0.2.0";
++static const char *osd_version_string = "open-osd 0.2.1";
+
+ MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
+ MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko");
+diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
+index 6a80749..027b6d0 100644
+--- a/drivers/spi/spi-topcliff-pch.c
++++ b/drivers/spi/spi-topcliff-pch.c
+@@ -1717,7 +1717,7 @@ static int pch_spi_resume(struct pci_dev *pdev)
+
+ #endif
+
+-static struct pci_driver pch_spi_pcidev = {
++static struct pci_driver pch_spi_pcidev_driver = {
+ .name = "pch_spi",
+ .id_table = pch_spi_pcidev_id,
+ .probe = pch_spi_probe,
+@@ -1733,7 +1733,7 @@ static int __init pch_spi_init(void)
+ if (ret)
+ return ret;
+
+- ret = pci_register_driver(&pch_spi_pcidev);
++ ret = pci_register_driver(&pch_spi_pcidev_driver);
+ if (ret)
+ return ret;
+
+@@ -1743,7 +1743,7 @@ module_init(pch_spi_init);
+
+ static void __exit pch_spi_exit(void)
+ {
+- pci_unregister_driver(&pch_spi_pcidev);
++ pci_unregister_driver(&pch_spi_pcidev_driver);
+ platform_driver_unregister(&pch_spi_pd_driver);
+ }
+ module_exit(pch_spi_exit);
+diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
+index 8a060a8..1501e4e 100644
+--- a/drivers/staging/media/lirc/lirc_serial.c
++++ b/drivers/staging/media/lirc/lirc_serial.c
+@@ -836,25 +836,22 @@ static int hardware_init_port(void)
+ return 0;
+ }
+
+-static int init_port(void)
++static int __devinit lirc_serial_probe(struct platform_device *dev)
+ {
+ int i, nlow, nhigh, result;
+
+ result = request_irq(irq, irq_handler,
+ (share_irq ? IRQF_SHARED : 0),
+ LIRC_DRIVER_NAME, (void *)&hardware);
+-
+- switch (result) {
+- case -EBUSY:
+- printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n", irq);
+- return -EBUSY;
+- case -EINVAL:
+- printk(KERN_ERR LIRC_DRIVER_NAME
+- ": Bad irq number or handler\n");
+- return -EINVAL;
+- default:
+- break;
+- };
++ if (result < 0) {
++ if (result == -EBUSY)
++ printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n",
++ irq);
++ else if (result == -EINVAL)
++ printk(KERN_ERR LIRC_DRIVER_NAME
++ ": Bad irq number or handler\n");
++ return result;
++ }
+
+ /* Reserve io region. */
+ /*
+@@ -875,11 +872,14 @@ static int init_port(void)
+ ": or compile the serial port driver as module and\n");
+ printk(KERN_WARNING LIRC_DRIVER_NAME
+ ": make sure this module is loaded first\n");
+- return -EBUSY;
++ result = -EBUSY;
++ goto exit_free_irq;
+ }
+
+- if (hardware_init_port() < 0)
+- return -EINVAL;
++ if (hardware_init_port() < 0) {
++ result = -EINVAL;
++ goto exit_release_region;
++ }
+
+ /* Initialize pulse/space widths */
+ init_timing_params(duty_cycle, freq);
+@@ -911,6 +911,28 @@ static int init_port(void)
+
+ dprintk("Interrupt %d, port %04x obtained\n", irq, io);
+ return 0;
++
++exit_release_region:
++ if (iommap != 0)
++ release_mem_region(iommap, 8 << ioshift);
++ else
++ release_region(io, 8);
++exit_free_irq:
++ free_irq(irq, (void *)&hardware);
++
++ return result;
++}
++
++static int __devexit lirc_serial_remove(struct platform_device *dev)
++{
++ free_irq(irq, (void *)&hardware);
++
++ if (iommap != 0)
++ release_mem_region(iommap, 8 << ioshift);
++ else
++ release_region(io, 8);
++
++ return 0;
+ }
+
+ static int set_use_inc(void *data)
+@@ -1076,16 +1098,6 @@ static struct lirc_driver driver = {
+
+ static struct platform_device *lirc_serial_dev;
+
+-static int __devinit lirc_serial_probe(struct platform_device *dev)
+-{
+- return 0;
+-}
+-
+-static int __devexit lirc_serial_remove(struct platform_device *dev)
+-{
+- return 0;
+-}
+-
+ static int lirc_serial_suspend(struct platform_device *dev,
+ pm_message_t state)
+ {
+@@ -1112,10 +1124,8 @@ static int lirc_serial_resume(struct platform_device *dev)
+ {
+ unsigned long flags;
+
+- if (hardware_init_port() < 0) {
+- lirc_serial_exit();
++ if (hardware_init_port() < 0)
+ return -EINVAL;
+- }
+
+ spin_lock_irqsave(&hardware[type].lock, flags);
+ /* Enable Interrupt */
+@@ -1188,10 +1198,6 @@ static int __init lirc_serial_init_module(void)
+ {
+ int result;
+
+- result = lirc_serial_init();
+- if (result)
+- return result;
+-
+ switch (type) {
+ case LIRC_HOMEBREW:
+ case LIRC_IRDEO:
+@@ -1211,8 +1217,7 @@ static int __init lirc_serial_init_module(void)
+ break;
+ #endif
+ default:
+- result = -EINVAL;
+- goto exit_serial_exit;
++ return -EINVAL;
+ }
+ if (!softcarrier) {
+ switch (type) {
+@@ -1228,37 +1233,26 @@ static int __init lirc_serial_init_module(void)
+ }
+ }
+
+- result = init_port();
+- if (result < 0)
+- goto exit_serial_exit;
++ result = lirc_serial_init();
++ if (result)
++ return result;
++
+ driver.features = hardware[type].features;
+ driver.dev = &lirc_serial_dev->dev;
+ driver.minor = lirc_register_driver(&driver);
+ if (driver.minor < 0) {
+ printk(KERN_ERR LIRC_DRIVER_NAME
+ ": register_chrdev failed!\n");
+- result = -EIO;
+- goto exit_release;
++ lirc_serial_exit();
++ return -EIO;
+ }
+ return 0;
+-exit_release:
+- release_region(io, 8);
+-exit_serial_exit:
+- lirc_serial_exit();
+- return result;
+ }
+
+ static void __exit lirc_serial_exit_module(void)
+ {
+- lirc_serial_exit();
+-
+- free_irq(irq, (void *)&hardware);
+-
+- if (iommap != 0)
+- release_mem_region(iommap, 8 << ioshift);
+- else
+- release_region(io, 8);
+ lirc_unregister_driver(driver.minor);
++ lirc_serial_exit();
+ dprintk("cleaned up module\n");
+ }
+
+diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
+index b3d1741..830cd62 100644
+--- a/drivers/tty/Kconfig
++++ b/drivers/tty/Kconfig
+@@ -365,7 +365,7 @@ config PPC_EPAPR_HV_BYTECHAN
+
+ config PPC_EARLY_DEBUG_EHV_BC
+ bool "Early console (udbg) support for ePAPR hypervisors"
+- depends on PPC_EPAPR_HV_BYTECHAN
++ depends on PPC_EPAPR_HV_BYTECHAN=y
+ help
+ Select this option to enable early console (a.k.a. "udbg") support
+ via an ePAPR byte channel. You also need to choose the byte channel
+diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
+index c56378c..7099c31 100644
+--- a/drivers/video/omap2/dss/hdmi.c
++++ b/drivers/video/omap2/dss/hdmi.c
+@@ -490,6 +490,7 @@ bool omapdss_hdmi_detect(void)
+
+ int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
+ {
++ struct omap_dss_hdmi_data *priv = dssdev->data;
+ int r = 0;
+
+ DSSDBG("ENTER hdmi_display_enable\n");
+@@ -502,6 +503,8 @@ int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
+ goto err0;
+ }
+
++ hdmi.ip_data.hpd_gpio = priv->hpd_gpio;
++
+ r = omap_dss_start_device(dssdev);
+ if (r) {
+ DSSERR("failed to start device\n");
+diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h
+index 2c3443d..ec337b5d 100644
+--- a/drivers/video/omap2/dss/ti_hdmi.h
++++ b/drivers/video/omap2/dss/ti_hdmi.h
+@@ -121,6 +121,10 @@ struct hdmi_ip_data {
+ const struct ti_hdmi_ip_ops *ops;
+ struct hdmi_config cfg;
+ struct hdmi_pll_info pll_data;
++
++ /* ti_hdmi_4xxx_ip private data. These should be in a separate struct */
++ int hpd_gpio;
++ bool phy_tx_enabled;
+ };
+ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data);
+ void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data);
+diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
+index e1a6ce5..aad48a1 100644
+--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
++++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
+@@ -28,6 +28,7 @@
+ #include <linux/delay.h>
+ #include <linux/string.h>
+ #include <linux/seq_file.h>
++#include <linux/gpio.h>
+
+ #include "ti_hdmi_4xxx_ip.h"
+ #include "dss.h"
+@@ -223,6 +224,49 @@ void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data)
+ hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_ALLOFF);
+ }
+
++static int hdmi_check_hpd_state(struct hdmi_ip_data *ip_data)
++{
++ unsigned long flags;
++ bool hpd;
++ int r;
++ /* this should be in ti_hdmi_4xxx_ip private data */
++ static DEFINE_SPINLOCK(phy_tx_lock);
++
++ spin_lock_irqsave(&phy_tx_lock, flags);
++
++ hpd = gpio_get_value(ip_data->hpd_gpio);
++
++ if (hpd == ip_data->phy_tx_enabled) {
++ spin_unlock_irqrestore(&phy_tx_lock, flags);
++ return 0;
++ }
++
++ if (hpd)
++ r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON);
++ else
++ r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
++
++ if (r) {
++ DSSERR("Failed to %s PHY TX power\n",
++ hpd ? "enable" : "disable");
++ goto err;
++ }
++
++ ip_data->phy_tx_enabled = hpd;
++err:
++ spin_unlock_irqrestore(&phy_tx_lock, flags);
++ return r;
++}
++
++static irqreturn_t hpd_irq_handler(int irq, void *data)
++{
++ struct hdmi_ip_data *ip_data = data;
++
++ hdmi_check_hpd_state(ip_data);
++
++ return IRQ_HANDLED;
++}
++
+ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
+ {
+ u16 r = 0;
+@@ -232,10 +276,6 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
+ if (r)
+ return r;
+
+- r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON);
+- if (r)
+- return r;
+-
+ /*
+ * Read address 0 in order to get the SCP reset done completed
+ * Dummy access performed to make sure reset is done
+@@ -257,12 +297,32 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
+ /* Write to phy address 3 to change the polarity control */
+ REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
+
++ r = request_threaded_irq(gpio_to_irq(ip_data->hpd_gpio),
++ NULL, hpd_irq_handler,
++ IRQF_DISABLED | IRQF_TRIGGER_RISING |
++ IRQF_TRIGGER_FALLING, "hpd", ip_data);
++ if (r) {
++ DSSERR("HPD IRQ request failed\n");
++ hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
++ return r;
++ }
++
++ r = hdmi_check_hpd_state(ip_data);
++ if (r) {
++ free_irq(gpio_to_irq(ip_data->hpd_gpio), ip_data);
++ hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
++ return r;
++ }
++
+ return 0;
+ }
+
+ void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data)
+ {
++ free_irq(gpio_to_irq(ip_data->hpd_gpio), ip_data);
++
+ hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
++ ip_data->phy_tx_enabled = false;
+ }
+
+ static int hdmi_core_ddc_init(struct hdmi_ip_data *ip_data)
+@@ -419,14 +479,7 @@ int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data,
+
+ bool ti_hdmi_4xxx_detect(struct hdmi_ip_data *ip_data)
+ {
+- int r;
+-
+- void __iomem *base = hdmi_core_sys_base(ip_data);
+-
+- /* HPD */
+- r = REG_GET(base, HDMI_CORE_SYS_SYS_STAT, 1, 1);
+-
+- return r == 1;
++ return gpio_get_value(ip_data->hpd_gpio);
+ }
+
+ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
+diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c
+index d5aaca9..8497727 100644
+--- a/drivers/video/via/hw.c
++++ b/drivers/video/via/hw.c
+@@ -1810,7 +1810,11 @@ static void hw_init(void)
+ break;
+ }
+
++ /* magic required on VX900 for correct modesetting on IGA1 */
++ via_write_reg_mask(VIACR, 0x45, 0x00, 0x01);
++
+ /* probably this should go to the scaling code one day */
++ via_write_reg_mask(VIACR, 0xFD, 0, 0x80); /* VX900 hw scale on IGA2 */
+ viafb_write_regx(scaling_parameters, ARRAY_SIZE(scaling_parameters));
+
+ /* Fill VPIT Parameters */
+diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
+index 8464ea1..3c166d3 100644
+--- a/drivers/watchdog/hpwdt.c
++++ b/drivers/watchdog/hpwdt.c
+@@ -231,7 +231,7 @@ static int __devinit cru_detect(unsigned long map_entry,
+
+ cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
+
+- set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE));
++ set_memory_x((unsigned long)bios32_map, 2);
+ asminline_call(&cmn_regs, bios32_entrypoint);
+
+ if (cmn_regs.u1.ral != 0) {
+@@ -250,7 +250,8 @@ static int __devinit cru_detect(unsigned long map_entry,
+ cru_rom_addr =
+ ioremap(cru_physical_address, cru_length);
+ if (cru_rom_addr) {
+- set_memory_x((unsigned long)cru_rom_addr, cru_length);
++ set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
++ (cru_length + PAGE_SIZE - 1) >> PAGE_SHIFT);
+ retval = 0;
+ }
+ }
+diff --git a/fs/aio.c b/fs/aio.c
+index 969beb0..67e4b90 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -490,6 +490,8 @@ static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
+ kmem_cache_free(kiocb_cachep, req);
+ ctx->reqs_active--;
+ }
++ if (unlikely(!ctx->reqs_active && ctx->dead))
++ wake_up_all(&ctx->wait);
+ spin_unlock_irq(&ctx->ctx_lock);
+ }
+
+diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
+index 326dc08..308a98b 100644
+--- a/fs/autofs4/autofs_i.h
++++ b/fs/autofs4/autofs_i.h
+@@ -110,6 +110,7 @@ struct autofs_sb_info {
+ int sub_version;
+ int min_proto;
+ int max_proto;
++ int compat_daemon;
+ unsigned long exp_timeout;
+ unsigned int type;
+ int reghost_enabled;
+diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
+index 509fe1e..56bac70 100644
+--- a/fs/autofs4/dev-ioctl.c
++++ b/fs/autofs4/dev-ioctl.c
+@@ -385,6 +385,7 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp,
+ sbi->pipefd = pipefd;
+ sbi->pipe = pipe;
+ sbi->catatonic = 0;
++ sbi->compat_daemon = is_compat_task();
+ }
+ out:
+ mutex_unlock(&sbi->wq_mutex);
+diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
+index 8179f1a..98a5695 100644
+--- a/fs/autofs4/inode.c
++++ b/fs/autofs4/inode.c
+@@ -19,6 +19,7 @@
+ #include <linux/parser.h>
+ #include <linux/bitops.h>
+ #include <linux/magic.h>
++#include <linux/compat.h>
+ #include "autofs_i.h"
+ #include <linux/module.h>
+
+@@ -224,6 +225,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
+ set_autofs_type_indirect(&sbi->type);
+ sbi->min_proto = 0;
+ sbi->max_proto = 0;
++ sbi->compat_daemon = is_compat_task();
+ mutex_init(&sbi->wq_mutex);
+ spin_lock_init(&sbi->fs_lock);
+ sbi->queues = NULL;
+diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
+index e1fbdee..6861f61 100644
+--- a/fs/autofs4/waitq.c
++++ b/fs/autofs4/waitq.c
+@@ -90,7 +90,24 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
+
+ return (bytes > 0);
+ }
+-
++
++/*
++ * The autofs_v5 packet was misdesigned.
++ *
++ * The packets are identical on x86-32 and x86-64, but have different
++ * alignment. Which means that 'sizeof()' will give different results.
++ * Fix it up for the case of running 32-bit user mode on a 64-bit kernel.
++ */
++static noinline size_t autofs_v5_packet_size(struct autofs_sb_info *sbi)
++{
++ size_t pktsz = sizeof(struct autofs_v5_packet);
++#if defined(CONFIG_X86_64) && defined(CONFIG_COMPAT)
++ if (sbi->compat_daemon > 0)
++ pktsz -= 4;
++#endif
++ return pktsz;
++}
++
+ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
+ struct autofs_wait_queue *wq,
+ int type)
+@@ -147,8 +164,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
+ {
+ struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet;
+
+- pktsz = sizeof(*packet);
+-
++ pktsz = autofs_v5_packet_size(sbi);
+ packet->wait_queue_token = wq->wait_queue_token;
+ packet->len = wq->name.len;
+ memcpy(packet->name, wq->name.name, wq->name.len);
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 21ac5ee..6ff96c6 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -1421,7 +1421,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
+ for (i = 1; i < view->n; ++i) {
+ const struct user_regset *regset = &view->regsets[i];
+ do_thread_regset_writeback(t->task, regset);
+- if (regset->core_note_type &&
++ if (regset->core_note_type && regset->get &&
+ (!regset->active || regset->active(t->task, regset))) {
+ int ret;
+ size_t size = regset->n * regset->size;
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index e4c3334..bf68b4f 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -584,10 +584,26 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
+ * If either that or op not supported returned, follow
+ * the normal lookup.
+ */
+- if ((rc == 0) || (rc == -ENOENT))
++ switch (rc) {
++ case 0:
++ /*
++ * The server may allow us to open things like
++ * FIFOs, but the client isn't set up to deal
++ * with that. If it's not a regular file, just
++ * close it and proceed as if it were a normal
++ * lookup.
++ */
++ if (newInode && !S_ISREG(newInode->i_mode)) {
++ CIFSSMBClose(xid, pTcon, fileHandle);
++ break;
++ }
++ case -ENOENT:
+ posix_open = true;
+- else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP))
++ case -EOPNOTSUPP:
++ break;
++ default:
+ pTcon->broken_posix_open = true;
++ }
+ }
+ if (!posix_open)
+ rc = cifs_get_inode_info_unix(&newInode, full_path,
+diff --git a/include/linux/compat.h b/include/linux/compat.h
+index 66ed067..d42bd48 100644
+--- a/include/linux/compat.h
++++ b/include/linux/compat.h
+@@ -561,5 +561,9 @@ asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
+ unsigned long liovcnt, const struct compat_iovec __user *rvec,
+ unsigned long riovcnt, unsigned long flags);
+
++#else
++
++#define is_compat_task() (0)
++
+ #endif /* CONFIG_COMPAT */
+ #endif /* _LINUX_COMPAT_H */
+diff --git a/include/linux/regset.h b/include/linux/regset.h
+index 8abee65..686f373 100644
+--- a/include/linux/regset.h
++++ b/include/linux/regset.h
+@@ -335,8 +335,11 @@ static inline int copy_regset_to_user(struct task_struct *target,
+ {
+ const struct user_regset *regset = &view->regsets[setno];
+
++ if (!regset->get)
++ return -EOPNOTSUPP;
++
+ if (!access_ok(VERIFY_WRITE, data, size))
+- return -EIO;
++ return -EFAULT;
+
+ return regset->get(target, regset, offset, size, NULL, data);
+ }
+@@ -358,8 +361,11 @@ static inline int copy_regset_from_user(struct task_struct *target,
+ {
+ const struct user_regset *regset = &view->regsets[setno];
+
++ if (!regset->set)
++ return -EOPNOTSUPP;
++
+ if (!access_ok(VERIFY_READ, data, size))
+- return -EIO;
++ return -EFAULT;
+
+ return regset->set(target, regset, offset, size, NULL, data);
+ }
+diff --git a/include/video/omapdss.h b/include/video/omapdss.h
+index 378c7ed..6582c45 100644
+--- a/include/video/omapdss.h
++++ b/include/video/omapdss.h
+@@ -575,6 +575,11 @@ struct omap_dss_device {
+ int (*get_backlight)(struct omap_dss_device *dssdev);
+ };
+
++struct omap_dss_hdmi_data
++{
++ int hpd_gpio;
++};
++
+ struct omap_dss_driver {
+ struct device_driver driver;
+
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index cf2d7ae..ae95cd2 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -985,6 +985,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
+
+ /* add new interrupt at end of irq queue */
+ do {
++ /*
++ * Or all existing action->thread_mask bits,
++ * so we can find the next zero bit for this
++ * new action.
++ */
+ thread_mask |= old->thread_mask;
+ old_ptr = &old->next;
+ old = *old_ptr;
+@@ -993,14 +998,41 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
+ }
+
+ /*
+- * Setup the thread mask for this irqaction. Unlikely to have
+- * 32 resp 64 irqs sharing one line, but who knows.
++ * Setup the thread mask for this irqaction for ONESHOT. For
++ * !ONESHOT irqs the thread mask is 0 so we can avoid a
++ * conditional in irq_wake_thread().
+ */
+- if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
+- ret = -EBUSY;
+- goto out_mask;
++ if (new->flags & IRQF_ONESHOT) {
++ /*
++ * Unlikely to have 32 resp 64 irqs sharing one line,
++ * but who knows.
++ */
++ if (thread_mask == ~0UL) {
++ ret = -EBUSY;
++ goto out_mask;
++ }
++ /*
++ * The thread_mask for the action is or'ed to
++ * desc->thread_active to indicate that the
++ * IRQF_ONESHOT thread handler has been woken, but not
++ * yet finished. The bit is cleared when a thread
++ * completes. When all threads of a shared interrupt
++ * line have completed desc->threads_active becomes
++ * zero and the interrupt line is unmasked. See
++ * handle.c:irq_wake_thread() for further information.
++ *
++ * If no thread is woken by primary (hard irq context)
++ * interrupt handlers, then desc->threads_active is
++ * also checked for zero to unmask the irq line in the
++ * affected hard irq flow handlers
++ * (handle_[fasteoi|level]_irq).
++ *
++ * The new action gets the first zero bit of
++ * thread_mask assigned. See the loop above which or's
++ * all existing action->thread_mask bits.
++ */
++ new->thread_mask = 1 << ffz(thread_mask);
+ }
+- new->thread_mask = 1 << ffz(thread_mask);
+
+ if (!shared) {
+ init_waitqueue_head(&desc->wait_for_threads);
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index faa39d1..bc90b87 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1334,8 +1334,10 @@ int __kprobes register_kprobe(struct kprobe *p)
+ if (!kernel_text_address((unsigned long) p->addr) ||
+ in_kprobes_functions((unsigned long) p->addr) ||
+ ftrace_text_reserved(p->addr, p->addr) ||
+- jump_label_text_reserved(p->addr, p->addr))
+- goto fail_with_jump_label;
++ jump_label_text_reserved(p->addr, p->addr)) {
++ ret = -EINVAL;
++ goto cannot_probe;
++ }
+
+ /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
+ p->flags &= KPROBE_FLAG_DISABLED;
+@@ -1352,7 +1354,7 @@ int __kprobes register_kprobe(struct kprobe *p)
+ * its code to prohibit unexpected unloading.
+ */
+ if (unlikely(!try_module_get(probed_mod)))
+- goto fail_with_jump_label;
++ goto cannot_probe;
+
+ /*
+ * If the module freed .init.text, we couldn't insert
+@@ -1361,7 +1363,7 @@ int __kprobes register_kprobe(struct kprobe *p)
+ if (within_module_init((unsigned long)p->addr, probed_mod) &&
+ probed_mod->state != MODULE_STATE_COMING) {
+ module_put(probed_mod);
+- goto fail_with_jump_label;
++ goto cannot_probe;
+ }
+ /* ret will be updated by following code */
+ }
+@@ -1409,7 +1411,7 @@ out:
+
+ return ret;
+
+-fail_with_jump_label:
++cannot_probe:
+ preempt_enable();
+ jump_label_unlock();
+ return ret;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 33141f5..8f005e9 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -642,6 +642,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
+ set_pmd_at(mm, haddr, pmd, entry);
+ prepare_pmd_huge_pte(pgtable, mm);
+ add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
++ mm->nr_ptes++;
+ spin_unlock(&mm->page_table_lock);
+ }
+
+@@ -760,6 +761,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pmd = pmd_mkold(pmd_wrprotect(pmd));
+ set_pmd_at(dst_mm, addr, dst_pmd, pmd);
+ prepare_pmd_huge_pte(pgtable, dst_mm);
++ dst_mm->nr_ptes++;
+
+ ret = 0;
+ out_unlock:
+@@ -858,7 +860,6 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
+ }
+ kfree(pages);
+
+- mm->nr_ptes++;
+ smp_wmb(); /* make pte visible before pmd */
+ pmd_populate(mm, pmd, pgtable);
+ page_remove_rmap(page);
+@@ -1017,6 +1018,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ VM_BUG_ON(page_mapcount(page) < 0);
+ add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+ VM_BUG_ON(!PageHead(page));
++ tlb->mm->nr_ptes--;
+ spin_unlock(&tlb->mm->page_table_lock);
+ tlb_remove_page(tlb, page);
+ pte_free(tlb->mm, pgtable);
+@@ -1356,7 +1358,6 @@ static int __split_huge_page_map(struct page *page,
+ pte_unmap(pte);
+ }
+
+- mm->nr_ptes++;
+ smp_wmb(); /* make pte visible before pmd */
+ /*
+ * Up to this point the pmd is present and huge and
+@@ -1969,7 +1970,6 @@ static void collapse_huge_page(struct mm_struct *mm,
+ set_pmd_at(mm, address, pmd, _pmd);
+ update_mmu_cache(vma, address, _pmd);
+ prepare_pmd_huge_pte(pgtable, mm);
+- mm->nr_ptes--;
+ spin_unlock(&mm->page_table_lock);
+
+ #ifndef CONFIG_NUMA
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index f538e9b..de67e91 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4502,6 +4502,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
+ */
+ BUG_ON(!thresholds);
+
++ if (!thresholds->primary)
++ goto unlock;
++
+ usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
+
+ /* Check if a threshold crossed before removing */
+@@ -4550,7 +4553,7 @@ swap_buffers:
+
+ /* To be sure that nobody uses thresholds */
+ synchronize_rcu();
+-
++unlock:
+ mutex_unlock(&memcg->thresholds_lock);
+ }
+
+diff --git a/mm/nommu.c b/mm/nommu.c
+index ee7e57e..f59e170 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -779,8 +779,6 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
+
+ if (vma->vm_next)
+ vma->vm_next->vm_prev = vma->vm_prev;
+-
+- vma->vm_mm = NULL;
+ }
+
+ /*
+diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
+index 5a5a776..7d84b87 100644
+--- a/net/mac80211/rate.c
++++ b/net/mac80211/rate.c
+@@ -344,7 +344,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ info->control.rates[i].idx = -1;
+ info->control.rates[i].flags = 0;
+- info->control.rates[i].count = 1;
++ info->control.rates[i].count = 0;
+ }
+
+ if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 05c8768..f3be54e 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -1795,7 +1795,11 @@ static void put_vol_mute(struct hda_codec *codec, struct hda_amp_info *info,
+ parm = ch ? AC_AMP_SET_RIGHT : AC_AMP_SET_LEFT;
+ parm |= direction == HDA_OUTPUT ? AC_AMP_SET_OUTPUT : AC_AMP_SET_INPUT;
+ parm |= index << AC_AMP_SET_INDEX_SHIFT;
+- parm |= val;
++ if ((val & HDA_AMP_MUTE) && !(info->amp_caps & AC_AMPCAP_MUTE) &&
++ (info->amp_caps & AC_AMPCAP_MIN_MUTE))
++ ; /* set the zero value as a fake mute */
++ else
++ parm |= val;
+ snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE, parm);
+ info->vol[ch] = val;
+ }
+@@ -2062,7 +2066,7 @@ int snd_hda_mixer_amp_tlv(struct snd_kcontrol *kcontrol, int op_flag,
+ val1 = -((caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT);
+ val1 += ofs;
+ val1 = ((int)val1) * ((int)val2);
+- if (min_mute)
++ if (min_mute || (caps & AC_AMPCAP_MIN_MUTE))
+ val2 |= TLV_DB_SCALE_MUTE;
+ if (put_user(SNDRV_CTL_TLVT_DB_SCALE, _tlv))
+ return -EFAULT;
+diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
+index 5644711..71f6744 100644
+--- a/sound/pci/hda/hda_codec.h
++++ b/sound/pci/hda/hda_codec.h
+@@ -298,6 +298,9 @@ enum {
+ #define AC_AMPCAP_MUTE (1<<31) /* mute capable */
+ #define AC_AMPCAP_MUTE_SHIFT 31
+
++/* driver-specific amp-caps: using bits 24-30 */
++#define AC_AMPCAP_MIN_MUTE (1 << 30) /* min-volume = mute */
++
+ /* Connection list */
+ #define AC_CLIST_LENGTH (0x7f<<0)
+ #define AC_CLIST_LONG (1<<7)
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 08bad5b..ae94929 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -4132,7 +4132,8 @@ static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename,
+ err = snd_hda_ctl_add(codec, nid, kctl);
+ if (err < 0)
+ return err;
+- if (!(query_amp_caps(codec, nid, hda_dir) & AC_AMPCAP_MUTE))
++ if (!(query_amp_caps(codec, nid, hda_dir) &
++ (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)))
+ break;
+ }
+ return 0;
+@@ -4425,6 +4426,22 @@ static const struct snd_pci_quirk cxt_fixups[] = {
+ {}
+ };
+
++/* add "fake" mute amp-caps to DACs on cx5051 so that mixer mute switches
++ * can be created (bko#42825)
++ */
++static void add_cx5051_fake_mutes(struct hda_codec *codec)
++{
++ static hda_nid_t out_nids[] = {
++ 0x10, 0x11, 0
++ };
++ hda_nid_t *p;
++
++ for (p = out_nids; *p; p++)
++ snd_hda_override_amp_caps(codec, *p, HDA_OUTPUT,
++ AC_AMPCAP_MIN_MUTE |
++ query_amp_caps(codec, *p, HDA_OUTPUT));
++}
++
+ static int patch_conexant_auto(struct hda_codec *codec)
+ {
+ struct conexant_spec *spec;
+@@ -4443,6 +4460,9 @@ static int patch_conexant_auto(struct hda_codec *codec)
+ case 0x14f15045:
+ spec->single_adc_amp = 1;
+ break;
++ case 0x14f15051:
++ add_cx5051_fake_mutes(codec);
++ break;
+ }
+
+ apply_pin_fixup(codec, cxt_fixups, cxt_pincfg_tbl);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index c4c8d78..3d8fbf4 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3695,7 +3695,7 @@ static void alc_auto_init_input_src(struct hda_codec *codec)
+ else
+ nums = spec->num_adc_nids;
+ for (c = 0; c < nums; c++)
+- alc_mux_select(codec, 0, spec->cur_mux[c], true);
++ alc_mux_select(codec, c, spec->cur_mux[c], true);
+ }
+
+ /* add mic boosts if needed */
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index ccdac27..ed67698 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -4719,7 +4719,7 @@ static void stac92xx_hp_detect(struct hda_codec *codec)
+ unsigned int val = AC_PINCTL_OUT_EN | AC_PINCTL_HP_EN;
+ if (no_hp_sensing(spec, i))
+ continue;
+- if (presence)
++ if (1 /*presence*/)
+ stac92xx_set_pinctl(codec, cfg->hp_pins[i], val);
+ #if 0 /* FIXME */
+ /* Resetting the pinctl like below may lead to (a sort of) regressions
+diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
+index 4c05e2b..971eaf0 100644
+--- a/sound/soc/imx/imx-ssi.c
++++ b/sound/soc/imx/imx-ssi.c
+@@ -112,7 +112,7 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ /* data on rising edge of bclk, frame high 1clk before data */
+- strcr |= SSI_STCR_TFSL | SSI_STCR_TEFS;
++ strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0 | SSI_STCR_TEFS;
+ break;
+ }
+
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index f42e8b9..ea909c5 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -2982,9 +2982,13 @@ static void soc_dapm_shutdown_codec(struct snd_soc_dapm_context *dapm)
+ * standby.
+ */
+ if (powerdown) {
+- snd_soc_dapm_set_bias_level(dapm, SND_SOC_BIAS_PREPARE);
++ if (dapm->bias_level == SND_SOC_BIAS_ON)
++ snd_soc_dapm_set_bias_level(dapm,
++ SND_SOC_BIAS_PREPARE);
+ dapm_seq_run(dapm, &down_list, 0, false);
+- snd_soc_dapm_set_bias_level(dapm, SND_SOC_BIAS_STANDBY);
++ if (dapm->bias_level == SND_SOC_BIAS_PREPARE)
++ snd_soc_dapm_set_bias_level(dapm,
++ SND_SOC_BIAS_STANDBY);
+ }
+ }
+
+@@ -2997,7 +3001,9 @@ void snd_soc_dapm_shutdown(struct snd_soc_card *card)
+
+ list_for_each_entry(codec, &card->codec_dev_list, list) {
+ soc_dapm_shutdown_codec(&codec->dapm);
+- snd_soc_dapm_set_bias_level(&codec->dapm, SND_SOC_BIAS_OFF);
++ if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY)
++ snd_soc_dapm_set_bias_level(&codec->dapm,
++ SND_SOC_BIAS_OFF);
+ }
+ }
+
diff --git a/1010_linux-3.2.11.patch b/1010_linux-3.2.11.patch
new file mode 100644
index 00000000..ad860472
--- /dev/null
+++ b/1010_linux-3.2.11.patch
@@ -0,0 +1,37 @@
+diff --git a/Makefile b/Makefile
+index 1ddd6e9..4b76371 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
+index 9302d21..61894fc 100644
+--- a/drivers/mfd/wm8994-core.c
++++ b/drivers/mfd/wm8994-core.c
+@@ -252,20 +252,6 @@ static int wm8994_suspend(struct device *dev)
+ break;
+ }
+
+- switch (wm8994->type) {
+- case WM1811:
+- ret = wm8994_reg_read(wm8994, WM8994_ANTIPOP_2);
+- if (ret < 0) {
+- dev_err(dev, "Failed to read jackdet: %d\n", ret);
+- } else if (ret & WM1811_JACKDET_MODE_MASK) {
+- dev_dbg(dev, "CODEC still active, ignoring suspend\n");
+- return 0;
+- }
+- break;
+- default:
+- break;
+- }
+-
+ /* Disable LDO pulldowns while the device is suspended if we
+ * don't know that something will be driving them. */
+ if (!wm8994->ldo_ena_always_driven)
diff --git a/1011_linux-3.2.12.patch b/1011_linux-3.2.12.patch
new file mode 100644
index 00000000..83208056
--- /dev/null
+++ b/1011_linux-3.2.12.patch
@@ -0,0 +1,1548 @@
+diff --git a/Documentation/hwmon/w83627ehf b/Documentation/hwmon/w83627ehf
+index 3f44dbd..75f3155 100644
+--- a/Documentation/hwmon/w83627ehf
++++ b/Documentation/hwmon/w83627ehf
+@@ -50,7 +50,7 @@ W83627DHG, W83627DHG-P, W83627UHG, W83667HG, W83667HG-B, W83667HG-I
+ (NCT6775F), and NCT6776F super I/O chips. We will refer to them collectively
+ as Winbond chips.
+
+-The chips implement 2 to 4 temperature sensors (9 for NCT6775F and NCT6776F),
++The chips implement 3 to 4 temperature sensors (9 for NCT6775F and NCT6776F),
+ 2 to 5 fan rotation speed sensors, 8 to 10 analog voltage sensors, one VID
+ (except for 627UHG), alarms with beep warnings (control unimplemented),
+ and some automatic fan regulation strategies (plus manual fan control mode).
+diff --git a/Documentation/hwmon/zl6100 b/Documentation/hwmon/zl6100
+index 7617798..c5e1a5b 100644
+--- a/Documentation/hwmon/zl6100
++++ b/Documentation/hwmon/zl6100
+@@ -73,14 +73,12 @@ Module parameters
+ delay
+ -----
+
+-Some Intersil/Zilker Labs DC-DC controllers require a minimum interval between
+-I2C bus accesses. According to Intersil, the minimum interval is 2 ms, though
+-1 ms appears to be sufficient and has not caused any problems in testing.
+-The problem is known to affect ZL6100, ZL2105, and ZL2008. It is known not to
+-affect ZL2004 and ZL6105. The driver automatically sets the interval to 1 ms
+-except for ZL2004 and ZL6105. To enable manual override, the driver provides a
+-writeable module parameter, 'delay', which can be used to set the interval to
+-a value between 0 and 65,535 microseconds.
++Intersil/Zilker Labs DC-DC controllers require a minimum interval between I2C
++bus accesses. According to Intersil, the minimum interval is 2 ms, though 1 ms
++appears to be sufficient and has not caused any problems in testing. The problem
++is known to affect all currently supported chips. For manual override, the
++driver provides a writeable module parameter, 'delay', which can be used to set
++the interval to a value between 0 and 65,535 microseconds.
+
+
+ Sysfs entries
+diff --git a/Makefile b/Makefile
+index 4b76371..15e80f1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
+index ad1fb5d..eddcfb3 100644
+--- a/arch/sparc/Makefile
++++ b/arch/sparc/Makefile
+@@ -31,7 +31,7 @@ UTS_MACHINE := sparc
+
+ #KBUILD_CFLAGS += -g -pipe -fcall-used-g5 -fcall-used-g7
+ KBUILD_CFLAGS += -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
+-KBUILD_AFLAGS += -m32
++KBUILD_AFLAGS += -m32 -Wa,-Av8
+
+ #LDFLAGS_vmlinux = -N -Ttext 0xf0004000
+ # Since 2.5.40, the first stage is left not btfix-ed.
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index 121f1be..957c216 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -389,14 +389,15 @@ static __initconst const u64 westmere_hw_cache_event_ids
+ #define NHM_LOCAL_DRAM (1 << 14)
+ #define NHM_NON_DRAM (1 << 15)
+
+-#define NHM_ALL_DRAM (NHM_REMOTE_DRAM|NHM_LOCAL_DRAM)
++#define NHM_LOCAL (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
++#define NHM_REMOTE (NHM_REMOTE_DRAM)
+
+ #define NHM_DMND_READ (NHM_DMND_DATA_RD)
+ #define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
+ #define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
+
+ #define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
+-#define NHM_L3_MISS (NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD)
++#define NHM_L3_MISS (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
+ #define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
+
+ static __initconst const u64 nehalem_hw_cache_extra_regs
+@@ -420,16 +421,16 @@ static __initconst const u64 nehalem_hw_cache_extra_regs
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+- [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_ALL_DRAM,
+- [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE_DRAM,
++ [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
++ [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
+ },
+ [ C(OP_WRITE) ] = {
+- [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_ALL_DRAM,
+- [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE_DRAM,
++ [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
++ [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
+ },
+ [ C(OP_PREFETCH) ] = {
+- [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_ALL_DRAM,
+- [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE_DRAM,
++ [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
++ [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
+ },
+ },
+ };
+diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
+index fc45ba8..e395693 100644
+--- a/arch/x86/lib/delay.c
++++ b/arch/x86/lib/delay.c
+@@ -48,9 +48,9 @@ static void delay_loop(unsigned long loops)
+ }
+
+ /* TSC based delay: */
+-static void delay_tsc(unsigned long loops)
++static void delay_tsc(unsigned long __loops)
+ {
+- unsigned long bclock, now;
++ u32 bclock, now, loops = __loops;
+ int cpu;
+
+ preempt_disable();
+diff --git a/block/genhd.c b/block/genhd.c
+index 02e9fca..997afd6 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -36,6 +36,7 @@ static DEFINE_IDR(ext_devt_idr);
+
+ static struct device_type disk_type;
+
++static void disk_alloc_events(struct gendisk *disk);
+ static void disk_add_events(struct gendisk *disk);
+ static void disk_del_events(struct gendisk *disk);
+ static void disk_release_events(struct gendisk *disk);
+@@ -602,6 +603,8 @@ void add_disk(struct gendisk *disk)
+ disk->major = MAJOR(devt);
+ disk->first_minor = MINOR(devt);
+
++ disk_alloc_events(disk);
++
+ /* Register BDI before referencing it from bdev */
+ bdi = &disk->queue->backing_dev_info;
+ bdi_register_dev(bdi, disk_devt(disk));
+@@ -1476,9 +1479,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
+ intv = disk_events_poll_jiffies(disk);
+ set_timer_slack(&ev->dwork.timer, intv / 4);
+ if (check_now)
+- queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
++ queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+ else if (intv)
+- queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
++ queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+ out_unlock:
+ spin_unlock_irqrestore(&ev->lock, flags);
+ }
+@@ -1522,7 +1525,7 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
+ ev->clearing |= mask;
+ if (!ev->block) {
+ cancel_delayed_work(&ev->dwork);
+- queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
++ queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+ }
+ spin_unlock_irq(&ev->lock);
+ }
+@@ -1559,7 +1562,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
+
+ /* uncondtionally schedule event check and wait for it to finish */
+ disk_block_events(disk);
+- queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
++ queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+ flush_delayed_work(&ev->dwork);
+ __disk_unblock_events(disk, false);
+
+@@ -1596,7 +1599,7 @@ static void disk_events_workfn(struct work_struct *work)
+
+ intv = disk_events_poll_jiffies(disk);
+ if (!ev->block && intv)
+- queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
++ queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+
+ spin_unlock_irq(&ev->lock);
+
+@@ -1734,9 +1737,9 @@ module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
+ &disk_events_dfl_poll_msecs, 0644);
+
+ /*
+- * disk_{add|del|release}_events - initialize and destroy disk_events.
++ * disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
+ */
+-static void disk_add_events(struct gendisk *disk)
++static void disk_alloc_events(struct gendisk *disk)
+ {
+ struct disk_events *ev;
+
+@@ -1749,16 +1752,6 @@ static void disk_add_events(struct gendisk *disk)
+ return;
+ }
+
+- if (sysfs_create_files(&disk_to_dev(disk)->kobj,
+- disk_events_attrs) < 0) {
+- pr_warn("%s: failed to create sysfs files for events\n",
+- disk->disk_name);
+- kfree(ev);
+- return;
+- }
+-
+- disk->ev = ev;
+-
+ INIT_LIST_HEAD(&ev->node);
+ ev->disk = disk;
+ spin_lock_init(&ev->lock);
+@@ -1767,8 +1760,21 @@ static void disk_add_events(struct gendisk *disk)
+ ev->poll_msecs = -1;
+ INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
+
++ disk->ev = ev;
++}
++
++static void disk_add_events(struct gendisk *disk)
++{
++ if (!disk->ev)
++ return;
++
++ /* FIXME: error handling */
++ if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
++ pr_warn("%s: failed to create sysfs files for events\n",
++ disk->disk_name);
++
+ mutex_lock(&disk_events_mutex);
+- list_add_tail(&ev->node, &disk_events);
++ list_add_tail(&disk->ev->node, &disk_events);
+ mutex_unlock(&disk_events_mutex);
+
+ /*
+diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
+index b70f0fc..eec7b7a 100644
+--- a/drivers/block/sx8.c
++++ b/drivers/block/sx8.c
+@@ -1116,7 +1116,7 @@ static inline void carm_handle_resp(struct carm_host *host,
+ break;
+ case MISC_GET_FW_VER: {
+ struct carm_fw_ver *ver = (struct carm_fw_ver *)
+- mem + sizeof(struct carm_msg_get_fw_ver);
++ (mem + sizeof(struct carm_msg_get_fw_ver));
+ if (!error) {
+ host->fw_ver = le32_to_cpu(ver->version);
+ host->flags |= (ver->features & FL_FW_VER_MASK);
+diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
+index ba296fd..5c5cdd2 100644
+--- a/drivers/hwmon/pmbus/zl6100.c
++++ b/drivers/hwmon/pmbus/zl6100.c
+@@ -178,16 +178,11 @@ static int zl6100_probe(struct i2c_client *client,
+ data->id = mid->driver_data;
+
+ /*
+- * ZL2008, ZL2105, and ZL6100 are known to require a wait time
+- * between I2C accesses. ZL2004 and ZL6105 are known to be safe.
+- *
+- * Only clear the wait time for chips known to be safe. The wait time
+- * can be cleared later for additional chips if tests show that it
+- * is not needed (in other words, better be safe than sorry).
++ * According to information from the chip vendor, all currently
++ * supported chips are known to require a wait time between I2C
++ * accesses.
+ */
+ data->delay = delay;
+- if (data->id == zl2004 || data->id == zl6105)
+- data->delay = 0;
+
+ /*
+ * Since there was a direct I2C device access above, wait before
+diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
+index c25387d..ceaec92 100644
+--- a/drivers/hwmon/w83627ehf.c
++++ b/drivers/hwmon/w83627ehf.c
+@@ -39,7 +39,7 @@
+ 0x8860 0xa1
+ w83627dhg 9 5 4 3 0xa020 0xc1 0x5ca3
+ w83627dhg-p 9 5 4 3 0xb070 0xc1 0x5ca3
+- w83627uhg 8 2 2 2 0xa230 0xc1 0x5ca3
++ w83627uhg 8 2 2 3 0xa230 0xc1 0x5ca3
+ w83667hg 9 5 3 3 0xa510 0xc1 0x5ca3
+ w83667hg-b 9 5 3 4 0xb350 0xc1 0x5ca3
+ nct6775f 9 4 3 9 0xb470 0xc1 0x5ca3
+@@ -1607,7 +1607,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
+ val = step_time_to_reg(val, data->pwm_mode[nr]); \
+ mutex_lock(&data->update_lock); \
+ data->reg[nr] = val; \
+- w83627ehf_write_value(data, W83627EHF_REG_##REG[nr], val); \
++ w83627ehf_write_value(data, data->REG_##REG[nr], val); \
+ mutex_unlock(&data->update_lock); \
+ return count; \
+ } \
+@@ -2004,7 +2004,8 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
+ goto exit;
+ }
+
+- data = kzalloc(sizeof(struct w83627ehf_data), GFP_KERNEL);
++ data = devm_kzalloc(&pdev->dev, sizeof(struct w83627ehf_data),
++ GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit_release;
+@@ -2157,16 +2158,16 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
+ w83627ehf_set_temp_reg_ehf(data, 3);
+
+ /*
+- * Temperature sources for temp1 and temp2 are selected with
++ * Temperature sources for temp2 and temp3 are selected with
+ * bank 0, registers 0x49 and 0x4a.
+ */
+ data->temp_src[0] = 0; /* SYSTIN */
+ reg = w83627ehf_read_value(data, 0x49) & 0x07;
+ /* Adjust to have the same mapping as other source registers */
+ if (reg == 0)
+- data->temp_src[1]++;
++ data->temp_src[1] = 1;
+ else if (reg >= 2 && reg <= 5)
+- data->temp_src[1] += 2;
++ data->temp_src[1] = reg + 2;
+ else /* should never happen */
+ data->have_temp &= ~(1 << 1);
+ reg = w83627ehf_read_value(data, 0x4a);
+@@ -2498,9 +2499,8 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
+
+ exit_remove:
+ w83627ehf_device_remove_files(dev);
+- kfree(data);
+- platform_set_drvdata(pdev, NULL);
+ exit_release:
++ platform_set_drvdata(pdev, NULL);
+ release_region(res->start, IOREGION_LENGTH);
+ exit:
+ return err;
+@@ -2514,7 +2514,6 @@ static int __devexit w83627ehf_remove(struct platform_device *pdev)
+ w83627ehf_device_remove_files(&pdev->dev);
+ release_region(data->addr, IOREGION_LENGTH);
+ platform_set_drvdata(pdev, NULL);
+- kfree(data);
+
+ return 0;
+ }
+diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
+index 525c734..24f94f4 100644
+--- a/drivers/i2c/algos/i2c-algo-bit.c
++++ b/drivers/i2c/algos/i2c-algo-bit.c
+@@ -103,8 +103,14 @@ static int sclhi(struct i2c_algo_bit_data *adap)
+ * chips may hold it low ("clock stretching") while they
+ * are processing data internally.
+ */
+- if (time_after(jiffies, start + adap->timeout))
++ if (time_after(jiffies, start + adap->timeout)) {
++ /* Test one last time, as we may have been preempted
++ * between last check and timeout test.
++ */
++ if (getscl(adap))
++ break;
+ return -ETIMEDOUT;
++ }
+ cond_resched();
+ }
+ #ifdef DEBUG
+diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
+index 54a4a3f..a319281 100644
+--- a/drivers/media/video/omap3isp/ispccdc.c
++++ b/drivers/media/video/omap3isp/ispccdc.c
+@@ -1406,8 +1406,7 @@ static int __ccdc_handle_stopping(struct isp_ccdc_device *ccdc, u32 event)
+
+ static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
+ {
+- struct isp_pipeline *pipe =
+- to_isp_pipeline(&ccdc->video_out.video.entity);
++ struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
+ struct video_device *vdev = ccdc->subdev.devnode;
+ struct v4l2_event event;
+
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+index 02c7ed8..eccdcff 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -2241,10 +2241,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
+ dev_info(&adapter->pdev->dev, "tx locked\n");
+ return NETDEV_TX_LOCKED;
+ }
+- if (skb->mark == 0x01)
+- type = atl1c_trans_high;
+- else
+- type = atl1c_trans_normal;
+
+ if (atl1c_tpd_avail(adapter, type) < tpd_req) {
+ /* no enough descriptor, just stop queue */
+diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig
+index b97132d..8f29feb 100644
+--- a/drivers/net/ethernet/packetengines/Kconfig
++++ b/drivers/net/ethernet/packetengines/Kconfig
+@@ -4,6 +4,7 @@
+
+ config NET_PACKET_ENGINE
+ bool "Packet Engine devices"
++ default y
+ depends on PCI
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index c8f47f1..0cf2351 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -3781,12 +3781,20 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
+
+ static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
+ {
++ void __iomem *ioaddr = tp->mmio_addr;
++
++ RTL_W8(Cfg9346, Cfg9346_Unlock);
+ rtl_generic_op(tp, tp->jumbo_ops.enable);
++ RTL_W8(Cfg9346, Cfg9346_Lock);
+ }
+
+ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
+ {
++ void __iomem *ioaddr = tp->mmio_addr;
++
++ RTL_W8(Cfg9346, Cfg9346_Unlock);
+ rtl_generic_op(tp, tp->jumbo_ops.disable);
++ RTL_W8(Cfg9346, Cfg9346_Lock);
+ }
+
+ static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
+diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
+index 752d521..5ef4cc0 100644
+--- a/drivers/net/ethernet/sfc/rx.c
++++ b/drivers/net/ethernet/sfc/rx.c
+@@ -156,11 +156,10 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+- /* Adjust the SKB for padding and checksum */
++ /* Adjust the SKB for padding */
+ skb_reserve(skb, NET_IP_ALIGN);
+ rx_buf->len = skb_len - NET_IP_ALIGN;
+ rx_buf->is_page = false;
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+ skb->data, rx_buf->len,
+@@ -499,6 +498,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
+
+ EFX_BUG_ON_PARANOID(!checksummed);
+ rx_buf->u.skb = NULL;
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ gro_result = napi_gro_receive(napi, skb);
+ }
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index edfa15d..486b404 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -2024,14 +2024,22 @@ ppp_mp_reconstruct(struct ppp *ppp)
+ continue;
+ }
+ if (PPP_MP_CB(p)->sequence != seq) {
++ u32 oldseq;
+ /* Fragment `seq' is missing. If it is after
+ minseq, it might arrive later, so stop here. */
+ if (seq_after(seq, minseq))
+ break;
+ /* Fragment `seq' is lost, keep going. */
+ lost = 1;
++ oldseq = seq;
+ seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
+ minseq + 1: PPP_MP_CB(p)->sequence;
++
++ if (ppp->debug & 1)
++ netdev_printk(KERN_DEBUG, ppp->dev,
++ "lost frag %u..%u\n",
++ oldseq, seq-1);
++
+ goto again;
+ }
+
+@@ -2076,6 +2084,10 @@ ppp_mp_reconstruct(struct ppp *ppp)
+ struct sk_buff *tmp2;
+
+ skb_queue_reverse_walk_from_safe(list, p, tmp2) {
++ if (ppp->debug & 1)
++ netdev_printk(KERN_DEBUG, ppp->dev,
++ "discarding frag %u\n",
++ PPP_MP_CB(p)->sequence);
+ __skb_unlink(p, list);
+ kfree_skb(p);
+ }
+@@ -2091,6 +2103,17 @@ ppp_mp_reconstruct(struct ppp *ppp)
+ /* If we have discarded any fragments,
+ signal a receive error. */
+ if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
++ skb_queue_walk_safe(list, p, tmp) {
++ if (p == head)
++ break;
++ if (ppp->debug & 1)
++ netdev_printk(KERN_DEBUG, ppp->dev,
++ "discarding frag %u\n",
++ PPP_MP_CB(p)->sequence);
++ __skb_unlink(p, list);
++ kfree_skb(p);
++ }
++
+ if (ppp->debug & 1)
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ " missed pkts %u..%u\n",
+diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
+index f5e063a..fda4be2 100644
+--- a/drivers/net/usb/asix.c
++++ b/drivers/net/usb/asix.c
+@@ -1595,6 +1595,10 @@ static const struct usb_device_id products [] = {
+ USB_DEVICE (0x6189, 0x182d),
+ .driver_info = (unsigned long) &ax8817x_info,
+ }, {
++ // Sitecom LN-031 "USB 2.0 10/100/1000 Ethernet adapter"
++ USB_DEVICE (0x0df6, 0x0056),
++ .driver_info = (unsigned long) &ax88178_info,
++}, {
+ // corega FEther USB2-TX
+ USB_DEVICE (0x07aa, 0x0017),
+ .driver_info = (unsigned long) &ax8817x_info,
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index d96bfb1..d426261 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -830,13 +830,8 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+ ctx->l4_hdr_size = ((struct tcphdr *)
+ skb_transport_header(skb))->doff * 4;
+ else if (iph->protocol == IPPROTO_UDP)
+- /*
+- * Use tcp header size so that bytes to
+- * be copied are more than required by
+- * the device.
+- */
+ ctx->l4_hdr_size =
+- sizeof(struct tcphdr);
++ sizeof(struct udphdr);
+ else
+ ctx->l4_hdr_size = 0;
+ } else {
+diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
+index b18eac1..8df921b 100644
+--- a/drivers/net/vmxnet3/vmxnet3_int.h
++++ b/drivers/net/vmxnet3/vmxnet3_int.h
+@@ -70,10 +70,10 @@
+ /*
+ * Version numbers
+ */
+-#define VMXNET3_DRIVER_VERSION_STRING "1.1.18.0-k"
++#define VMXNET3_DRIVER_VERSION_STRING "1.1.29.0-k"
+
+ /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
+-#define VMXNET3_DRIVER_VERSION_NUM 0x01011200
++#define VMXNET3_DRIVER_VERSION_NUM 0x01011D00
+
+ #if defined(CONFIG_PCI_MSI)
+ /* RSS only makes sense if MSI-X is supported. */
+diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
+index edd317f..21b529b 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
+@@ -426,10 +426,14 @@ void rt2x00lib_txdone(struct queue_entry *entry,
+ /*
+ * If the data queue was below the threshold before the txdone
+ * handler we must make sure the packet queue in the mac80211 stack
+- * is reenabled when the txdone handler has finished.
++ * is reenabled when the txdone handler has finished. This has to be
++ * serialized with rt2x00mac_tx(), otherwise we can wake up queue
++ * before it was stopped.
+ */
++ spin_lock_bh(&entry->queue->tx_lock);
+ if (!rt2x00queue_threshold(entry->queue))
+ rt2x00queue_unpause_queue(entry->queue);
++ spin_unlock_bh(&entry->queue->tx_lock);
+ }
+ EXPORT_SYMBOL_GPL(rt2x00lib_txdone);
+
+diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
+index bf0acff..373dae1 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
++++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
+@@ -152,13 +152,22 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+ if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false)))
+ goto exit_fail;
+
++ /*
++ * Pausing queue has to be serialized with rt2x00lib_txdone(). Note
++ * we should not use spin_lock_bh variant as bottom halve was already
++ * disabled before ieee80211_xmit() call.
++ */
++ spin_lock(&queue->tx_lock);
+ if (rt2x00queue_threshold(queue))
+ rt2x00queue_pause_queue(queue);
++ spin_unlock(&queue->tx_lock);
+
+ return;
+
+ exit_fail:
++ spin_lock(&queue->tx_lock);
+ rt2x00queue_pause_queue(queue);
++ spin_unlock(&queue->tx_lock);
+ exit_free_skb:
+ dev_kfree_skb_any(skb);
+ }
+diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
+index 5adfb3e..9b1b2b7 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
++++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
+@@ -619,6 +619,9 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
+ else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
+ rt2x00queue_align_frame(skb);
+
++ /*
++ * That function must be called with bh disabled.
++ */
+ spin_lock(&queue->tx_lock);
+
+ if (unlikely(rt2x00queue_full(queue))) {
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 1cfbf22..24f049e 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -500,6 +500,9 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
+ int pos;
+ u32 reg32;
+
++ if (aspm_disabled)
++ return 0;
++
+ /*
+ * Some functions in a slot might not all be PCIe functions,
+ * very strange. Disable ASPM for the whole slot
+diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
+index b848277..1e5290b 100644
+--- a/drivers/platform/x86/acer-wmi.c
++++ b/drivers/platform/x86/acer-wmi.c
+@@ -679,6 +679,32 @@ static acpi_status AMW0_find_mailled(void)
+ return AE_OK;
+ }
+
++static int AMW0_set_cap_acpi_check_device_found;
++
++static acpi_status AMW0_set_cap_acpi_check_device_cb(acpi_handle handle,
++ u32 level, void *context, void **retval)
++{
++ AMW0_set_cap_acpi_check_device_found = 1;
++ return AE_OK;
++}
++
++static const struct acpi_device_id norfkill_ids[] = {
++ { "VPC2004", 0},
++ { "IBM0068", 0},
++ { "LEN0068", 0},
++ { "", 0},
++};
++
++static int AMW0_set_cap_acpi_check_device(void)
++{
++ const struct acpi_device_id *id;
++
++ for (id = norfkill_ids; id->id[0]; id++)
++ acpi_get_devices(id->id, AMW0_set_cap_acpi_check_device_cb,
++ NULL, NULL);
++ return AMW0_set_cap_acpi_check_device_found;
++}
++
+ static acpi_status AMW0_set_capabilities(void)
+ {
+ struct wmab_args args;
+@@ -692,7 +718,9 @@ static acpi_status AMW0_set_capabilities(void)
+ * work.
+ */
+ if (wmi_has_guid(AMW0_GUID2)) {
+- interface->capability |= ACER_CAP_WIRELESS;
++ if ((quirks != &quirk_unknown) ||
++ !AMW0_set_cap_acpi_check_device())
++ interface->capability |= ACER_CAP_WIRELESS;
+ return AE_OK;
+ }
+
+diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
+index 822e54c..1c226b3 100644
+--- a/drivers/rapidio/devices/tsi721.h
++++ b/drivers/rapidio/devices/tsi721.h
+@@ -118,34 +118,34 @@
+
+ #define TSI721_IDB_ENTRY_SIZE 64
+
+-#define TSI721_IDQ_CTL(x) (0x20000 + (x) * 1000)
++#define TSI721_IDQ_CTL(x) (0x20000 + (x) * 0x1000)
+ #define TSI721_IDQ_SUSPEND 0x00000002
+ #define TSI721_IDQ_INIT 0x00000001
+
+-#define TSI721_IDQ_STS(x) (0x20004 + (x) * 1000)
++#define TSI721_IDQ_STS(x) (0x20004 + (x) * 0x1000)
+ #define TSI721_IDQ_RUN 0x00200000
+
+-#define TSI721_IDQ_MASK(x) (0x20008 + (x) * 1000)
++#define TSI721_IDQ_MASK(x) (0x20008 + (x) * 0x1000)
+ #define TSI721_IDQ_MASK_MASK 0xffff0000
+ #define TSI721_IDQ_MASK_PATT 0x0000ffff
+
+-#define TSI721_IDQ_RP(x) (0x2000c + (x) * 1000)
++#define TSI721_IDQ_RP(x) (0x2000c + (x) * 0x1000)
+ #define TSI721_IDQ_RP_PTR 0x0007ffff
+
+-#define TSI721_IDQ_WP(x) (0x20010 + (x) * 1000)
++#define TSI721_IDQ_WP(x) (0x20010 + (x) * 0x1000)
+ #define TSI721_IDQ_WP_PTR 0x0007ffff
+
+-#define TSI721_IDQ_BASEL(x) (0x20014 + (x) * 1000)
++#define TSI721_IDQ_BASEL(x) (0x20014 + (x) * 0x1000)
+ #define TSI721_IDQ_BASEL_ADDR 0xffffffc0
+-#define TSI721_IDQ_BASEU(x) (0x20018 + (x) * 1000)
+-#define TSI721_IDQ_SIZE(x) (0x2001c + (x) * 1000)
++#define TSI721_IDQ_BASEU(x) (0x20018 + (x) * 0x1000)
++#define TSI721_IDQ_SIZE(x) (0x2001c + (x) * 0x1000)
+ #define TSI721_IDQ_SIZE_VAL(size) (__fls(size) - 4)
+ #define TSI721_IDQ_SIZE_MIN 512
+ #define TSI721_IDQ_SIZE_MAX (512 * 1024)
+
+-#define TSI721_SR_CHINT(x) (0x20040 + (x) * 1000)
+-#define TSI721_SR_CHINTE(x) (0x20044 + (x) * 1000)
+-#define TSI721_SR_CHINTSET(x) (0x20048 + (x) * 1000)
++#define TSI721_SR_CHINT(x) (0x20040 + (x) * 0x1000)
++#define TSI721_SR_CHINTE(x) (0x20044 + (x) * 0x1000)
++#define TSI721_SR_CHINTSET(x) (0x20048 + (x) * 0x1000)
+ #define TSI721_SR_CHINT_ODBOK 0x00000020
+ #define TSI721_SR_CHINT_IDBQRCV 0x00000010
+ #define TSI721_SR_CHINT_SUSP 0x00000008
+@@ -156,7 +156,7 @@
+
+ #define TSI721_IBWIN_NUM 8
+
+-#define TSI721_IBWINLB(x) (0x29000 + (x) * 20)
++#define TSI721_IBWINLB(x) (0x29000 + (x) * 0x20)
+ #define TSI721_IBWINLB_BA 0xfffff000
+ #define TSI721_IBWINLB_WEN 0x00000001
+
+@@ -187,13 +187,13 @@
+ */
+ #define TSI721_OBWIN_NUM TSI721_PC2SR_WINS
+
+-#define TSI721_OBWINLB(x) (0x40000 + (x) * 20)
++#define TSI721_OBWINLB(x) (0x40000 + (x) * 0x20)
+ #define TSI721_OBWINLB_BA 0xffff8000
+ #define TSI721_OBWINLB_WEN 0x00000001
+
+-#define TSI721_OBWINUB(x) (0x40004 + (x) * 20)
++#define TSI721_OBWINUB(x) (0x40004 + (x) * 0x20)
+
+-#define TSI721_OBWINSZ(x) (0x40008 + (x) * 20)
++#define TSI721_OBWINSZ(x) (0x40008 + (x) * 0x20)
+ #define TSI721_OBWINSZ_SIZE 0x00001f00
+ #define TSI721_OBWIN_SIZE(size) (__fls(size) - 15)
+
+diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
+index 9166aa0..229b6f4 100644
+--- a/drivers/regulator/tps6524x-regulator.c
++++ b/drivers/regulator/tps6524x-regulator.c
+@@ -481,7 +481,7 @@ static int set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+ if (i >= info->n_voltages)
+ i = info->n_voltages - 1;
+
+- *selector = info->voltages[i];
++ *selector = i;
+
+ return write_field(hw, &info->voltage, i);
+ }
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 0c1d5c73..03d3528 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1029,7 +1029,7 @@ done:
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
+- } else if (transport_ret == -EINVAL) {
++ } else if (transport_ret < 0) {
+ /*
+ * Unsupported SAM Opcode. CHECK_CONDITION will be sent
+ * in iscsit_execute_cmd() during the CmdSN OOO Execution
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index 778c1a6..6cf6ff4 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -120,7 +120,7 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
+ struct se_node_acl *, struct se_session *);
+ static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
+
+-static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
++static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
+ {
+ struct se_session *se_sess = cmd->se_sess;
+ struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+@@ -130,7 +130,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
+ int conflict = 0;
+
+ if (!crh)
+- return false;
++ return -EINVAL;
+
+ pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
+ se_sess);
+@@ -158,16 +158,14 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
+ */
+ if (pr_reg->pr_res_holder) {
+ core_scsi3_put_pr_reg(pr_reg);
+- *ret = 0;
+- return false;
++ return 1;
+ }
+ if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
+ (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) ||
+ (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+ core_scsi3_put_pr_reg(pr_reg);
+- *ret = 0;
+- return true;
++ return 1;
+ }
+ core_scsi3_put_pr_reg(pr_reg);
+ conflict = 1;
+@@ -192,10 +190,10 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
+ " while active SPC-3 registrations exist,"
+ " returning RESERVATION_CONFLICT\n");
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+- return true;
++ return -EBUSY;
+ }
+
+- return false;
++ return 0;
+ }
+
+ int target_scsi2_reservation_release(struct se_task *task)
+@@ -204,12 +202,18 @@ int target_scsi2_reservation_release(struct se_task *task)
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+ struct se_portal_group *tpg = sess->se_tpg;
+- int ret = 0;
++ int ret = 0, rc;
+
+ if (!sess || !tpg)
+ goto out;
+- if (target_check_scsi2_reservation_conflict(cmd, &ret))
++ rc = target_check_scsi2_reservation_conflict(cmd);
++ if (rc == 1)
++ goto out;
++ else if (rc < 0) {
++ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
++ ret = -EINVAL;
+ goto out;
++ }
+
+ ret = 0;
+ spin_lock(&dev->dev_reservation_lock);
+@@ -246,7 +250,7 @@ int target_scsi2_reservation_reserve(struct se_task *task)
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+ struct se_portal_group *tpg = sess->se_tpg;
+- int ret = 0;
++ int ret = 0, rc;
+
+ if ((cmd->t_task_cdb[1] & 0x01) &&
+ (cmd->t_task_cdb[1] & 0x02)) {
+@@ -262,8 +266,14 @@ int target_scsi2_reservation_reserve(struct se_task *task)
+ */
+ if (!sess || !tpg)
+ goto out;
+- if (target_check_scsi2_reservation_conflict(cmd, &ret))
++ rc = target_check_scsi2_reservation_conflict(cmd);
++ if (rc == 1)
+ goto out;
++ else if (rc < 0) {
++ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
++ ret = -EINVAL;
++ goto out;
++ }
+
+ ret = 0;
+ spin_lock(&dev->dev_reservation_lock);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index e4ddb93..cdb774b 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2507,6 +2507,7 @@ static int transport_generic_cmd_sequencer(
+ cmd, cdb, pr_reg_type) != 0) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
++ cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EBUSY;
+ }
+diff --git a/fs/aio.c b/fs/aio.c
+index 67e4b90..b9d64d8 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -228,12 +228,6 @@ static void __put_ioctx(struct kioctx *ctx)
+ call_rcu(&ctx->rcu_head, ctx_rcu_free);
+ }
+
+-static inline void get_ioctx(struct kioctx *kioctx)
+-{
+- BUG_ON(atomic_read(&kioctx->users) <= 0);
+- atomic_inc(&kioctx->users);
+-}
+-
+ static inline int try_get_ioctx(struct kioctx *kioctx)
+ {
+ return atomic_inc_not_zero(&kioctx->users);
+@@ -273,7 +267,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
+ mm = ctx->mm = current->mm;
+ atomic_inc(&mm->mm_count);
+
+- atomic_set(&ctx->users, 1);
++ atomic_set(&ctx->users, 2);
+ spin_lock_init(&ctx->ctx_lock);
+ spin_lock_init(&ctx->ring_info.ring_lock);
+ init_waitqueue_head(&ctx->wait);
+@@ -609,11 +603,16 @@ static void aio_fput_routine(struct work_struct *data)
+ fput(req->ki_filp);
+
+ /* Link the iocb into the context's free list */
++ rcu_read_lock();
+ spin_lock_irq(&ctx->ctx_lock);
+ really_put_req(ctx, req);
++ /*
++ * at that point ctx might've been killed, but actual
++ * freeing is RCU'd
++ */
+ spin_unlock_irq(&ctx->ctx_lock);
++ rcu_read_unlock();
+
+- put_ioctx(ctx);
+ spin_lock_irq(&fput_lock);
+ }
+ spin_unlock_irq(&fput_lock);
+@@ -644,7 +643,6 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
+ * this function will be executed w/out any aio kthread wakeup.
+ */
+ if (unlikely(!fput_atomic(req->ki_filp))) {
+- get_ioctx(ctx);
+ spin_lock(&fput_lock);
+ list_add(&req->ki_list, &fput_head);
+ spin_unlock(&fput_lock);
+@@ -1338,10 +1336,10 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
+ ret = PTR_ERR(ioctx);
+ if (!IS_ERR(ioctx)) {
+ ret = put_user(ioctx->user_id, ctxp);
+- if (!ret)
++ if (!ret) {
++ put_ioctx(ioctx);
+ return 0;
+-
+- get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
++ }
+ io_destroy(ioctx);
+ }
+
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index b07f1da..abe9b48 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1159,8 +1159,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ * The latter is necessary to prevent ghost
+ * partitions on a removed medium.
+ */
+- if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
+- rescan_partitions(disk, bdev);
++ if (bdev->bd_invalidated) {
++ if (!ret)
++ rescan_partitions(disk, bdev);
++ else if (ret == -ENOMEDIUM)
++ invalidate_partitions(disk, bdev);
++ }
+ if (ret)
+ goto out_clear;
+ } else {
+@@ -1190,8 +1194,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ if (bdev->bd_disk->fops->open)
+ ret = bdev->bd_disk->fops->open(bdev, mode);
+ /* the same as first opener case, read comment there */
+- if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
+- rescan_partitions(bdev->bd_disk, bdev);
++ if (bdev->bd_invalidated) {
++ if (!ret)
++ rescan_partitions(bdev->bd_disk, bdev);
++ else if (ret == -ENOMEDIUM)
++ invalidate_partitions(bdev->bd_disk, bdev);
++ }
+ if (ret)
+ goto out_unlock_bdev;
+ }
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 4dd9283..5e64748 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -920,16 +920,26 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
+ for (lockp = &inode->i_flock; *lockp != NULL; \
+ lockp = &(*lockp)->fl_next)
+
++struct lock_to_push {
++ struct list_head llist;
++ __u64 offset;
++ __u64 length;
++ __u32 pid;
++ __u16 netfid;
++ __u8 type;
++};
++
+ static int
+ cifs_push_posix_locks(struct cifsFileInfo *cfile)
+ {
+ struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+ struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+ struct file_lock *flock, **before;
+- struct cifsLockInfo *lck, *tmp;
++ unsigned int count = 0, i = 0;
+ int rc = 0, xid, type;
++ struct list_head locks_to_send, *el;
++ struct lock_to_push *lck, *tmp;
+ __u64 length;
+- struct list_head locks_to_send;
+
+ xid = GetXid();
+
+@@ -940,29 +950,55 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
+ return rc;
+ }
+
++ lock_flocks();
++ cifs_for_each_lock(cfile->dentry->d_inode, before) {
++ if ((*before)->fl_flags & FL_POSIX)
++ count++;
++ }
++ unlock_flocks();
++
+ INIT_LIST_HEAD(&locks_to_send);
+
++ /*
++ * Allocating count locks is enough because no locks can be added to
++ * the list while we are holding cinode->lock_mutex that protects
++ * locking operations of this inode.
++ */
++ for (; i < count; i++) {
++ lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
++ if (!lck) {
++ rc = -ENOMEM;
++ goto err_out;
++ }
++ list_add_tail(&lck->llist, &locks_to_send);
++ }
++
++ i = 0;
++ el = locks_to_send.next;
+ lock_flocks();
+ cifs_for_each_lock(cfile->dentry->d_inode, before) {
++ if (el == &locks_to_send) {
++ /* something is really wrong */
++ cERROR(1, "Can't push all brlocks!");
++ break;
++ }
+ flock = *before;
++ if ((flock->fl_flags & FL_POSIX) == 0)
++ continue;
+ length = 1 + flock->fl_end - flock->fl_start;
+ if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
+ type = CIFS_RDLCK;
+ else
+ type = CIFS_WRLCK;
+-
+- lck = cifs_lock_init(flock->fl_start, length, type,
+- cfile->netfid);
+- if (!lck) {
+- rc = -ENOMEM;
+- goto send_locks;
+- }
++ lck = list_entry(el, struct lock_to_push, llist);
+ lck->pid = flock->fl_pid;
+-
+- list_add_tail(&lck->llist, &locks_to_send);
++ lck->netfid = cfile->netfid;
++ lck->length = length;
++ lck->type = type;
++ lck->offset = flock->fl_start;
++ i++;
++ el = el->next;
+ }
+-
+-send_locks:
+ unlock_flocks();
+
+ list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
+@@ -979,11 +1015,18 @@ send_locks:
+ kfree(lck);
+ }
+
++out:
+ cinode->can_cache_brlcks = false;
+ mutex_unlock(&cinode->lock_mutex);
+
+ FreeXid(xid);
+ return rc;
++err_out:
++ list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
++ list_del(&lck->llist);
++ kfree(lck);
++ }
++ goto out;
+ }
+
+ static int
+diff --git a/fs/namei.c b/fs/namei.c
+index 744e942..9680cef 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2139,7 +2139,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+ /* sayonara */
+ error = complete_walk(nd);
+ if (error)
+- return ERR_PTR(-ECHILD);
++ return ERR_PTR(error);
+
+ error = -ENOTDIR;
+ if (nd->flags & LOOKUP_DIRECTORY) {
+@@ -2238,7 +2238,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
+ /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
+ error = complete_walk(nd);
+ if (error)
+- goto exit;
++ return ERR_PTR(error);
+ error = -EISDIR;
+ if (S_ISDIR(nd->inode->i_mode))
+ goto exit;
+diff --git a/fs/partitions/check.c b/fs/partitions/check.c
+index e3c63d1..6b5fcc5 100644
+--- a/fs/partitions/check.c
++++ b/fs/partitions/check.c
+@@ -539,17 +539,11 @@ static bool disk_unlock_native_capacity(struct gendisk *disk)
+ }
+ }
+
+-int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
++static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
+ {
+- struct parsed_partitions *state = NULL;
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+- int p, highest, res;
+-rescan:
+- if (state && !IS_ERR(state)) {
+- kfree(state);
+- state = NULL;
+- }
++ int res;
+
+ if (bdev->bd_part_count)
+ return -EBUSY;
+@@ -562,6 +556,24 @@ rescan:
+ delete_partition(disk, part->partno);
+ disk_part_iter_exit(&piter);
+
++ return 0;
++}
++
++int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
++{
++ struct parsed_partitions *state = NULL;
++ struct hd_struct *part;
++ int p, highest, res;
++rescan:
++ if (state && !IS_ERR(state)) {
++ kfree(state);
++ state = NULL;
++ }
++
++ res = drop_partitions(disk, bdev);
++ if (res)
++ return res;
++
+ if (disk->fops->revalidate_disk)
+ disk->fops->revalidate_disk(disk);
+ check_disk_size_change(disk, bdev);
+@@ -665,6 +677,26 @@ rescan:
+ return 0;
+ }
+
++int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
++{
++ int res;
++
++ if (!bdev->bd_invalidated)
++ return 0;
++
++ res = drop_partitions(disk, bdev);
++ if (res)
++ return res;
++
++ set_capacity(disk, 0);
++ check_disk_size_change(disk, bdev);
++ bdev->bd_invalidated = 0;
++ /* tell userspace that the media / partition table may have changed */
++ kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
++
++ return 0;
++}
++
+ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
+ {
+ struct address_space *mapping = bdev->bd_inode->i_mapping;
+diff --git a/include/linux/genhd.h b/include/linux/genhd.h
+index 6d18f35..c6f7f6a 100644
+--- a/include/linux/genhd.h
++++ b/include/linux/genhd.h
+@@ -596,6 +596,7 @@ extern char *disk_name (struct gendisk *hd, int partno, char *buf);
+
+ extern int disk_expand_part_tbl(struct gendisk *disk, int target);
+ extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
++extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev);
+ extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
+ int partno, sector_t start,
+ sector_t len, int flags,
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index fe86488..6cf8b53 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1453,6 +1453,16 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
+ }
+ #endif /* NET_SKBUFF_DATA_USES_OFFSET */
+
++static inline void skb_mac_header_rebuild(struct sk_buff *skb)
++{
++ if (skb_mac_header_was_set(skb)) {
++ const unsigned char *old_mac = skb_mac_header(skb);
++
++ skb_set_mac_header(skb, -skb->mac_len);
++ memmove(skb_mac_header(skb), old_mac, skb->mac_len);
++ }
++}
++
+ static inline int skb_checksum_start_offset(const struct sk_buff *skb)
+ {
+ return skb->csum_start - skb_headroom(skb);
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index 0d556de..e228ca9 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -289,12 +289,16 @@ enum {
+ *
+ * system_freezable_wq is equivalent to system_wq except that it's
+ * freezable.
++ *
++ * system_nrt_freezable_wq is equivalent to system_nrt_wq except that
++ * it's freezable.
+ */
+ extern struct workqueue_struct *system_wq;
+ extern struct workqueue_struct *system_long_wq;
+ extern struct workqueue_struct *system_nrt_wq;
+ extern struct workqueue_struct *system_unbound_wq;
+ extern struct workqueue_struct *system_freezable_wq;
++extern struct workqueue_struct *system_nrt_freezable_wq;
+
+ extern struct workqueue_struct *
+ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 42fa9ad..bb425b1 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -253,11 +253,13 @@ struct workqueue_struct *system_long_wq __read_mostly;
+ struct workqueue_struct *system_nrt_wq __read_mostly;
+ struct workqueue_struct *system_unbound_wq __read_mostly;
+ struct workqueue_struct *system_freezable_wq __read_mostly;
++struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
+ EXPORT_SYMBOL_GPL(system_wq);
+ EXPORT_SYMBOL_GPL(system_long_wq);
+ EXPORT_SYMBOL_GPL(system_nrt_wq);
+ EXPORT_SYMBOL_GPL(system_unbound_wq);
+ EXPORT_SYMBOL_GPL(system_freezable_wq);
++EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/workqueue.h>
+@@ -3821,8 +3823,11 @@ static int __init init_workqueues(void)
+ WQ_UNBOUND_MAX_ACTIVE);
+ system_freezable_wq = alloc_workqueue("events_freezable",
+ WQ_FREEZABLE, 0);
++ system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
++ WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
+ BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
+- !system_unbound_wq || !system_freezable_wq);
++ !system_unbound_wq || !system_freezable_wq ||
++ !system_nrt_freezable_wq);
+ return 0;
+ }
+ early_initcall(init_workqueues);
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index a5f4e57..8eb6b15 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -446,8 +446,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
+ ip6h->nexthdr = IPPROTO_HOPOPTS;
+ ip6h->hop_limit = 1;
+ ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
+- ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
+- &ip6h->saddr);
++ if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
++ &ip6h->saddr)) {
++ kfree_skb(skb);
++ return NULL;
++ }
+ ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
+
+ hopopt = (u8 *)(ip6h + 1);
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 5ac07d3..7aafaed 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -802,6 +802,8 @@ next_elt:
+ write_unlock_bh(&tbl->lock);
+ cond_resched();
+ write_lock_bh(&tbl->lock);
++ nht = rcu_dereference_protected(tbl->nht,
++ lockdep_is_held(&tbl->lock));
+ }
+ /* Cycle through all hash buckets every base_reachable_time/2 ticks.
+ * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 53113b9..e4d1e4a 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1406,8 +1406,16 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+
+ BUG_ON(!pcount);
+
+- /* Adjust hint for FACK. Non-FACK is handled in tcp_sacktag_one(). */
+- if (tcp_is_fack(tp) && (skb == tp->lost_skb_hint))
++ /* Adjust counters and hints for the newly sacked sequence
++ * range but discard the return value since prev is already
++ * marked. We must tag the range first because the seq
++ * advancement below implicitly advances
++ * tcp_highest_sack_seq() when skb is highest_sack.
++ */
++ tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
++ start_seq, end_seq, dup_sack, pcount);
++
++ if (skb == tp->lost_skb_hint)
+ tp->lost_cnt_hint += pcount;
+
+ TCP_SKB_CB(prev)->end_seq += shifted;
+@@ -1433,12 +1441,6 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+ skb_shinfo(skb)->gso_type = 0;
+ }
+
+- /* Adjust counters and hints for the newly sacked sequence range but
+- * discard the return value since prev is already marked.
+- */
+- tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
+- start_seq, end_seq, dup_sack, pcount);
+-
+ /* Difference in this won't matter, both ACKed by the same cumul. ACK */
+ TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
+
+@@ -1586,6 +1588,10 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
+ }
+ }
+
++ /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
++ if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
++ goto fallback;
++
+ if (!skb_shift(prev, skb, len))
+ goto fallback;
+ if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
+@@ -2569,6 +2575,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
+
+ if (cnt > packets) {
+ if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
++ (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
+ (oldcnt >= packets))
+ break;
+
+diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c
+index 6341818..e3db3f9 100644
+--- a/net/ipv4/xfrm4_mode_beet.c
++++ b/net/ipv4/xfrm4_mode_beet.c
+@@ -110,10 +110,7 @@ static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb)
+
+ skb_push(skb, sizeof(*iph));
+ skb_reset_network_header(skb);
+-
+- memmove(skb->data - skb->mac_len, skb_mac_header(skb),
+- skb->mac_len);
+- skb_set_mac_header(skb, -skb->mac_len);
++ skb_mac_header_rebuild(skb);
+
+ xfrm4_beet_make_header(skb);
+
+diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
+index 534972e..ed4bf11 100644
+--- a/net/ipv4/xfrm4_mode_tunnel.c
++++ b/net/ipv4/xfrm4_mode_tunnel.c
+@@ -66,7 +66,6 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
+
+ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
+ {
+- const unsigned char *old_mac;
+ int err = -EINVAL;
+
+ if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
+@@ -84,10 +83,9 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
+ if (!(x->props.flags & XFRM_STATE_NOECN))
+ ipip_ecn_decapsulate(skb);
+
+- old_mac = skb_mac_header(skb);
+- skb_set_mac_header(skb, -skb->mac_len);
+- memmove(skb_mac_header(skb), old_mac, skb->mac_len);
+ skb_reset_network_header(skb);
++ skb_mac_header_rebuild(skb);
++
+ err = 0;
+
+ out:
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 836c4ea..a5521c5 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -434,6 +434,10 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
+ /* Join all-node multicast group */
+ ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
+
++ /* Join all-router multicast group if forwarding is set */
++ if (ndev->cnf.forwarding && dev && (dev->flags & IFF_MULTICAST))
++ ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
++
+ return ndev;
+ }
+
+diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c
+index 3437d7d..f37cba9 100644
+--- a/net/ipv6/xfrm6_mode_beet.c
++++ b/net/ipv6/xfrm6_mode_beet.c
+@@ -80,7 +80,6 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
+ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
+ {
+ struct ipv6hdr *ip6h;
+- const unsigned char *old_mac;
+ int size = sizeof(struct ipv6hdr);
+ int err;
+
+@@ -90,10 +89,7 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
+
+ __skb_push(skb, size);
+ skb_reset_network_header(skb);
+-
+- old_mac = skb_mac_header(skb);
+- skb_set_mac_header(skb, -skb->mac_len);
+- memmove(skb_mac_header(skb), old_mac, skb->mac_len);
++ skb_mac_header_rebuild(skb);
+
+ xfrm6_beet_make_header(skb);
+
+diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
+index 4d6edff..23ecd68 100644
+--- a/net/ipv6/xfrm6_mode_tunnel.c
++++ b/net/ipv6/xfrm6_mode_tunnel.c
+@@ -63,7 +63,6 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
+ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
+ {
+ int err = -EINVAL;
+- const unsigned char *old_mac;
+
+ if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
+ goto out;
+@@ -80,10 +79,9 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
+ if (!(x->props.flags & XFRM_STATE_NOECN))
+ ipip6_ecn_decapsulate(skb);
+
+- old_mac = skb_mac_header(skb);
+- skb_set_mac_header(skb, -skb->mac_len);
+- memmove(skb_mac_header(skb), old_mac, skb->mac_len);
+ skb_reset_network_header(skb);
++ skb_mac_header_rebuild(skb);
++
+ err = 0;
+
+ out:
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 3d8fbf4..dc8a6fc 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2063,12 +2063,16 @@ static int alc_build_controls(struct hda_codec *codec)
+ */
+
+ static void alc_init_special_input_src(struct hda_codec *codec);
++static int alc269_fill_coef(struct hda_codec *codec);
+
+ static int alc_init(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+ unsigned int i;
+
++ if (codec->vendor_id == 0x10ec0269)
++ alc269_fill_coef(codec);
++
+ alc_fix_pll(codec);
+ alc_auto_init_amp(codec, spec->init_amp);
+
+@@ -5110,8 +5114,12 @@ static const struct alc_model_fixup alc269_fixup_models[] = {
+
+ static int alc269_fill_coef(struct hda_codec *codec)
+ {
++ struct alc_spec *spec = codec->spec;
+ int val;
+
++ if (spec->codec_variant != ALC269_TYPE_ALC269VB)
++ return 0;
++
+ if ((alc_get_coef0(codec) & 0x00ff) < 0x015) {
+ alc_write_coef_idx(codec, 0xf, 0x960b);
+ alc_write_coef_idx(codec, 0xe, 0x8817);
+diff --git a/sound/soc/samsung/neo1973_wm8753.c b/sound/soc/samsung/neo1973_wm8753.c
+index 7207189..2fba3f7 100644
+--- a/sound/soc/samsung/neo1973_wm8753.c
++++ b/sound/soc/samsung/neo1973_wm8753.c
+@@ -421,7 +421,7 @@ static struct snd_soc_dai_link neo1973_dai[] = {
+ .platform_name = "samsung-audio",
+ .cpu_dai_name = "s3c24xx-iis",
+ .codec_dai_name = "wm8753-hifi",
+- .codec_name = "wm8753-codec.0-001a",
++ .codec_name = "wm8753.0-001a",
+ .init = neo1973_wm8753_init,
+ .ops = &neo1973_hifi_ops,
+ },
+@@ -430,7 +430,7 @@ static struct snd_soc_dai_link neo1973_dai[] = {
+ .stream_name = "Voice",
+ .cpu_dai_name = "dfbmcs320-pcm",
+ .codec_dai_name = "wm8753-voice",
+- .codec_name = "wm8753-codec.0-001a",
++ .codec_name = "wm8753.0-001a",
+ .ops = &neo1973_voice_ops,
+ },
+ };
diff --git a/1012_linux-3.2.13.patch b/1012_linux-3.2.13.patch
new file mode 100644
index 00000000..21cf92e6
--- /dev/null
+++ b/1012_linux-3.2.13.patch
@@ -0,0 +1,445 @@
+diff --git a/Makefile b/Makefile
+index 15e80f1..172e041 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
+index 9b6a820..3394254 100644
+--- a/arch/powerpc/platforms/powermac/smp.c
++++ b/arch/powerpc/platforms/powermac/smp.c
+@@ -414,7 +414,7 @@ static struct irqaction psurge_irqaction = {
+
+ static void __init smp_psurge_setup_cpu(int cpu_nr)
+ {
+- if (cpu_nr != 0)
++ if (cpu_nr != 0 || !psurge_start)
+ return;
+
+ /* reset the entry point so if we get another intr we won't
+diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
+index f7c0a74..7d1aa7c 100644
+--- a/drivers/net/wireless/iwlegacy/iwl-3945.c
++++ b/drivers/net/wireless/iwlegacy/iwl-3945.c
+@@ -1870,11 +1870,12 @@ static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
+ struct iwl_priv *priv = container_of(work, struct iwl_priv,
+ _3945.thermal_periodic.work);
+
+- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+- return;
+-
+ mutex_lock(&priv->mutex);
++ if (test_bit(STATUS_EXIT_PENDING, &priv->status) || priv->txq == NULL)
++ goto out;
++
+ iwl3945_reg_txpower_periodic(priv);
++out:
+ mutex_unlock(&priv->mutex);
+ }
+
+diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
+index 05f2ad1..b3d9f3f 100644
+--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
++++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
+@@ -2513,7 +2513,7 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
+ container_of(data, struct iwl_priv, alive_start.work);
+
+ mutex_lock(&priv->mutex);
+- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
++ if (test_bit(STATUS_EXIT_PENDING, &priv->status) || priv->txq == NULL)
+ goto out;
+
+ iwl3945_alive_start(priv);
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index d2b0888..a306bb6 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -109,7 +109,7 @@ struct afs_call {
+ unsigned reply_size; /* current size of reply */
+ unsigned first_offset; /* offset into mapping[first] */
+ unsigned last_to; /* amount of mapping[last] */
+- unsigned short offset; /* offset into received data store */
++ unsigned offset; /* offset into received data store */
+ unsigned char unmarshall; /* unmarshalling phase */
+ bool incoming; /* T if incoming call */
+ bool send_pages; /* T if data from mapping should be sent */
+diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
+index e45a323..8ad8c2a 100644
+--- a/fs/afs/rxrpc.c
++++ b/fs/afs/rxrpc.c
+@@ -314,6 +314,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
+ struct msghdr msg;
+ struct kvec iov[1];
+ int ret;
++ struct sk_buff *skb;
+
+ _enter("%x,{%d},", addr->s_addr, ntohs(call->port));
+
+@@ -380,6 +381,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
+
+ error_do_abort:
+ rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
++ while ((skb = skb_dequeue(&call->rx_queue)))
++ afs_free_skb(skb);
+ rxrpc_kernel_end_call(rxcall);
+ call->rxcall = NULL;
+ error_kill_call:
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index d327140..35a8970 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -515,6 +515,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
+ brelse(sbh[1]);
+ sbh[1] = NULL;
+ sbp[1] = NULL;
++ valid[1] = 0;
+ swp = 0;
+ }
+ if (!valid[swp]) {
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index 90f6544..769c0e9 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -278,6 +278,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+ struct rtable *rt;
+ __u8 rcv_wscale;
+ bool ecn_ok = false;
++ struct flowi4 fl4;
+
+ if (!sysctl_tcp_syncookies || !th->ack || th->rst)
+ goto out;
+@@ -346,20 +347,16 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+ * hasn't changed since we received the original syn, but I see
+ * no easy way to do this.
+ */
+- {
+- struct flowi4 fl4;
+-
+- flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
+- RT_SCOPE_UNIVERSE, IPPROTO_TCP,
+- inet_sk_flowi_flags(sk),
+- (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
+- ireq->loc_addr, th->source, th->dest);
+- security_req_classify_flow(req, flowi4_to_flowi(&fl4));
+- rt = ip_route_output_key(sock_net(sk), &fl4);
+- if (IS_ERR(rt)) {
+- reqsk_free(req);
+- goto out;
+- }
++ flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
++ RT_SCOPE_UNIVERSE, IPPROTO_TCP,
++ inet_sk_flowi_flags(sk),
++ (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
++ ireq->loc_addr, th->source, th->dest);
++ security_req_classify_flow(req, flowi4_to_flowi(&fl4));
++ rt = ip_route_output_key(sock_net(sk), &fl4);
++ if (IS_ERR(rt)) {
++ reqsk_free(req);
++ goto out;
+ }
+
+ /* Try to redo what tcp_v4_send_synack did. */
+@@ -373,5 +370,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+ ireq->rcv_wscale = rcv_wscale;
+
+ ret = get_cookie_sock(sk, skb, req, &rt->dst);
++ /* ip_queue_xmit() depends on our flow being setup
++ * Normal sockets get it right from inet_csk_route_child_sock()
++ */
++ if (ret)
++ inet_sk(ret)->cork.fl.u.ip4 = fl4;
+ out: return ret;
+ }
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index eb90aa8..de69cec 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1465,9 +1465,13 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+ inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
+ newinet->inet_id = newtp->write_seq ^ jiffies;
+
+- if (!dst && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
+- goto put_and_exit;
+-
++ if (!dst) {
++ dst = inet_csk_route_child_sock(sk, newsk, req);
++ if (!dst)
++ goto put_and_exit;
++ } else {
++ /* syncookie case : see end of cookie_v4_check() */
++ }
+ sk_setup_caps(newsk, dst);
+
+ tcp_mtup_init(newsk);
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index ee7839f..2257366 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -257,7 +257,6 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
+
+ if (rt) {
+ dev = rt->rt6i_dev;
+- dev_hold(dev);
+ dst_release(&rt->dst);
+ }
+ } else
+diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c
+index eba80c2..2f7073d 100644
+--- a/tools/perf/arch/powerpc/util/header.c
++++ b/tools/perf/arch/powerpc/util/header.c
+@@ -25,7 +25,7 @@ get_cpuid(char *buffer, size_t sz)
+
+ pvr = mfspr(SPRN_PVR);
+
+- nb = snprintf(buffer, sz, "%lu,%lu$", PVR_VER(pvr), PVR_REV(pvr));
++ nb = scnprintf(buffer, sz, "%lu,%lu$", PVR_VER(pvr), PVR_REV(pvr));
+
+ /* look for end marker to ensure the entire data fit */
+ if (strchr(buffer, '$')) {
+diff --git a/tools/perf/arch/x86/util/header.c b/tools/perf/arch/x86/util/header.c
+index f940060..146d12a 100644
+--- a/tools/perf/arch/x86/util/header.c
++++ b/tools/perf/arch/x86/util/header.c
+@@ -48,7 +48,7 @@ get_cpuid(char *buffer, size_t sz)
+ if (family >= 0x6)
+ model += ((a >> 16) & 0xf) << 4;
+ }
+- nb = snprintf(buffer, sz, "%s,%u,%u,%u$", vendor, family, model, step);
++ nb = scnprintf(buffer, sz, "%s,%u,%u,%u$", vendor, family, model, step);
+
+ /* look for end marker to ensure the entire data fit */
+ if (strchr(buffer, '$')) {
+diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
+index 521c38a..11e46da1 100644
+--- a/tools/perf/util/color.c
++++ b/tools/perf/util/color.c
+@@ -1,3 +1,4 @@
++#include <linux/kernel.h>
+ #include "cache.h"
+ #include "color.h"
+
+@@ -182,12 +183,12 @@ static int __color_vsnprintf(char *bf, size_t size, const char *color,
+ }
+
+ if (perf_use_color_default && *color)
+- r += snprintf(bf, size, "%s", color);
+- r += vsnprintf(bf + r, size - r, fmt, args);
++ r += scnprintf(bf, size, "%s", color);
++ r += vscnprintf(bf + r, size - r, fmt, args);
+ if (perf_use_color_default && *color)
+- r += snprintf(bf + r, size - r, "%s", PERF_COLOR_RESET);
++ r += scnprintf(bf + r, size - r, "%s", PERF_COLOR_RESET);
+ if (trail)
+- r += snprintf(bf + r, size - r, "%s", trail);
++ r += scnprintf(bf + r, size - r, "%s", trail);
+ return r;
+ }
+
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index 33c17a2..2cd88c1 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -1227,7 +1227,7 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
+ if (realname == NULL || filename == NULL || linkname == NULL)
+ goto out_free;
+
+- len = snprintf(filename, size, "%s%s%s",
++ len = scnprintf(filename, size, "%s%s%s",
+ debugdir, is_kallsyms ? "/" : "", realname);
+ if (mkdir_p(filename, 0755))
+ goto out_free;
+@@ -1242,7 +1242,7 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
+ goto out_free;
+ }
+
+- len = snprintf(linkname, size, "%s/.build-id/%.2s",
++ len = scnprintf(linkname, size, "%s/.build-id/%.2s",
+ debugdir, sbuild_id);
+
+ if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index abef270..216e33a 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -767,7 +767,7 @@ static int hist_entry__pcnt_snprintf(struct hist_entry *self, char *s,
+ sep ? "%.2f" : " %6.2f%%",
+ (period * 100.0) / total);
+ else
+- ret = snprintf(s, size, sep ? "%.2f" : " %6.2f%%",
++ ret = scnprintf(s, size, sep ? "%.2f" : " %6.2f%%",
+ (period * 100.0) / total);
+ if (symbol_conf.show_cpu_utilization) {
+ ret += percent_color_snprintf(s + ret, size - ret,
+@@ -790,20 +790,20 @@ static int hist_entry__pcnt_snprintf(struct hist_entry *self, char *s,
+ }
+ }
+ } else
+- ret = snprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);
++ ret = scnprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);
+
+ if (symbol_conf.show_nr_samples) {
+ if (sep)
+- ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
++ ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
+ else
+- ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
++ ret += scnprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
+ }
+
+ if (symbol_conf.show_total_period) {
+ if (sep)
+- ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
++ ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
+ else
+- ret += snprintf(s + ret, size - ret, " %12" PRIu64, period);
++ ret += scnprintf(s + ret, size - ret, " %12" PRIu64, period);
+ }
+
+ if (pair_hists) {
+@@ -818,25 +818,25 @@ static int hist_entry__pcnt_snprintf(struct hist_entry *self, char *s,
+ diff = new_percent - old_percent;
+
+ if (fabs(diff) >= 0.01)
+- snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
++ ret += scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
+ else
+- snprintf(bf, sizeof(bf), " ");
++ ret += scnprintf(bf, sizeof(bf), " ");
+
+ if (sep)
+- ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
++ ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
+ else
+- ret += snprintf(s + ret, size - ret, "%11.11s", bf);
++ ret += scnprintf(s + ret, size - ret, "%11.11s", bf);
+
+ if (show_displacement) {
+ if (displacement)
+- snprintf(bf, sizeof(bf), "%+4ld", displacement);
++ ret += scnprintf(bf, sizeof(bf), "%+4ld", displacement);
+ else
+- snprintf(bf, sizeof(bf), " ");
++ ret += scnprintf(bf, sizeof(bf), " ");
+
+ if (sep)
+- ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
++ ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
+ else
+- ret += snprintf(s + ret, size - ret, "%6.6s", bf);
++ ret += scnprintf(s + ret, size - ret, "%6.6s", bf);
+ }
+ }
+
+@@ -854,7 +854,7 @@ int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size,
+ if (se->elide)
+ continue;
+
+- ret += snprintf(s + ret, size - ret, "%s", sep ?: " ");
++ ret += scnprintf(s + ret, size - ret, "%s", sep ?: " ");
+ ret += se->se_snprintf(he, s + ret, size - ret,
+ hists__col_len(hists, se->se_width_idx));
+ }
+diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
+index 16da30d..076c9d4 100644
+--- a/tools/perf/util/sort.c
++++ b/tools/perf/util/sort.c
+@@ -33,6 +33,9 @@ static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
+ }
+ }
+ va_end(ap);
++
++ if (n >= (int)size)
++ return size - 1;
+ return n;
+ }
+
+diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
+index 92e0685..2eeb51b 100644
+--- a/tools/perf/util/strbuf.c
++++ b/tools/perf/util/strbuf.c
+@@ -1,4 +1,5 @@
+ #include "cache.h"
++#include <linux/kernel.h>
+
+ int prefixcmp(const char *str, const char *prefix)
+ {
+@@ -89,14 +90,14 @@ void strbuf_addf(struct strbuf *sb, const char *fmt, ...)
+ if (!strbuf_avail(sb))
+ strbuf_grow(sb, 64);
+ va_start(ap, fmt);
+- len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
++ len = vscnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
+ va_end(ap);
+ if (len < 0)
+- die("your vsnprintf is broken");
++ die("your vscnprintf is broken");
+ if (len > strbuf_avail(sb)) {
+ strbuf_grow(sb, len);
+ va_start(ap, fmt);
+- len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
++ len = vscnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
+ va_end(ap);
+ if (len > strbuf_avail(sb)) {
+ die("this should not happen, your snprintf is broken");
+diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
+index d0c94b4..81c9fa5 100644
+--- a/tools/perf/util/ui/browsers/hists.c
++++ b/tools/perf/util/ui/browsers/hists.c
+@@ -839,15 +839,15 @@ static int hists__browser_title(struct hists *self, char *bf, size_t size,
+ unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE];
+
+ nr_events = convert_unit(nr_events, &unit);
+- printed = snprintf(bf, size, "Events: %lu%c %s", nr_events, unit, ev_name);
++ printed = scnprintf(bf, size, "Events: %lu%c %s", nr_events, unit, ev_name);
+
+ if (thread)
+- printed += snprintf(bf + printed, size - printed,
++ printed += scnprintf(bf + printed, size - printed,
+ ", Thread: %s(%d)",
+ (thread->comm_set ? thread->comm : ""),
+ thread->pid);
+ if (dso)
+- printed += snprintf(bf + printed, size - printed,
++ printed += scnprintf(bf + printed, size - printed,
+ ", DSO: %s", dso->short_name);
+ return printed;
+ }
+@@ -1097,7 +1097,7 @@ static void perf_evsel_menu__write(struct ui_browser *browser,
+ HE_COLORSET_NORMAL);
+
+ nr_events = convert_unit(nr_events, &unit);
+- printed = snprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events,
++ printed = scnprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events,
+ unit, unit == ' ' ? "" : " ", ev_name);
+ slsmg_printf("%s", bf);
+
+@@ -1107,8 +1107,8 @@ static void perf_evsel_menu__write(struct ui_browser *browser,
+ if (!current_entry)
+ ui_browser__set_color(browser, HE_COLORSET_TOP);
+ nr_events = convert_unit(nr_events, &unit);
+- snprintf(bf, sizeof(bf), ": %ld%c%schunks LOST!", nr_events,
+- unit, unit == ' ' ? "" : " ");
++ printed += scnprintf(bf, sizeof(bf), ": %ld%c%schunks LOST!",
++ nr_events, unit, unit == ' ' ? "" : " ");
+ warn = bf;
+ }
+
+diff --git a/tools/perf/util/ui/helpline.c b/tools/perf/util/ui/helpline.c
+index 6ef3c56..f50f81c 100644
+--- a/tools/perf/util/ui/helpline.c
++++ b/tools/perf/util/ui/helpline.c
+@@ -65,7 +65,7 @@ int ui_helpline__show_help(const char *format, va_list ap)
+ static int backlog;
+
+ pthread_mutex_lock(&ui__lock);
+- ret = vsnprintf(ui_helpline__last_msg + backlog,
++ ret = vscnprintf(ui_helpline__last_msg + backlog,
+ sizeof(ui_helpline__last_msg) - backlog, format, ap);
+ backlog += ret;
+
diff --git a/1013_linux-3.2.14.patch b/1013_linux-3.2.14.patch
new file mode 100644
index 00000000..10a98035
--- /dev/null
+++ b/1013_linux-3.2.14.patch
@@ -0,0 +1,6098 @@
+diff --git a/Makefile b/Makefile
+index 172e041..afe4c7d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
+index 91aff7c..dbc59fa 100644
+--- a/arch/arm/mach-tegra/Kconfig
++++ b/arch/arm/mach-tegra/Kconfig
+@@ -13,6 +13,13 @@ config ARCH_TEGRA_2x_SOC
+ select USB_ARCH_HAS_EHCI if USB_SUPPORT
+ select USB_ULPI if USB_SUPPORT
+ select USB_ULPI_VIEWPORT if USB_SUPPORT
++ select ARM_ERRATA_720789
++ select ARM_ERRATA_742230
++ select ARM_ERRATA_751472
++ select ARM_ERRATA_754327
++ select ARM_ERRATA_764369
++ select PL310_ERRATA_727915 if CACHE_L2X0
++ select PL310_ERRATA_769419 if CACHE_L2X0
+ help
+ Support for NVIDIA Tegra AP20 and T20 processors, based on the
+ ARM CortexA9MP CPU and the ARM PL310 L2 cache controller
+@@ -54,6 +61,11 @@ config MACH_SEABOARD
+ config MACH_TEGRA_DT
+ bool "Generic Tegra board (FDT support)"
+ select USE_OF
++ select ARM_ERRATA_743622
++ select ARM_ERRATA_751472
++ select ARM_ERRATA_754322
++ select ARM_ERRATA_764369
++ select PL310_ERRATA_769419 if CACHE_L2X0
+ help
+ Support for generic nVidia Tegra boards using Flattened Device Tree
+
+diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
+index a026507..70ea6fd 100644
+--- a/arch/x86/include/asm/kvm_emulate.h
++++ b/arch/x86/include/asm/kvm_emulate.h
+@@ -189,6 +189,9 @@ struct x86_emulate_ops {
+ int (*intercept)(struct x86_emulate_ctxt *ctxt,
+ struct x86_instruction_info *info,
+ enum x86_intercept_stage stage);
++
++ bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
++ u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
+ };
+
+ typedef u32 __attribute__((vector_size(16))) sse128_t;
+@@ -297,6 +300,19 @@ struct x86_emulate_ctxt {
+ #define X86EMUL_MODE_PROT (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \
+ X86EMUL_MODE_PROT64)
+
++/* CPUID vendors */
++#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
++#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
++#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65
++
++#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41
++#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574
++#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273
++
++#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547
++#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
++#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69
++
+ enum x86_intercept_stage {
+ X86_ICTP_NONE = 0, /* Allow zero-init to not match anything */
+ X86_ICPT_PRE_EXCEPT,
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 6d939d7..a25e276 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -3963,18 +3963,36 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi)
+ static __init int bad_ioapic(unsigned long address)
+ {
+ if (nr_ioapics >= MAX_IO_APICS) {
+- printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded "
+- "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
++ pr_warn("WARNING: Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
++ MAX_IO_APICS, nr_ioapics);
+ return 1;
+ }
+ if (!address) {
+- printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address"
+- " found in table, skipping!\n");
++ pr_warn("WARNING: Bogus (zero) I/O APIC address found in table, skipping!\n");
+ return 1;
+ }
+ return 0;
+ }
+
++static __init int bad_ioapic_register(int idx)
++{
++ union IO_APIC_reg_00 reg_00;
++ union IO_APIC_reg_01 reg_01;
++ union IO_APIC_reg_02 reg_02;
++
++ reg_00.raw = io_apic_read(idx, 0);
++ reg_01.raw = io_apic_read(idx, 1);
++ reg_02.raw = io_apic_read(idx, 2);
++
++ if (reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1) {
++ pr_warn("I/O APIC 0x%x registers return all ones, skipping!\n",
++ mpc_ioapic_addr(idx));
++ return 1;
++ }
++
++ return 0;
++}
++
+ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
+ {
+ int idx = 0;
+@@ -3991,6 +4009,12 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
+ ioapics[idx].mp_config.apicaddr = address;
+
+ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
++
++ if (bad_ioapic_register(idx)) {
++ clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
++ return;
++ }
++
+ ioapics[idx].mp_config.apicid = io_apic_unique_id(id);
+ ioapics[idx].mp_config.apicver = io_apic_get_version(idx);
+
+@@ -4011,10 +4035,10 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
+ if (gsi_cfg->gsi_end >= gsi_top)
+ gsi_top = gsi_cfg->gsi_end + 1;
+
+- printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
+- "GSI %d-%d\n", idx, mpc_ioapic_id(idx),
+- mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
+- gsi_cfg->gsi_base, gsi_cfg->gsi_end);
++ pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n",
++ idx, mpc_ioapic_id(idx),
++ mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
++ gsi_cfg->gsi_base, gsi_cfg->gsi_end);
+
+ nr_ioapics++;
+ }
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index f3f6f53..bcda816 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -99,12 +99,6 @@
+ #endif
+ .endm
+
+-#ifdef CONFIG_VM86
+-#define resume_userspace_sig check_userspace
+-#else
+-#define resume_userspace_sig resume_userspace
+-#endif
+-
+ /*
+ * User gs save/restore
+ *
+@@ -328,10 +322,19 @@ ret_from_exception:
+ preempt_stop(CLBR_ANY)
+ ret_from_intr:
+ GET_THREAD_INFO(%ebp)
+-check_userspace:
++resume_userspace_sig:
++#ifdef CONFIG_VM86
+ movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
+ movb PT_CS(%esp), %al
+ andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
++#else
++ /*
++ * We can be coming here from a syscall done in the kernel space,
++ * e.g. a failed kernel_execve().
++ */
++ movl PT_CS(%esp), %eax
++ andl $SEGMENT_RPL_MASK, %eax
++#endif
+ cmpl $USER_RPL, %eax
+ jb resume_kernel # not returning to v8086 or userspace
+
+diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
+index 6bb7b85..bcfec2d 100644
+--- a/arch/x86/kernel/tls.c
++++ b/arch/x86/kernel/tls.c
+@@ -163,7 +163,7 @@ int regset_tls_get(struct task_struct *target, const struct user_regset *regset,
+ {
+ const struct desc_struct *tls;
+
+- if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
++ if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
+ (pos % sizeof(struct user_desc)) != 0 ||
+ (count % sizeof(struct user_desc)) != 0)
+ return -EINVAL;
+@@ -198,7 +198,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
+ struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
+ const struct user_desc *info;
+
+- if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
++ if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
+ (pos % sizeof(struct user_desc)) != 0 ||
+ (count % sizeof(struct user_desc)) != 0)
+ return -EINVAL;
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index db48336..3fe298a 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -934,6 +934,16 @@ static int __init init_tsc_clocksource(void)
+ clocksource_tsc.rating = 0;
+ clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
+ }
++
++ /*
++ * Trust the results of the earlier calibration on systems
++ * exporting a reliable TSC.
++ */
++ if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
++ clocksource_register_khz(&clocksource_tsc, tsc_khz);
++ return 0;
++ }
++
+ schedule_delayed_work(&tsc_irqwork, 0);
+ return 0;
+ }
+diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
+index 863f875..04b8726 100644
+--- a/arch/x86/kernel/vm86_32.c
++++ b/arch/x86/kernel/vm86_32.c
+@@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
+ spinlock_t *ptl;
+ int i;
+
++ down_write(&mm->mmap_sem);
+ pgd = pgd_offset(mm, 0xA0000);
+ if (pgd_none_or_clear_bad(pgd))
+ goto out;
+@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
+ }
+ pte_unmap_unlock(pte, ptl);
+ out:
++ up_write(&mm->mmap_sem);
+ flush_tlb();
+ }
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index f1e3be18..f5302da 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -1877,6 +1877,51 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
+ ss->p = 1;
+ }
+
++static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
++{
++ struct x86_emulate_ops *ops = ctxt->ops;
++ u32 eax, ebx, ecx, edx;
++
++ /*
++ * syscall should always be enabled in longmode - so only become
++ * vendor specific (cpuid) if other modes are active...
++ */
++ if (ctxt->mode == X86EMUL_MODE_PROT64)
++ return true;
++
++ eax = 0x00000000;
++ ecx = 0x00000000;
++ if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
++ /*
++ * Intel ("GenuineIntel")
++ * remark: Intel CPUs only support "syscall" in 64bit
++ * longmode. Also an 64bit guest with a
++ * 32bit compat-app running will #UD !! While this
++ * behaviour can be fixed (by emulating) into AMD
++ * response - CPUs of AMD can't behave like Intel.
++ */
++ if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
++ ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
++ edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
++ return false;
++
++ /* AMD ("AuthenticAMD") */
++ if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
++ ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
++ edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
++ return true;
++
++ /* AMD ("AMDisbetter!") */
++ if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
++ ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
++ edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
++ return true;
++ }
++
++ /* default: (not Intel, not AMD), apply Intel's stricter rules... */
++ return false;
++}
++
+ static int em_syscall(struct x86_emulate_ctxt *ctxt)
+ {
+ struct x86_emulate_ops *ops = ctxt->ops;
+@@ -1890,9 +1935,15 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
+ ctxt->mode == X86EMUL_MODE_VM86)
+ return emulate_ud(ctxt);
+
++ if (!(em_syscall_is_enabled(ctxt)))
++ return emulate_ud(ctxt);
++
+ ops->get_msr(ctxt, MSR_EFER, &efer);
+ setup_syscalls_segments(ctxt, &cs, &ss);
+
++ if (!(efer & EFER_SCE))
++ return emulate_ud(ctxt);
++
+ ops->get_msr(ctxt, MSR_STAR, &msr_data);
+ msr_data >>= 32;
+ cs_sel = (u16)(msr_data & 0xfffc);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 4c938da..e04cae1 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4655,6 +4655,28 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
+ return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
+ }
+
++static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
++ u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
++{
++ struct kvm_cpuid_entry2 *cpuid = NULL;
++
++ if (eax && ecx)
++ cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt),
++ *eax, *ecx);
++
++ if (cpuid) {
++ *eax = cpuid->eax;
++ *ecx = cpuid->ecx;
++ if (ebx)
++ *ebx = cpuid->ebx;
++ if (edx)
++ *edx = cpuid->edx;
++ return true;
++ }
++
++ return false;
++}
++
+ static struct x86_emulate_ops emulate_ops = {
+ .read_std = kvm_read_guest_virt_system,
+ .write_std = kvm_write_guest_virt_system,
+@@ -4685,6 +4707,7 @@ static struct x86_emulate_ops emulate_ops = {
+ .get_fpu = emulator_get_fpu,
+ .put_fpu = emulator_put_fpu,
+ .intercept = emulator_intercept,
++ .get_cpuid = emulator_get_cpuid,
+ };
+
+ static void cache_all_regs(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 7c1b765..5671752 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -475,8 +475,10 @@ void bpf_jit_compile(struct sk_filter *fp)
+ case BPF_S_LD_W_ABS:
+ func = sk_load_word;
+ common_load: seen |= SEEN_DATAREF;
+- if ((int)K < 0)
++ if ((int)K < 0) {
++ /* Abort the JIT because __load_pointer() is needed. */
+ goto out;
++ }
+ t_offset = func - (image + addrs[i]);
+ EMIT1_off32(0xbe, K); /* mov imm32,%esi */
+ EMIT1_off32(0xe8, t_offset); /* call */
+@@ -489,14 +491,8 @@ common_load: seen |= SEEN_DATAREF;
+ goto common_load;
+ case BPF_S_LDX_B_MSH:
+ if ((int)K < 0) {
+- if (pc_ret0 > 0) {
+- /* addrs[pc_ret0 - 1] is the start address */
+- EMIT_JMP(addrs[pc_ret0 - 1] - addrs[i]);
+- break;
+- }
+- CLEAR_A();
+- EMIT_JMP(cleanup_addr - addrs[i]);
+- break;
++ /* Abort the JIT because __load_pointer() is needed. */
++ goto out;
+ }
+ seen |= SEEN_DATAREF | SEEN_XREG;
+ t_offset = sk_load_byte_msh - (image + addrs[i]);
+diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
+index 35aca7d..4fe9d21 100644
+--- a/drivers/ata/pata_legacy.c
++++ b/drivers/ata/pata_legacy.c
+@@ -401,8 +401,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
+ ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
+
+ active = clamp_val(t.active, 2, 15);
+- recover = clamp_val(t.recover, 2, 16);
+- recover &= 0x15;
++ recover = clamp_val(t.recover, 2, 16) & 0x0F;
+
+ inb(0x3E6);
+ inb(0x3E6);
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 6790cf7..79038e5 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -751,7 +751,8 @@ static int pm_genpd_resume_noirq(struct device *dev)
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+- if (genpd->suspend_power_off)
++ if (genpd->suspend_power_off
++ || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
+ return 0;
+
+ /*
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 106beb1..db811d2 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -64,6 +64,7 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x0CF3, 0x3002) },
+ { USB_DEVICE(0x13d3, 0x3304) },
+ { USB_DEVICE(0x0930, 0x0215) },
++ { USB_DEVICE(0x0489, 0xE03D) },
+
+ /* Atheros AR9285 Malbec with sflash firmware */
+ { USB_DEVICE(0x03F0, 0x311D) },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index eabc437..c16c750 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -119,6 +119,7 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
++ { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
+
+ /* Atheros AR9285 Malbec with sflash firmware */
+ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+@@ -506,15 +507,10 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags)
+
+ pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress);
+
+- urb->dev = data->udev;
+- urb->pipe = pipe;
+- urb->context = hdev;
+- urb->complete = btusb_isoc_complete;
+- urb->interval = data->isoc_rx_ep->bInterval;
++ usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_isoc_complete,
++ hdev, data->isoc_rx_ep->bInterval);
+
+ urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP;
+- urb->transfer_buffer = buf;
+- urb->transfer_buffer_length = size;
+
+ __fill_isoc_descriptor(urb, size,
+ le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize));
+diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
+index fa567f1..c9e045c 100644
+--- a/drivers/char/tpm/Kconfig
++++ b/drivers/char/tpm/Kconfig
+@@ -5,7 +5,6 @@
+ menuconfig TCG_TPM
+ tristate "TPM Hardware Support"
+ depends on HAS_IOMEM
+- depends on EXPERIMENTAL
+ select SECURITYFS
+ ---help---
+ If you have a TPM security chip in your system, which
+diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
+index 361a1df..b366b34 100644
+--- a/drivers/char/tpm/tpm.c
++++ b/drivers/char/tpm/tpm.c
+@@ -1115,12 +1115,13 @@ ssize_t tpm_read(struct file *file, char __user *buf,
+ ret_size = atomic_read(&chip->data_pending);
+ atomic_set(&chip->data_pending, 0);
+ if (ret_size > 0) { /* relay data */
++ ssize_t orig_ret_size = ret_size;
+ if (size < ret_size)
+ ret_size = size;
+
+ mutex_lock(&chip->buffer_mutex);
+ rc = copy_to_user(buf, chip->data_buffer, ret_size);
+- memset(chip->data_buffer, 0, ret_size);
++ memset(chip->data_buffer, 0, orig_ret_size);
+ if (rc)
+ ret_size = -EFAULT;
+
+diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
+index 7f5f0da..0a0225a 100644
+--- a/drivers/firewire/ohci.c
++++ b/drivers/firewire/ohci.c
+@@ -2748,7 +2748,7 @@ static int handle_ir_buffer_fill(struct context *context,
+ container_of(context, struct iso_context, context);
+ u32 buffer_dma;
+
+- if (!last->transfer_status)
++ if (last->res_count != 0)
+ /* Descriptor(s) not done yet, stop iteration */
+ return 0;
+
+@@ -2762,8 +2762,7 @@ static int handle_ir_buffer_fill(struct context *context,
+ if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
+ ctx->base.callback.mc(&ctx->base,
+ le32_to_cpu(last->data_address) +
+- le16_to_cpu(last->req_count) -
+- le16_to_cpu(last->res_count),
++ le16_to_cpu(last->req_count),
+ ctx->base.callback_data);
+
+ return 1;
+diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
+index df0d595..3d00016 100644
+--- a/drivers/gpio/gpio-davinci.c
++++ b/drivers/gpio/gpio-davinci.c
+@@ -313,10 +313,16 @@ static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset)
+ return -ENODEV;
+ }
+
+-static int gpio_irq_type_unbanked(struct irq_data *d, unsigned trigger)
++static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger)
+ {
+- struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+- u32 mask = (u32) irq_data_get_irq_handler_data(d);
++ struct davinci_gpio_controller *d;
++ struct davinci_gpio_regs __iomem *g;
++ struct davinci_soc_info *soc_info = &davinci_soc_info;
++ u32 mask;
++
++ d = (struct davinci_gpio_controller *)data->handler_data;
++ g = (struct davinci_gpio_regs __iomem *)d->regs;
++ mask = __gpio_mask(data->irq - soc_info->gpio_irq);
+
+ if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+ return -EINVAL;
+@@ -380,7 +386,7 @@ static int __init davinci_gpio_irq_setup(void)
+ * IRQ mux conflicts; gpio_irq_type_unbanked() is only for GPIOs.
+ */
+ if (soc_info->gpio_unbanked) {
+- static struct irq_chip gpio_irqchip_unbanked;
++ static struct irq_chip_type gpio_unbanked;
+
+ /* pass "bank 0" GPIO IRQs to AINTC */
+ chips[0].chip.to_irq = gpio_to_irq_unbanked;
+@@ -388,9 +394,10 @@ static int __init davinci_gpio_irq_setup(void)
+
+ /* AINTC handles mask/unmask; GPIO handles triggering */
+ irq = bank_irq;
+- gpio_irqchip_unbanked = *irq_get_chip(irq);
+- gpio_irqchip_unbanked.name = "GPIO-AINTC";
+- gpio_irqchip_unbanked.irq_set_type = gpio_irq_type_unbanked;
++ gpio_unbanked = *container_of(irq_get_chip(irq),
++ struct irq_chip_type, chip);
++ gpio_unbanked.chip.name = "GPIO-AINTC";
++ gpio_unbanked.chip.irq_set_type = gpio_irq_type_unbanked;
+
+ /* default trigger: both edges */
+ g = gpio2regs(0);
+@@ -399,9 +406,8 @@ static int __init davinci_gpio_irq_setup(void)
+
+ /* set the direct IRQs up to use that irqchip */
+ for (gpio = 0; gpio < soc_info->gpio_unbanked; gpio++, irq++) {
+- irq_set_chip(irq, &gpio_irqchip_unbanked);
+- irq_set_handler_data(irq, (void *)__gpio_mask(gpio));
+- irq_set_chip_data(irq, (__force void *)g);
++ irq_set_chip(irq, &gpio_unbanked.chip);
++ irq_set_handler_data(irq, &chips[gpio / 32]);
+ irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH);
+ }
+
+diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
+index 0b05629..a6c10e8 100644
+--- a/drivers/gpio/gpio-omap.c
++++ b/drivers/gpio/gpio-omap.c
+@@ -508,7 +508,10 @@ static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
+
+ static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
+ {
+- _enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
++ if (enable)
++ _enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
++ else
++ _disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
+ }
+
+ /*
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index e2d85a9..d04597d 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -442,6 +442,10 @@ static int i915_drm_freeze(struct drm_device *dev)
+ /* Modeset on resume, not lid events */
+ dev_priv->modeset_on_lid = 0;
+
++ console_lock();
++ intel_fbdev_set_suspend(dev, 1);
++ console_unlock();
++
+ return 0;
+ }
+
+@@ -514,6 +518,9 @@ static int i915_drm_thaw(struct drm_device *dev)
+
+ dev_priv->modeset_on_lid = 0;
+
++ console_lock();
++ intel_fbdev_set_suspend(dev, 0);
++ console_unlock();
+ return error;
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 8359dc7..3e7c478 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -3084,10 +3084,13 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
+ return ret;
+ }
+
++ ret = i915_gem_object_wait_rendering(obj);
++ if (ret)
++ return ret;
++
+ /* Ensure that we invalidate the GPU's caches and TLBs. */
+ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+-
+- return i915_gem_object_wait_rendering(obj);
++ return 0;
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index a1b4343..83e820e 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -364,7 +364,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
+ struct drm_i915_gem_object *obj);
+ extern int intel_fbdev_init(struct drm_device *dev);
+ extern void intel_fbdev_fini(struct drm_device *dev);
+-
++extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
+ extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
+ extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
+ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
+index ec49bae..d0ce34b 100644
+--- a/drivers/gpu/drm/i915/intel_fb.c
++++ b/drivers/gpu/drm/i915/intel_fb.c
+@@ -257,6 +257,16 @@ void intel_fbdev_fini(struct drm_device *dev)
+ kfree(dev_priv->fbdev);
+ dev_priv->fbdev = NULL;
+ }
++
++void intel_fbdev_set_suspend(struct drm_device *dev, int state)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ if (!dev_priv->fbdev)
++ return;
++
++ fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
++}
++
+ MODULE_LICENSE("GPL and additional rights");
+
+ void intel_fb_output_poll_changed(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 5082d17..8e1532f 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -442,6 +442,20 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ struct radeon_device *rdev = dev->dev_private;
+ *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
+ }
++
++ /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
++ if ((dev->pdev->device == 0x9802) &&
++ (dev->pdev->subsystem_vendor == 0x1734) &&
++ (dev->pdev->subsystem_device == 0x11bd)) {
++ if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
++ *connector_type = DRM_MODE_CONNECTOR_DVII;
++ *line_mux = 0x3103;
++ } else if (*connector_type == DRM_MODE_CONNECTOR_DVID) {
++ *connector_type = DRM_MODE_CONNECTOR_DVII;
++ }
++ }
++
++
+ return true;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index e7cb3ab..f7d39ac 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -946,6 +946,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
+
+ encoder = obj_to_encoder(obj);
+
++ if (encoder->encoder_type != DRM_MODE_ENCODER_DAC ||
++ encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
++ continue;
++
+ encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->detect) {
+ if (ret != connector_status_connected) {
+diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
+index fde25c0..986d608 100644
+--- a/drivers/gpu/drm/radeon/radeon_cursor.c
++++ b/drivers/gpu/drm/radeon/radeon_cursor.c
+@@ -151,7 +151,9 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+ uint32_t height)
+ {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
++ struct radeon_device *rdev = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
++ struct radeon_bo *robj;
+ uint64_t gpu_addr;
+ int ret;
+
+@@ -173,7 +175,15 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+ return -ENOENT;
+ }
+
+- ret = radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
++ robj = gem_to_radeon_bo(obj);
++ ret = radeon_bo_reserve(robj, false);
++ if (unlikely(ret != 0))
++ goto fail;
++ /* Only 27 bit offset for legacy cursor */
++ ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
++ ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
++ &gpu_addr);
++ radeon_bo_unreserve(robj);
+ if (ret)
+ goto fail;
+
+@@ -181,7 +191,6 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+ radeon_crtc->cursor_height = height;
+
+ radeon_lock_cursor(crtc, true);
+- /* XXX only 27 bit offset for legacy cursor */
+ radeon_set_cursor(crtc, obj, gpu_addr);
+ radeon_show_cursor(crtc);
+ radeon_lock_cursor(crtc, false);
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 1c85152..f3ae607 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -204,7 +204,8 @@ void radeon_bo_unref(struct radeon_bo **bo)
+ *bo = NULL;
+ }
+
+-int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
++int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
++ u64 *gpu_addr)
+ {
+ int r, i;
+
+@@ -212,6 +213,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = radeon_bo_gpu_offset(bo);
++ WARN_ON_ONCE(max_offset != 0);
+ return 0;
+ }
+ radeon_ttm_placement_from_domain(bo, domain);
+@@ -219,6 +221,15 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
+ /* force to pin into visible video ram */
+ bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ }
++ if (max_offset) {
++ u64 lpfn = max_offset >> PAGE_SHIFT;
++
++ if (!bo->placement.lpfn)
++ bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;
++
++ if (lpfn < bo->placement.lpfn)
++ bo->placement.lpfn = lpfn;
++ }
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
+@@ -232,6 +243,11 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
+ return r;
+ }
+
++int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
++{
++ return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
++}
++
+ int radeon_bo_unpin(struct radeon_bo *bo)
+ {
+ int r, i;
+diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
+index b07f0f9..fb3f433 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.h
++++ b/drivers/gpu/drm/radeon/radeon_object.h
+@@ -108,6 +108,8 @@ extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
+ extern void radeon_bo_kunmap(struct radeon_bo *bo);
+ extern void radeon_bo_unref(struct radeon_bo **bo);
+ extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
++extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
++ u64 max_offset, u64 *gpu_addr);
+ extern int radeon_bo_unpin(struct radeon_bo *bo);
+ extern int radeon_bo_evict_vram(struct radeon_device *rdev);
+ extern void radeon_bo_force_delete(struct radeon_device *rdev);
+diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
+index 8965ad9..b99af34 100644
+--- a/drivers/hid/hid-chicony.c
++++ b/drivers/hid/hid-chicony.c
+@@ -45,6 +45,12 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ case 0xff09: ch_map_key_clear(BTN_9); break;
+ case 0xff0a: ch_map_key_clear(BTN_A); break;
+ case 0xff0b: ch_map_key_clear(BTN_B); break;
++ case 0x00f1: ch_map_key_clear(KEY_WLAN); break;
++ case 0x00f2: ch_map_key_clear(KEY_BRIGHTNESSDOWN); break;
++ case 0x00f3: ch_map_key_clear(KEY_BRIGHTNESSUP); break;
++ case 0x00f4: ch_map_key_clear(KEY_DISPLAY_OFF); break;
++ case 0x00f7: ch_map_key_clear(KEY_CAMERA); break;
++ case 0x00f8: ch_map_key_clear(KEY_PROG1); break;
+ default:
+ return 0;
+ }
+@@ -53,6 +59,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+
+ static const struct hid_device_id ch_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, ch_devices);
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index bb656d8..c27b402 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1394,6 +1394,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 3c3daec..fba3fc4 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -192,6 +192,7 @@
+ #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418
+ #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
+ #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
++#define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
+
+ #define USB_VENDOR_ID_CHUNGHWAT 0x2247
+ #define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
+diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
+index 523f8fb..930370d 100644
+--- a/drivers/hwmon/fam15h_power.c
++++ b/drivers/hwmon/fam15h_power.c
+@@ -60,7 +60,7 @@ static ssize_t show_power(struct device *dev,
+ pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5),
+ REG_TDP_RUNNING_AVERAGE, &val);
+ running_avg_capture = (val >> 4) & 0x3fffff;
+- running_avg_capture = sign_extend32(running_avg_capture, 22);
++ running_avg_capture = sign_extend32(running_avg_capture, 21);
+ running_avg_range = val & 0xf;
+
+ pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5),
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 7e7373a..d5f3b69 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -364,6 +364,9 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ }
+ ib_conn = ep->dd_data;
+
++ if (iser_alloc_rx_descriptors(ib_conn))
++ return -ENOMEM;
++
+ /* binds the iSER connection retrieved from the previously
+ * connected ep_handle to the iSCSI layer connection. exchanges
+ * connection pointers */
+@@ -398,19 +401,6 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+ iser_conn->ib_conn = NULL;
+ }
+
+-static int
+-iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+-{
+- struct iscsi_conn *conn = cls_conn->dd_data;
+- int err;
+-
+- err = iser_conn_set_full_featured_mode(conn);
+- if (err)
+- return err;
+-
+- return iscsi_conn_start(cls_conn);
+-}
+-
+ static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+@@ -724,7 +714,7 @@ static struct iscsi_transport iscsi_iser_transport = {
+ .get_conn_param = iscsi_conn_get_param,
+ .get_ep_param = iscsi_iser_get_ep_param,
+ .get_session_param = iscsi_session_get_param,
+- .start_conn = iscsi_iser_conn_start,
++ .start_conn = iscsi_conn_start,
+ .stop_conn = iscsi_iser_conn_stop,
+ /* iscsi host params */
+ .get_host_param = iscsi_host_get_param,
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index db7ea37..296be43 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -366,4 +366,5 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
+ int iser_initialize_task_headers(struct iscsi_task *task,
+ struct iser_tx_desc *tx_desc);
++int iser_alloc_rx_descriptors(struct iser_conn *ib_conn);
+ #endif
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index a607542..738a149 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -170,7 +170,7 @@ static void iser_create_send_desc(struct iser_conn *ib_conn,
+ }
+
+
+-static int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
++int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
+ {
+ int i, j;
+ u64 dma_addr;
+@@ -242,23 +242,24 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn)
+ kfree(ib_conn->rx_descs);
+ }
+
+-/**
+- * iser_conn_set_full_featured_mode - (iSER API)
+- */
+-int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
++static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+
+- iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);
+-
+- /* Check that there is no posted recv or send buffers left - */
+- /* they must be consumed during the login phase */
+- BUG_ON(iser_conn->ib_conn->post_recv_buf_count != 0);
+- BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
++ iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
++ /* check if this is the last login - going to full feature phase */
++ if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
++ return 0;
+
+- if (iser_alloc_rx_descriptors(iser_conn->ib_conn))
+- return -ENOMEM;
++ /*
++ * Check that there is one posted recv buffer (for the last login
++ * response) and no posted send buffers left - they must have been
++ * consumed during previous login phases.
++ */
++ WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1);
++ WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
+
++ iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);
+ /* Initial post receive buffers */
+ if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX))
+ return -ENOMEM;
+@@ -438,6 +439,9 @@ int iser_send_control(struct iscsi_conn *conn,
+ err = iser_post_recvl(iser_conn->ib_conn);
+ if (err)
+ goto send_control_error;
++ err = iser_post_rx_bufs(conn, task->hdr);
++ if (err)
++ goto send_control_error;
+ }
+
+ err = iser_post_send(iser_conn->ib_conn, mdesc);
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index e0b3e33..966a6e7 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -2432,7 +2432,7 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
+ * we don't need to preallocate the protection domains anymore.
+ * For now we have to.
+ */
+-static void prealloc_protection_domains(void)
++static void __init prealloc_protection_domains(void)
+ {
+ struct pci_dev *dev = NULL;
+ struct dma_ops_domain *dma_dom;
+diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
+index 6d03774..2a8722b 100644
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -1904,6 +1904,8 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
+ if (mddev->pers) {
+ mddev->pers->quiesce(mddev, 1);
+ rv = bitmap_create(mddev);
++ if (!rv)
++ rv = bitmap_load(mddev);
+ if (rv) {
+ bitmap_destroy(mddev);
+ mddev->bitmap_info.offset = 0;
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 8c2a000..58d8c6d 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -176,7 +176,6 @@ struct crypt_config {
+
+ #define MIN_IOS 16
+ #define MIN_POOL_PAGES 32
+-#define MIN_BIO_PAGES 8
+
+ static struct kmem_cache *_crypt_io_pool;
+
+@@ -848,12 +847,11 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
+ }
+
+ /*
+- * if additional pages cannot be allocated without waiting,
+- * return a partially allocated bio, the caller will then try
+- * to allocate additional bios while submitting this partial bio
++ * If additional pages cannot be allocated without waiting,
++ * return a partially-allocated bio. The caller will then try
++ * to allocate more bios while submitting this partial bio.
+ */
+- if (i == (MIN_BIO_PAGES - 1))
+- gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
++ gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
+
+ len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
+
+@@ -1046,16 +1044,14 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
+ queue_work(cc->io_queue, &io->work);
+ }
+
+-static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
+- int error, int async)
++static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
+ {
+ struct bio *clone = io->ctx.bio_out;
+ struct crypt_config *cc = io->target->private;
+
+- if (unlikely(error < 0)) {
++ if (unlikely(io->error < 0)) {
+ crypt_free_buffer_pages(cc, clone);
+ bio_put(clone);
+- io->error = -EIO;
+ crypt_dec_pending(io);
+ return;
+ }
+@@ -1106,12 +1102,16 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+ sector += bio_sectors(clone);
+
+ crypt_inc_pending(io);
++
+ r = crypt_convert(cc, &io->ctx);
++ if (r < 0)
++ io->error = -EIO;
++
+ crypt_finished = atomic_dec_and_test(&io->ctx.pending);
+
+ /* Encryption was already finished, submit io now */
+ if (crypt_finished) {
+- kcryptd_crypt_write_io_submit(io, r, 0);
++ kcryptd_crypt_write_io_submit(io, 0);
+
+ /*
+ * If there was an error, do not try next fragments.
+@@ -1162,11 +1162,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+ crypt_dec_pending(io);
+ }
+
+-static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
++static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
+ {
+- if (unlikely(error < 0))
+- io->error = -EIO;
+-
+ crypt_dec_pending(io);
+ }
+
+@@ -1181,9 +1178,11 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
+ io->sector);
+
+ r = crypt_convert(cc, &io->ctx);
++ if (r < 0)
++ io->error = -EIO;
+
+ if (atomic_dec_and_test(&io->ctx.pending))
+- kcryptd_crypt_read_done(io, r);
++ kcryptd_crypt_read_done(io);
+
+ crypt_dec_pending(io);
+ }
+@@ -1204,15 +1203,18 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
+ if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
+ error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
+
++ if (error < 0)
++ io->error = -EIO;
++
+ mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
+
+ if (!atomic_dec_and_test(&ctx->pending))
+ return;
+
+ if (bio_data_dir(io->base_bio) == READ)
+- kcryptd_crypt_read_done(io, error);
++ kcryptd_crypt_read_done(io);
+ else
+- kcryptd_crypt_write_io_submit(io, error, 1);
++ kcryptd_crypt_write_io_submit(io, 1);
+ }
+
+ static void kcryptd_crypt(struct work_struct *work)
+diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
+index 042e719..aa70f7d 100644
+--- a/drivers/md/dm-exception-store.c
++++ b/drivers/md/dm-exception-store.c
+@@ -283,7 +283,7 @@ int dm_exception_store_init(void)
+ return 0;
+
+ persistent_fail:
+- dm_persistent_snapshot_exit();
++ dm_transient_snapshot_exit();
+ transient_fail:
+ return r;
+ }
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index c308757..da2f021 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -124,7 +124,7 @@ struct cell {
+ struct hlist_node list;
+ struct bio_prison *prison;
+ struct cell_key key;
+- unsigned count;
++ struct bio *holder;
+ struct bio_list bios;
+ };
+
+@@ -220,55 +220,60 @@ static struct cell *__search_bucket(struct hlist_head *bucket,
+ * This may block if a new cell needs allocating. You must ensure that
+ * cells will be unlocked even if the calling thread is blocked.
+ *
+- * Returns the number of entries in the cell prior to the new addition
+- * or < 0 on failure.
++ * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
+ */
+ static int bio_detain(struct bio_prison *prison, struct cell_key *key,
+ struct bio *inmate, struct cell **ref)
+ {
+- int r;
++ int r = 1;
+ unsigned long flags;
+ uint32_t hash = hash_key(prison, key);
+- struct cell *uninitialized_var(cell), *cell2 = NULL;
++ struct cell *cell, *cell2;
+
+ BUG_ON(hash > prison->nr_buckets);
+
+ spin_lock_irqsave(&prison->lock, flags);
++
+ cell = __search_bucket(prison->cells + hash, key);
++ if (cell) {
++ bio_list_add(&cell->bios, inmate);
++ goto out;
++ }
+
+- if (!cell) {
+- /*
+- * Allocate a new cell
+- */
+- spin_unlock_irqrestore(&prison->lock, flags);
+- cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
+- spin_lock_irqsave(&prison->lock, flags);
++ /*
++ * Allocate a new cell
++ */
++ spin_unlock_irqrestore(&prison->lock, flags);
++ cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
++ spin_lock_irqsave(&prison->lock, flags);
+
+- /*
+- * We've been unlocked, so we have to double check that
+- * nobody else has inserted this cell in the meantime.
+- */
+- cell = __search_bucket(prison->cells + hash, key);
++ /*
++ * We've been unlocked, so we have to double check that
++ * nobody else has inserted this cell in the meantime.
++ */
++ cell = __search_bucket(prison->cells + hash, key);
++ if (cell) {
++ mempool_free(cell2, prison->cell_pool);
++ bio_list_add(&cell->bios, inmate);
++ goto out;
++ }
++
++ /*
++ * Use new cell.
++ */
++ cell = cell2;
+
+- if (!cell) {
+- cell = cell2;
+- cell2 = NULL;
++ cell->prison = prison;
++ memcpy(&cell->key, key, sizeof(cell->key));
++ cell->holder = inmate;
++ bio_list_init(&cell->bios);
++ hlist_add_head(&cell->list, prison->cells + hash);
+
+- cell->prison = prison;
+- memcpy(&cell->key, key, sizeof(cell->key));
+- cell->count = 0;
+- bio_list_init(&cell->bios);
+- hlist_add_head(&cell->list, prison->cells + hash);
+- }
+- }
++ r = 0;
+
+- r = cell->count++;
+- bio_list_add(&cell->bios, inmate);
++out:
+ spin_unlock_irqrestore(&prison->lock, flags);
+
+- if (cell2)
+- mempool_free(cell2, prison->cell_pool);
+-
+ *ref = cell;
+
+ return r;
+@@ -283,8 +288,8 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
+
+ hlist_del(&cell->list);
+
+- if (inmates)
+- bio_list_merge(inmates, &cell->bios);
++ bio_list_add(inmates, cell->holder);
++ bio_list_merge(inmates, &cell->bios);
+
+ mempool_free(cell, prison->cell_pool);
+ }
+@@ -305,22 +310,44 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
+ * bio may be in the cell. This function releases the cell, and also does
+ * a sanity check.
+ */
++static void __cell_release_singleton(struct cell *cell, struct bio *bio)
++{
++ hlist_del(&cell->list);
++ BUG_ON(cell->holder != bio);
++ BUG_ON(!bio_list_empty(&cell->bios));
++}
++
+ static void cell_release_singleton(struct cell *cell, struct bio *bio)
+ {
+- struct bio_prison *prison = cell->prison;
+- struct bio_list bios;
+- struct bio *b;
+ unsigned long flags;
+-
+- bio_list_init(&bios);
++ struct bio_prison *prison = cell->prison;
+
+ spin_lock_irqsave(&prison->lock, flags);
+- __cell_release(cell, &bios);
++ __cell_release_singleton(cell, bio);
+ spin_unlock_irqrestore(&prison->lock, flags);
++}
++
++/*
++ * Sometimes we don't want the holder, just the additional bios.
++ */
++static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
++{
++ struct bio_prison *prison = cell->prison;
+
+- b = bio_list_pop(&bios);
+- BUG_ON(b != bio);
+- BUG_ON(!bio_list_empty(&bios));
++ hlist_del(&cell->list);
++ bio_list_merge(inmates, &cell->bios);
++
++ mempool_free(cell, prison->cell_pool);
++}
++
++static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
++{
++ unsigned long flags;
++ struct bio_prison *prison = cell->prison;
++
++ spin_lock_irqsave(&prison->lock, flags);
++ __cell_release_no_holder(cell, inmates);
++ spin_unlock_irqrestore(&prison->lock, flags);
+ }
+
+ static void cell_error(struct cell *cell)
+@@ -800,21 +827,16 @@ static void cell_defer(struct thin_c *tc, struct cell *cell,
+ * Same as cell_defer above, except it omits one particular detainee,
+ * a write bio that covers the block and has already been processed.
+ */
+-static void cell_defer_except(struct thin_c *tc, struct cell *cell,
+- struct bio *exception)
++static void cell_defer_except(struct thin_c *tc, struct cell *cell)
+ {
+ struct bio_list bios;
+- struct bio *bio;
+ struct pool *pool = tc->pool;
+ unsigned long flags;
+
+ bio_list_init(&bios);
+- cell_release(cell, &bios);
+
+ spin_lock_irqsave(&pool->lock, flags);
+- while ((bio = bio_list_pop(&bios)))
+- if (bio != exception)
+- bio_list_add(&pool->deferred_bios, bio);
++ cell_release_no_holder(cell, &pool->deferred_bios);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ wake_worker(pool);
+@@ -854,7 +876,7 @@ static void process_prepared_mapping(struct new_mapping *m)
+ * the bios in the cell.
+ */
+ if (bio) {
+- cell_defer_except(tc, m->cell, bio);
++ cell_defer_except(tc, m->cell);
+ bio_endio(bio, 0);
+ } else
+ cell_defer(tc, m->cell, m->data_block);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index f47f1f8..6f37aa4 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1801,13 +1801,13 @@ retry:
+ | BB_LEN(internal_bb));
+ *bbp++ = cpu_to_le64(store_bb);
+ }
++ bb->changed = 0;
+ if (read_seqretry(&bb->lock, seq))
+ goto retry;
+
+ bb->sector = (rdev->sb_start +
+ (int)le32_to_cpu(sb->bblog_offset));
+ bb->size = le16_to_cpu(sb->bblog_size);
+- bb->changed = 0;
+ }
+ }
+
+@@ -2362,6 +2362,7 @@ repeat:
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ if (rdev->badblocks.changed) {
++ rdev->badblocks.changed = 0;
+ md_ack_all_badblocks(&rdev->badblocks);
+ md_error(mddev, rdev);
+ }
+@@ -8097,30 +8098,23 @@ static int md_notify_reboot(struct notifier_block *this,
+ struct mddev *mddev;
+ int need_delay = 0;
+
+- if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
+-
+- printk(KERN_INFO "md: stopping all md devices.\n");
+-
+- for_each_mddev(mddev, tmp) {
+- if (mddev_trylock(mddev)) {
+- /* Force a switch to readonly even array
+- * appears to still be in use. Hence
+- * the '100'.
+- */
+- md_set_readonly(mddev, 100);
+- mddev_unlock(mddev);
+- }
+- need_delay = 1;
++ for_each_mddev(mddev, tmp) {
++ if (mddev_trylock(mddev)) {
++ __md_stop_writes(mddev);
++ mddev->safemode = 2;
++ mddev_unlock(mddev);
+ }
+- /*
+- * certain more exotic SCSI devices are known to be
+- * volatile wrt too early system reboots. While the
+- * right place to handle this issue is the given
+- * driver, we do want to have a safe RAID driver ...
+- */
+- if (need_delay)
+- mdelay(1000*1);
++ need_delay = 1;
+ }
++ /*
++ * certain more exotic SCSI devices are known to be
++ * volatile wrt too early system reboots. While the
++ * right place to handle this issue is the given
++ * driver, we do want to have a safe RAID driver ...
++ */
++ if (need_delay)
++ mdelay(1000*1);
++
+ return NOTIFY_DONE;
+ }
+
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index 023fbc2..1a35caf 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -128,18 +128,9 @@ static void delete_at(struct node *n, unsigned index)
+ n->header.nr_entries = cpu_to_le32(nr_entries - 1);
+ }
+
+-static unsigned del_threshold(struct node *n)
+-{
+- return le32_to_cpu(n->header.max_entries) / 3;
+-}
+-
+ static unsigned merge_threshold(struct node *n)
+ {
+- /*
+- * The extra one is because we know we're potentially going to
+- * delete an entry.
+- */
+- return 2 * (le32_to_cpu(n->header.max_entries) / 3) + 1;
++ return le32_to_cpu(n->header.max_entries) / 3;
+ }
+
+ struct child {
+@@ -188,6 +179,15 @@ static int exit_child(struct dm_btree_info *info, struct child *c)
+
+ static void shift(struct node *left, struct node *right, int count)
+ {
++ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
++ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
++ uint32_t max_entries = le32_to_cpu(left->header.max_entries);
++ uint32_t r_max_entries = le32_to_cpu(right->header.max_entries);
++
++ BUG_ON(max_entries != r_max_entries);
++ BUG_ON(nr_left - count > max_entries);
++ BUG_ON(nr_right + count > max_entries);
++
+ if (!count)
+ return;
+
+@@ -199,13 +199,8 @@ static void shift(struct node *left, struct node *right, int count)
+ node_shift(right, count);
+ }
+
+- left->header.nr_entries =
+- cpu_to_le32(le32_to_cpu(left->header.nr_entries) - count);
+- BUG_ON(le32_to_cpu(left->header.nr_entries) > le32_to_cpu(left->header.max_entries));
+-
+- right->header.nr_entries =
+- cpu_to_le32(le32_to_cpu(right->header.nr_entries) + count);
+- BUG_ON(le32_to_cpu(right->header.nr_entries) > le32_to_cpu(right->header.max_entries));
++ left->header.nr_entries = cpu_to_le32(nr_left - count);
++ right->header.nr_entries = cpu_to_le32(nr_right + count);
+ }
+
+ static void __rebalance2(struct dm_btree_info *info, struct node *parent,
+@@ -215,8 +210,9 @@ static void __rebalance2(struct dm_btree_info *info, struct node *parent,
+ struct node *right = r->n;
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
++ unsigned threshold = 2 * merge_threshold(left) + 1;
+
+- if (nr_left + nr_right <= merge_threshold(left)) {
++ if (nr_left + nr_right < threshold) {
+ /*
+ * Merge
+ */
+@@ -234,9 +230,6 @@ static void __rebalance2(struct dm_btree_info *info, struct node *parent,
+ * Rebalance.
+ */
+ unsigned target_left = (nr_left + nr_right) / 2;
+- unsigned shift_ = nr_left - target_left;
+- BUG_ON(le32_to_cpu(left->header.max_entries) <= nr_left - shift_);
+- BUG_ON(le32_to_cpu(right->header.max_entries) <= nr_right + shift_);
+ shift(left, right, nr_left - target_left);
+ *key_ptr(parent, r->index) = right->keys[0];
+ }
+@@ -272,6 +265,84 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
+ return exit_child(info, &right);
+ }
+
++/*
++ * We dump as many entries from center as possible into left, then the rest
++ * in right, then rebalance2. This wastes some cpu, but I want something
++ * simple atm.
++ */
++static void delete_center_node(struct dm_btree_info *info, struct node *parent,
++ struct child *l, struct child *c, struct child *r,
++ struct node *left, struct node *center, struct node *right,
++ uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
++{
++ uint32_t max_entries = le32_to_cpu(left->header.max_entries);
++ unsigned shift = min(max_entries - nr_left, nr_center);
++
++ BUG_ON(nr_left + shift > max_entries);
++ node_copy(left, center, -shift);
++ left->header.nr_entries = cpu_to_le32(nr_left + shift);
++
++ if (shift != nr_center) {
++ shift = nr_center - shift;
++ BUG_ON((nr_right + shift) > max_entries);
++ node_shift(right, shift);
++ node_copy(center, right, shift);
++ right->header.nr_entries = cpu_to_le32(nr_right + shift);
++ }
++ *key_ptr(parent, r->index) = right->keys[0];
++
++ delete_at(parent, c->index);
++ r->index--;
++
++ dm_tm_dec(info->tm, dm_block_location(c->block));
++ __rebalance2(info, parent, l, r);
++}
++
++/*
++ * Redistributes entries among 3 sibling nodes.
++ */
++static void redistribute3(struct dm_btree_info *info, struct node *parent,
++ struct child *l, struct child *c, struct child *r,
++ struct node *left, struct node *center, struct node *right,
++ uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
++{
++ int s;
++ uint32_t max_entries = le32_to_cpu(left->header.max_entries);
++ unsigned target = (nr_left + nr_center + nr_right) / 3;
++ BUG_ON(target > max_entries);
++
++ if (nr_left < nr_right) {
++ s = nr_left - target;
++
++ if (s < 0 && nr_center < -s) {
++ /* not enough in central node */
++ shift(left, center, nr_center);
++ s = nr_center - target;
++ shift(left, right, s);
++ nr_right += s;
++ } else
++ shift(left, center, s);
++
++ shift(center, right, target - nr_right);
++
++ } else {
++ s = target - nr_right;
++ if (s > 0 && nr_center < s) {
++ /* not enough in central node */
++ shift(center, right, nr_center);
++ s = target - nr_center;
++ shift(left, right, s);
++ nr_left -= s;
++ } else
++ shift(center, right, s);
++
++ shift(left, center, nr_left - target);
++ }
++
++ *key_ptr(parent, c->index) = center->keys[0];
++ *key_ptr(parent, r->index) = right->keys[0];
++}
++
+ static void __rebalance3(struct dm_btree_info *info, struct node *parent,
+ struct child *l, struct child *c, struct child *r)
+ {
+@@ -282,62 +353,18 @@ static void __rebalance3(struct dm_btree_info *info, struct node *parent,
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+- uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+
+- unsigned target;
++ unsigned threshold = merge_threshold(left) * 4 + 1;
+
+ BUG_ON(left->header.max_entries != center->header.max_entries);
+ BUG_ON(center->header.max_entries != right->header.max_entries);
+
+- if (((nr_left + nr_center + nr_right) / 2) < merge_threshold(center)) {
+- /*
+- * Delete center node:
+- *
+- * We dump as many entries from center as possible into
+- * left, then the rest in right, then rebalance2. This
+- * wastes some cpu, but I want something simple atm.
+- */
+- unsigned shift = min(max_entries - nr_left, nr_center);
+-
+- BUG_ON(nr_left + shift > max_entries);
+- node_copy(left, center, -shift);
+- left->header.nr_entries = cpu_to_le32(nr_left + shift);
+-
+- if (shift != nr_center) {
+- shift = nr_center - shift;
+- BUG_ON((nr_right + shift) >= max_entries);
+- node_shift(right, shift);
+- node_copy(center, right, shift);
+- right->header.nr_entries = cpu_to_le32(nr_right + shift);
+- }
+- *key_ptr(parent, r->index) = right->keys[0];
+-
+- delete_at(parent, c->index);
+- r->index--;
+-
+- dm_tm_dec(info->tm, dm_block_location(c->block));
+- __rebalance2(info, parent, l, r);
+-
+- return;
+- }
+-
+- /*
+- * Rebalance
+- */
+- target = (nr_left + nr_center + nr_right) / 3;
+- BUG_ON(target > max_entries);
+-
+- /*
+- * Adjust the left node
+- */
+- shift(left, center, nr_left - target);
+-
+- /*
+- * Adjust the right node
+- */
+- shift(center, right, target - nr_right);
+- *key_ptr(parent, c->index) = center->keys[0];
+- *key_ptr(parent, r->index) = right->keys[0];
++ if ((nr_left + nr_center + nr_right) < threshold)
++ delete_center_node(info, parent, l, c, r, left, center, right,
++ nr_left, nr_center, nr_right);
++ else
++ redistribute3(info, parent, l, c, r, left, center, right,
++ nr_left, nr_center, nr_right);
+ }
+
+ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
+@@ -441,9 +468,6 @@ static int rebalance_children(struct shadow_spine *s,
+ if (r)
+ return r;
+
+- if (child_entries > del_threshold(n))
+- return 0;
+-
+ has_left_sibling = i > 0;
+ has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);
+
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 7d9e071..7af60ec 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -731,9 +731,22 @@ static void wait_barrier(struct r1conf *conf)
+ spin_lock_irq(&conf->resync_lock);
+ if (conf->barrier) {
+ conf->nr_waiting++;
+- wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
++ /* Wait for the barrier to drop.
++ * However if there are already pending
++ * requests (preventing the barrier from
++ * rising completely), and the
++ * pre-process bio queue isn't empty,
++ * then don't wait, as we need to empty
++ * that queue to get the nr_pending
++ * count down.
++ */
++ wait_event_lock_irq(conf->wait_barrier,
++ !conf->barrier ||
++ (conf->nr_pending &&
++ current->bio_list &&
++ !bio_list_empty(current->bio_list)),
+ conf->resync_lock,
+- );
++ );
+ conf->nr_waiting--;
+ }
+ conf->nr_pending++;
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 685ddf3..b219449 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -790,9 +790,22 @@ static void wait_barrier(struct r10conf *conf)
+ spin_lock_irq(&conf->resync_lock);
+ if (conf->barrier) {
+ conf->nr_waiting++;
+- wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
++ /* Wait for the barrier to drop.
++ * However if there are already pending
++ * requests (preventing the barrier from
++ * rising completely), and the
++ * pre-process bio queue isn't empty,
++ * then don't wait, as we need to empty
++ * that queue to get the nr_pending
++ * count down.
++ */
++ wait_event_lock_irq(conf->wait_barrier,
++ !conf->barrier ||
++ (conf->nr_pending &&
++ current->bio_list &&
++ !bio_list_empty(current->bio_list)),
+ conf->resync_lock,
+- );
++ );
+ conf->nr_waiting--;
+ }
+ conf->nr_pending++;
+diff --git a/drivers/media/dvb/dvb-usb/mxl111sf.c b/drivers/media/dvb/dvb-usb/mxl111sf.c
+index b5c98da..bc6ea9f 100644
+--- a/drivers/media/dvb/dvb-usb/mxl111sf.c
++++ b/drivers/media/dvb/dvb-usb/mxl111sf.c
+@@ -351,15 +351,13 @@ static int mxl111sf_ep6_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
+ adap_state->ep6_clockphase,
+ 0, 0);
+ mxl_fail(ret);
++#if 0
+ } else {
+ ret = mxl111sf_disable_656_port(state);
+ mxl_fail(ret);
++#endif
+ }
+
+- mxl111sf_read_reg(state, 0x12, &tmp);
+- tmp &= ~0x04;
+- mxl111sf_write_reg(state, 0x12, tmp);
+-
+ return ret;
+ }
+
+diff --git a/drivers/media/dvb/frontends/lgdt330x.c b/drivers/media/dvb/frontends/lgdt330x.c
+index 43971e6..aa63d68 100644
+--- a/drivers/media/dvb/frontends/lgdt330x.c
++++ b/drivers/media/dvb/frontends/lgdt330x.c
+@@ -104,8 +104,8 @@ static int i2c_write_demod_bytes (struct lgdt330x_state* state,
+ * then reads the data returned for (len) bytes.
+ */
+
+-static u8 i2c_read_demod_bytes (struct lgdt330x_state* state,
+- enum I2C_REG reg, u8* buf, int len)
++static int i2c_read_demod_bytes(struct lgdt330x_state *state,
++ enum I2C_REG reg, u8 *buf, int len)
+ {
+ u8 wr [] = { reg };
+ struct i2c_msg msg [] = {
+@@ -118,6 +118,8 @@ static u8 i2c_read_demod_bytes (struct lgdt330x_state* state,
+ ret = i2c_transfer(state->i2c, msg, 2);
+ if (ret != 2) {
+ printk(KERN_WARNING "lgdt330x: %s: addr 0x%02x select 0x%02x error (ret == %i)\n", __func__, state->config->demod_address, reg, ret);
++ if (ret >= 0)
++ ret = -EIO;
+ } else {
+ ret = 0;
+ }
+diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.c b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
+index c6da8f7..d8c8982 100644
+--- a/drivers/media/video/pvrusb2/pvrusb2-devattr.c
++++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.c
+@@ -320,7 +320,17 @@ static struct tda829x_config tda829x_no_probe = {
+ .probe_tuner = TDA829X_DONT_PROBE,
+ };
+
++static struct tda18271_std_map hauppauge_tda18271_dvbt_std_map = {
++ .dvbt_6 = { .if_freq = 3300, .agc_mode = 3, .std = 4,
++ .if_lvl = 1, .rfagc_top = 0x37, },
++ .dvbt_7 = { .if_freq = 3800, .agc_mode = 3, .std = 5,
++ .if_lvl = 1, .rfagc_top = 0x37, },
++ .dvbt_8 = { .if_freq = 4300, .agc_mode = 3, .std = 6,
++ .if_lvl = 1, .rfagc_top = 0x37, },
++};
++
+ static struct tda18271_config hauppauge_tda18271_dvb_config = {
++ .std_map = &hauppauge_tda18271_dvbt_std_map,
+ .gate = TDA18271_GATE_ANALOG,
+ .output_opt = TDA18271_OUTPUT_LT_OFF,
+ };
+diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
+index 0cb17d9..b99318e 100644
+--- a/drivers/mtd/ubi/scan.c
++++ b/drivers/mtd/ubi/scan.c
+@@ -1174,7 +1174,7 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+- goto out_slab;
++ goto out_si;
+
+ vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vidh)
+@@ -1235,8 +1235,6 @@ out_vidh:
+ ubi_free_vid_hdr(ubi, vidh);
+ out_ech:
+ kfree(ech);
+-out_slab:
+- kmem_cache_destroy(si->scan_leb_slab);
+ out_si:
+ ubi_scan_destroy_si(si);
+ return ERR_PTR(err);
+@@ -1325,7 +1323,9 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si)
+ }
+ }
+
+- kmem_cache_destroy(si->scan_leb_slab);
++ if (si->scan_leb_slab)
++ kmem_cache_destroy(si->scan_leb_slab);
++
+ kfree(si);
+ }
+
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 0696e36..cf42971 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -389,7 +389,7 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
+ */
+ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
+ {
+- int err, medium_ec;
++ int err;
+ struct ubi_wl_entry *e, *first, *last;
+
+ ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
+@@ -427,7 +427,7 @@ retry:
+ * For unknown data we pick a physical eraseblock with medium
+ * erase counter. But we by no means can pick a physical
+ * eraseblock with erase counter greater or equivalent than the
+- * lowest erase counter plus %WL_FREE_MAX_DIFF.
++ * lowest erase counter plus %WL_FREE_MAX_DIFF/2.
+ */
+ first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
+ u.rb);
+@@ -436,10 +436,8 @@ retry:
+ if (last->ec - first->ec < WL_FREE_MAX_DIFF)
+ e = rb_entry(ubi->free.rb_node,
+ struct ubi_wl_entry, u.rb);
+- else {
+- medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
+- e = find_wl_entry(&ubi->free, medium_ec);
+- }
++ else
++ e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2);
+ break;
+ case UBI_SHORTTERM:
+ /*
+diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
+index 9fe18d1..f478a22 100644
+--- a/drivers/net/ethernet/intel/e1000e/e1000.h
++++ b/drivers/net/ethernet/intel/e1000e/e1000.h
+@@ -309,6 +309,7 @@ struct e1000_adapter {
+ u32 txd_cmd;
+
+ bool detect_tx_hung;
++ bool tx_hang_recheck;
+ u8 tx_timeout_factor;
+
+ u32 tx_int_delay;
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index a855db1..4e933d1 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -1030,6 +1030,7 @@ static void e1000_print_hw_hang(struct work_struct *work)
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter,
+ print_hang_task);
++ struct net_device *netdev = adapter->netdev;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ unsigned int i = tx_ring->next_to_clean;
+ unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
+@@ -1041,6 +1042,21 @@ static void e1000_print_hw_hang(struct work_struct *work)
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
++ if (!adapter->tx_hang_recheck &&
++ (adapter->flags2 & FLAG2_DMA_BURST)) {
++ /* May be block on write-back, flush and detect again
++ * flush pending descriptor writebacks to memory
++ */
++ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
++ /* execute the writes immediately */
++ e1e_flush();
++ adapter->tx_hang_recheck = true;
++ return;
++ }
++ /* Real hang detected */
++ adapter->tx_hang_recheck = false;
++ netif_stop_queue(netdev);
++
+ e1e_rphy(hw, PHY_STATUS, &phy_status);
+ e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
+ e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
+@@ -1154,10 +1170,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
+ if (tx_ring->buffer_info[i].time_stamp &&
+ time_after(jiffies, tx_ring->buffer_info[i].time_stamp
+ + (adapter->tx_timeout_factor * HZ)) &&
+- !(er32(STATUS) & E1000_STATUS_TXOFF)) {
++ !(er32(STATUS) & E1000_STATUS_TXOFF))
+ schedule_work(&adapter->print_hang_task);
+- netif_stop_queue(netdev);
+- }
++ else
++ adapter->tx_hang_recheck = false;
+ }
+ adapter->total_tx_bytes += total_tx_bytes;
+ adapter->total_tx_packets += total_tx_packets;
+@@ -3782,6 +3798,7 @@ static int e1000_open(struct net_device *netdev)
+
+ e1000_irq_enable(adapter);
+
++ adapter->tx_hang_recheck = false;
+ netif_start_queue(netdev);
+
+ adapter->idle_check = true;
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index 7803efa..f612b35 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -95,6 +95,10 @@ static int disable_msi = 0;
+ module_param(disable_msi, int, 0);
+ MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+
++static int legacy_pme = 0;
++module_param(legacy_pme, int, 0);
++MODULE_PARM_DESC(legacy_pme, "Legacy power management");
++
+ static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
+ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
+@@ -867,6 +871,13 @@ static void sky2_wol_init(struct sky2_port *sky2)
+ /* Disable PiG firmware */
+ sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);
+
++ /* Needed by some broken BIOSes, use PCI rather than PCI-e for WOL */
++ if (legacy_pme) {
++ u32 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
++ reg1 |= PCI_Y2_PME_LEGACY;
++ sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
++ }
++
+ /* block receiver */
+ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
+ sky2_read32(hw, B0_CTST);
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 81b96e3..750e330 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -589,6 +589,14 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
+ entry = (struct skb_data *) skb->cb;
+ urb = entry->urb;
+
++ /*
++ * Get reference count of the URB to avoid it to be
++ * freed during usb_unlink_urb, which may trigger
++ * use-after-free problem inside usb_unlink_urb since
++ * usb_unlink_urb is always racing with .complete
++ * handler(include defer_bh).
++ */
++ usb_get_urb(urb);
+ spin_unlock_irqrestore(&q->lock, flags);
+ // during some PM-driven resume scenarios,
+ // these (async) unlinks complete immediately
+@@ -597,6 +605,7 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
+ netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
+ else
+ count++;
++ usb_put_urb(urb);
+ spin_lock_irqsave(&q->lock, flags);
+ }
+ spin_unlock_irqrestore (&q->lock, flags);
+@@ -1028,7 +1037,6 @@ static void tx_complete (struct urb *urb)
+ }
+
+ usb_autopm_put_interface_async(dev->intf);
+- urb->dev = NULL;
+ entry->state = tx_done;
+ defer_bh(dev, skb, &dev->txq);
+ }
+diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
+index fcf5416..3d75d4c 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-core.c
++++ b/drivers/net/wireless/iwlwifi/iwl-core.c
+@@ -1787,20 +1787,10 @@ void iwl_bg_watchdog(unsigned long data)
+ if (timeout == 0)
+ return;
+
+- /* monitor and check for stuck cmd queue */
+- if (iwl_check_stuck_queue(priv, priv->shrd->cmd_queue))
+- return;
+-
+- /* monitor and check for other stuck queues */
+- if (iwl_is_any_associated(priv)) {
+- for (cnt = 0; cnt < hw_params(priv).max_txq_num; cnt++) {
+- /* skip as we already checked the command queue */
+- if (cnt == priv->shrd->cmd_queue)
+- continue;
+- if (iwl_check_stuck_queue(priv, cnt))
+- return;
+- }
+- }
++ /* monitor and check for stuck queues */
++ for (cnt = 0; cnt < hw_params(priv).max_txq_num; cnt++)
++ if (iwl_check_stuck_queue(priv, cnt))
++ return;
+
+ mod_timer(&priv->watchdog, jiffies +
+ msecs_to_jiffies(IWL_WD_TICK(timeout)));
+diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
+index 78d0d69..428401b 100644
+--- a/drivers/net/wireless/p54/p54spi.c
++++ b/drivers/net/wireless/p54/p54spi.c
+@@ -622,19 +622,19 @@ static int __devinit p54spi_probe(struct spi_device *spi)
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&priv->spi->dev, "spi_setup failed");
+- goto err_free_common;
++ goto err_free;
+ }
+
+ ret = gpio_request(p54spi_gpio_power, "p54spi power");
+ if (ret < 0) {
+ dev_err(&priv->spi->dev, "power GPIO request failed: %d", ret);
+- goto err_free_common;
++ goto err_free;
+ }
+
+ ret = gpio_request(p54spi_gpio_irq, "p54spi irq");
+ if (ret < 0) {
+ dev_err(&priv->spi->dev, "irq GPIO request failed: %d", ret);
+- goto err_free_common;
++ goto err_free_gpio_power;
+ }
+
+ gpio_direction_output(p54spi_gpio_power, 0);
+@@ -645,7 +645,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
+ priv->spi);
+ if (ret < 0) {
+ dev_err(&priv->spi->dev, "request_irq() failed");
+- goto err_free_common;
++ goto err_free_gpio_irq;
+ }
+
+ irq_set_irq_type(gpio_to_irq(p54spi_gpio_irq), IRQ_TYPE_EDGE_RISING);
+@@ -677,6 +677,12 @@ static int __devinit p54spi_probe(struct spi_device *spi)
+ return 0;
+
+ err_free_common:
++ free_irq(gpio_to_irq(p54spi_gpio_irq), spi);
++err_free_gpio_irq:
++ gpio_free(p54spi_gpio_irq);
++err_free_gpio_power:
++ gpio_free(p54spi_gpio_power);
++err_free:
+ p54_free_common(priv->hw);
+ return ret;
+ }
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index 3265b34..cb71e88 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -935,6 +935,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x07d1, 0x3c0f) },
+ { USB_DEVICE(0x07d1, 0x3c11) },
+ { USB_DEVICE(0x07d1, 0x3c16) },
++ { USB_DEVICE(0x2001, 0x3c1b) },
+ /* Draytek */
+ { USB_DEVICE(0x07fa, 0x7712) },
+ /* DVICO */
+diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
+index eb61061..9afcad3 100644
+--- a/drivers/net/wireless/rtlwifi/pci.c
++++ b/drivers/net/wireless/rtlwifi/pci.c
+@@ -657,6 +657,8 @@ static void _rtl_receive_one(struct ieee80211_hw *hw, struct sk_buff *skb,
+ return;
+
+ uskb = dev_alloc_skb(skb->len + 128);
++ if (!uskb)
++ return; /* exit if allocation failed */
+ memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, sizeof(rx_status));
+ pdata = (u8 *)skb_put(uskb, skb->len);
+ memcpy(pdata, skb->data, skb->len);
+@@ -1153,10 +1155,12 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
+ ring->idx = (ring->idx + 1) % ring->entries;
+ }
+
+- pci_free_consistent(rtlpci->pdev,
+- sizeof(*ring->desc) * ring->entries,
+- ring->desc, ring->dma);
+- ring->desc = NULL;
++ if (ring->desc) {
++ pci_free_consistent(rtlpci->pdev,
++ sizeof(*ring->desc) * ring->entries,
++ ring->desc, ring->dma);
++ ring->desc = NULL;
++ }
+ }
+
+ static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci)
+@@ -1180,12 +1184,14 @@ static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci)
+ kfree_skb(skb);
+ }
+
+- pci_free_consistent(rtlpci->pdev,
++ if (rtlpci->rx_ring[rx_queue_idx].desc) {
++ pci_free_consistent(rtlpci->pdev,
+ sizeof(*rtlpci->rx_ring[rx_queue_idx].
+ desc) * rtlpci->rxringcount,
+ rtlpci->rx_ring[rx_queue_idx].desc,
+ rtlpci->rx_ring[rx_queue_idx].dma);
+- rtlpci->rx_ring[rx_queue_idx].desc = NULL;
++ rtlpci->rx_ring[rx_queue_idx].desc = NULL;
++ }
+ }
+ }
+
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+index 72a98ca..a004ad7 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+@@ -524,6 +524,10 @@ void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
+ dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
+ dm_digtable.backoff_val));
+
++ dm_digtable.cur_igvalue += 2;
++ if (dm_digtable.cur_igvalue > 0x3f)
++ dm_digtable.cur_igvalue = 0x3f;
++
+ if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
+ rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
+ dm_digtable.cur_igvalue);
+@@ -1219,13 +1223,18 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
+ ("PreState = %d, CurState = %d\n",
+ p_ra->pre_ratr_state, p_ra->ratr_state));
+
+- rcu_read_lock();
+- sta = ieee80211_find_sta(mac->vif, mac->bssid);
++ /* Only the PCI card uses sta in the update rate table
++ * callback routine */
++ if (rtlhal->interface == INTF_PCI) {
++ rcu_read_lock();
++ sta = ieee80211_find_sta(mac->vif, mac->bssid);
++ }
+ rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
+ p_ra->ratr_state);
+
+ p_ra->pre_ratr_state = p_ra->ratr_state;
+- rcu_read_unlock();
++ if (rtlhal->interface == INTF_PCI)
++ rcu_read_unlock();
+ }
+ }
+ }
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+index 950c65a..13fc0f9 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+@@ -752,6 +752,8 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
+
+
+ skb = dev_alloc_skb(totalpacketlen);
++ if (!skb)
++ return;
+ memcpy((u8 *) skb_put(skb, totalpacketlen),
+ &reserved_page_packet, totalpacketlen);
+
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
+index 82f060b..c44757f 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
+@@ -763,12 +763,16 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
+ "rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
+ u1RsvdPageLoc, 3);
+ skb = dev_alloc_skb(totalpacketlen);
+- memcpy((u8 *) skb_put(skb, totalpacketlen), &reserved_page_packet,
+- totalpacketlen);
+- rtstatus = _rtl92d_cmd_send_packet(hw, skb);
++ if (!skb) {
++ dlok = false;
++ } else {
++ memcpy((u8 *) skb_put(skb, totalpacketlen),
++ &reserved_page_packet, totalpacketlen);
++ rtstatus = _rtl92d_cmd_send_packet(hw, skb);
+
+- if (rtstatus)
+- dlok = true;
++ if (rtstatus)
++ dlok = true;
++ }
+ if (dlok) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("Set RSVD page location to Fw.\n"));
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index 54cb8a6..2b7bcc8 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -481,12 +481,14 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
+ u8 *pdata;
+
+ uskb = dev_alloc_skb(skb->len + 128);
+- memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
+- sizeof(rx_status));
+- pdata = (u8 *)skb_put(uskb, skb->len);
+- memcpy(pdata, skb->data, skb->len);
++ if (uskb) { /* drop packet on allocation failure */
++ memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
++ sizeof(rx_status));
++ pdata = (u8 *)skb_put(uskb, skb->len);
++ memcpy(pdata, skb->data, skb->len);
++ ieee80211_rx_irqsafe(hw, uskb);
++ }
+ dev_kfree_skb_any(skb);
+- ieee80211_rx_irqsafe(hw, uskb);
+ } else {
+ dev_kfree_skb_any(skb);
+ }
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 24f049e..2275162 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -500,9 +500,6 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
+ int pos;
+ u32 reg32;
+
+- if (aspm_disabled)
+- return 0;
+-
+ /*
+ * Some functions in a slot might not all be PCIe functions,
+ * very strange. Disable ASPM for the whole slot
+@@ -511,6 +508,16 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
+ pos = pci_pcie_cap(child);
+ if (!pos)
+ return -EINVAL;
++
++ /*
++ * If ASPM is disabled then we're not going to change
++ * the BIOS state. It's safe to continue even if it's a
++ * pre-1.1 device
++ */
++
++ if (aspm_disabled)
++ continue;
++
+ /*
+ * Disable ASPM for pre-1.1 PCIe device, we follow MS to use
+ * RBER bit to determine if a function is 1.1 version device
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index 8a1c031..565742b 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -445,6 +445,11 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
+ if (rtc->uie_rtctimer.enabled == enabled)
+ goto out;
+
++ if (rtc->uie_unsupported) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ if (enabled) {
+ struct rtc_time tm;
+ ktime_t now, onesec;
+@@ -763,6 +768,14 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+ return 0;
+ }
+
++static void rtc_alarm_disable(struct rtc_device *rtc)
++{
++ if (!rtc->ops || !rtc->ops->alarm_irq_enable)
++ return;
++
++ rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
++}
++
+ /**
+ * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
+ * @rtc rtc device
+@@ -784,8 +797,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
+ struct rtc_wkalrm alarm;
+ int err;
+ next = timerqueue_getnext(&rtc->timerqueue);
+- if (!next)
++ if (!next) {
++ rtc_alarm_disable(rtc);
+ return;
++ }
+ alarm.time = rtc_ktime_to_tm(next->expires);
+ alarm.enabled = 1;
+ err = __rtc_set_alarm(rtc, &alarm);
+@@ -847,7 +862,8 @@ again:
+ err = __rtc_set_alarm(rtc, &alarm);
+ if (err == -ETIME)
+ goto again;
+- }
++ } else
++ rtc_alarm_disable(rtc);
+
+ mutex_unlock(&rtc->ops_lock);
+ }
+diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
+index da60915..0fc2d22 100644
+--- a/drivers/rtc/rtc-mpc5121.c
++++ b/drivers/rtc/rtc-mpc5121.c
+@@ -360,6 +360,8 @@ static int __devinit mpc5121_rtc_probe(struct platform_device *op)
+ &mpc5200_rtc_ops, THIS_MODULE);
+ }
+
++ rtc->rtc->uie_unsupported = 1;
++
+ if (IS_ERR(rtc->rtc)) {
+ err = PTR_ERR(rtc->rtc);
+ goto out_free_irq;
+diff --git a/drivers/staging/rtl8712/Kconfig b/drivers/staging/rtl8712/Kconfig
+index ea37473..6a43312 100644
+--- a/drivers/staging/rtl8712/Kconfig
++++ b/drivers/staging/rtl8712/Kconfig
+@@ -9,13 +9,6 @@ config R8712U
+ This option adds the Realtek RTL8712 USB device such as the D-Link DWA-130.
+ If built as a module, it will be called r8712u.
+
+-config R8712_AP
+- bool "Realtek RTL8712U AP code"
+- depends on R8712U
+- default N
+- ---help---
+- This option allows the Realtek RTL8712 USB device to be an Access Point.
+-
+ config R8712_TX_AGGR
+ bool "Realtek RTL8712U Transmit Aggregation code"
+ depends on R8712U && BROKEN
+diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
+index 98a3d68..fb11743 100644
+--- a/drivers/staging/rtl8712/os_intfs.c
++++ b/drivers/staging/rtl8712/os_intfs.c
+@@ -476,8 +476,6 @@ static int netdev_close(struct net_device *pnetdev)
+ r8712_free_assoc_resources(padapter);
+ /*s2-4.*/
+ r8712_free_network_queue(padapter);
+- /* The interface is no longer Up: */
+- padapter->bup = false;
+ release_firmware(padapter->fw);
+ /* never exit with a firmware callback pending */
+ wait_for_completion(&padapter->rtl8712_fw_ready);
+diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+index 507584b8..ef35bc2 100644
+--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
++++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+@@ -2380,13 +2380,7 @@ static struct iw_statistics *r871x_get_wireless_stats(struct net_device *dev)
+ tmp_qual = padapter->recvpriv.signal;
+ tmp_noise = padapter->recvpriv.noise;
+ piwstats->qual.level = tmp_level;
+- /*piwstats->qual.qual = tmp_qual;
+- * The NetworkManager of Fedora 10, 13 will use the link
+- * quality for its display.
+- * So, use the fw_rssi on link quality variable because
+- * fw_rssi will be updated per 2 seconds.
+- */
+- piwstats->qual.qual = tmp_level;
++ piwstats->qual.qual = tmp_qual;
+ piwstats->qual.noise = tmp_noise;
+ }
+ piwstats->qual.updated = IW_QUAL_ALL_UPDATED;
+diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+index 64f5696..1247b3d 100644
+--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
++++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+@@ -42,9 +42,8 @@ static void _init_stainfo(struct sta_info *psta)
+ _init_listhead(&psta->hash_list);
+ _r8712_init_sta_xmit_priv(&psta->sta_xmitpriv);
+ _r8712_init_sta_recv_priv(&psta->sta_recvpriv);
+-#ifdef CONFIG_R8712_AP
++ _init_listhead(&psta->asoc_list);
+ _init_listhead(&psta->auth_list);
+-#endif
+ }
+
+ u32 _r8712_init_sta_priv(struct sta_priv *pstapriv)
+@@ -71,10 +70,8 @@ u32 _r8712_init_sta_priv(struct sta_priv *pstapriv)
+ get_list_head(&pstapriv->free_sta_queue));
+ psta++;
+ }
+-#ifdef CONFIG_R8712_AP
+ _init_listhead(&pstapriv->asoc_list);
+ _init_listhead(&pstapriv->auth_list);
+-#endif
+ return _SUCCESS;
+ }
+
+diff --git a/drivers/staging/rtl8712/sta_info.h b/drivers/staging/rtl8712/sta_info.h
+index 48d6a14..f8016e9 100644
+--- a/drivers/staging/rtl8712/sta_info.h
++++ b/drivers/staging/rtl8712/sta_info.h
+@@ -90,7 +90,6 @@ struct sta_info {
+ * curr_network(mlme_priv/security_priv/qos/ht) : AP CAP/INFO
+ * sta_info: (AP & STA) CAP/INFO
+ */
+-#ifdef CONFIG_R8712_AP
+ struct list_head asoc_list;
+ struct list_head auth_list;
+ unsigned int expire_to;
+@@ -98,7 +97,6 @@ struct sta_info {
+ unsigned int authalg;
+ unsigned char chg_txt[128];
+ unsigned int tx_ra_bitmap;
+-#endif
+ };
+
+ struct sta_priv {
+@@ -111,13 +109,11 @@ struct sta_priv {
+ struct __queue sleep_q;
+ struct __queue wakeup_q;
+ struct _adapter *padapter;
+-#ifdef CONFIG_R8712_AP
+ struct list_head asoc_list;
+ struct list_head auth_list;
+ unsigned int auth_to; /* sec, time to expire in authenticating. */
+ unsigned int assoc_to; /* sec, time to expire before associating. */
+ unsigned int expire_to; /* sec , time to expire after associated. */
+-#endif
+ };
+
+ static inline u32 wifi_mac_hash(u8 *mac)
+diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
+index f5e469d..16ad9fe 100644
+--- a/drivers/staging/zcache/zcache-main.c
++++ b/drivers/staging/zcache/zcache-main.c
+@@ -299,10 +299,12 @@ static void zbud_free_and_delist(struct zbud_hdr *zh)
+ struct zbud_page *zbpg =
+ container_of(zh, struct zbud_page, buddy[budnum]);
+
++ spin_lock(&zbud_budlists_spinlock);
+ spin_lock(&zbpg->lock);
+ if (list_empty(&zbpg->bud_list)) {
+ /* ignore zombie page... see zbud_evict_pages() */
+ spin_unlock(&zbpg->lock);
++ spin_unlock(&zbud_budlists_spinlock);
+ return;
+ }
+ size = zbud_free(zh);
+@@ -310,7 +312,6 @@ static void zbud_free_and_delist(struct zbud_hdr *zh)
+ zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
+ if (zh_other->size == 0) { /* was unbuddied: unlist and free */
+ chunks = zbud_size_to_chunks(size) ;
+- spin_lock(&zbud_budlists_spinlock);
+ BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
+ list_del_init(&zbpg->bud_list);
+ zbud_unbuddied[chunks].count--;
+@@ -318,7 +319,6 @@ static void zbud_free_and_delist(struct zbud_hdr *zh)
+ zbud_free_raw_page(zbpg);
+ } else { /* was buddied: move remaining buddy to unbuddied list */
+ chunks = zbud_size_to_chunks(zh_other->size) ;
+- spin_lock(&zbud_budlists_spinlock);
+ list_del_init(&zbpg->bud_list);
+ zcache_zbud_buddied_count--;
+ list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 03d3528..0842cc7 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -781,7 +781,7 @@ static int iscsit_alloc_buffs(struct iscsi_cmd *cmd)
+ struct scatterlist *sgl;
+ u32 length = cmd->se_cmd.data_length;
+ int nents = DIV_ROUND_UP(length, PAGE_SIZE);
+- int i = 0, ret;
++ int i = 0, j = 0, ret;
+ /*
+ * If no SCSI payload is present, allocate the default iovecs used for
+ * iSCSI PDU Header
+@@ -822,17 +822,15 @@ static int iscsit_alloc_buffs(struct iscsi_cmd *cmd)
+ */
+ ret = iscsit_allocate_iovecs(cmd);
+ if (ret < 0)
+- goto page_alloc_failed;
++ return -ENOMEM;
+
+ return 0;
+
+ page_alloc_failed:
+- while (i >= 0) {
+- __free_page(sg_page(&sgl[i]));
+- i--;
+- }
+- kfree(cmd->t_mem_sg);
+- cmd->t_mem_sg = NULL;
++ while (j < i)
++ __free_page(sg_page(&sgl[j++]));
++
++ kfree(sgl);
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
+index db32784..83dcf49 100644
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -816,9 +816,6 @@ static struct se_node_acl *lio_target_make_nodeacl(
+ if (!se_nacl_new)
+ return ERR_PTR(-ENOMEM);
+
+- acl = container_of(se_nacl_new, struct iscsi_node_acl,
+- se_node_acl);
+-
+ cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+ /*
+ * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+@@ -829,7 +826,8 @@ static struct se_node_acl *lio_target_make_nodeacl(
+ if (IS_ERR(se_nacl))
+ return se_nacl;
+
+- stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
++ acl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
++ stats_cg = &se_nacl->acl_fabric_stat_group;
+
+ stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
+index 81d5832..5d1d4f2 100644
+--- a/drivers/target/loopback/tcm_loop.c
++++ b/drivers/target/loopback/tcm_loop.c
+@@ -866,6 +866,9 @@ static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
+
+ sc->result = SAM_STAT_GOOD;
+ set_host_byte(sc, DID_OK);
++ if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
++ (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
++ scsi_set_resid(sc, se_cmd->residual_count);
+ sc->scsi_done(sc);
+ return 0;
+ }
+@@ -891,6 +894,9 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
+ sc->result = se_cmd->scsi_status;
+
+ set_host_byte(sc, DID_OK);
++ if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
++ (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
++ scsi_set_resid(sc, se_cmd->residual_count);
+ sc->scsi_done(sc);
+ return 0;
+ }
+diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
+index 1d24512..5b05744 100644
+--- a/drivers/target/target_core_alua.c
++++ b/drivers/target/target_core_alua.c
+@@ -30,6 +30,7 @@
+ #include <linux/export.h>
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_cmnd.h>
++#include <asm/unaligned.h>
+
+ #include <target/target_core_base.h>
+ #include <target/target_core_device.h>
+@@ -268,8 +269,7 @@ int target_emulate_set_target_port_groups(struct se_task *task)
+ * changed.
+ */
+ if (primary) {
+- tg_pt_id = ((ptr[2] << 8) & 0xff);
+- tg_pt_id |= (ptr[3] & 0xff);
++ tg_pt_id = get_unaligned_be16(ptr + 2);
+ /*
+ * Locate the matching target port group ID from
+ * the global tg_pt_gp list
+@@ -313,8 +313,7 @@ int target_emulate_set_target_port_groups(struct se_task *task)
+ * the Target Port in question for the the incoming
+ * SET_TARGET_PORT_GROUPS op.
+ */
+- rtpi = ((ptr[2] << 8) & 0xff);
+- rtpi |= (ptr[3] & 0xff);
++ rtpi = get_unaligned_be16(ptr + 2);
+ /*
+ * Locate the matching relative target port identifer
+ * for the struct se_device storage object.
+diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
+index 8facd33..65ea65a 100644
+--- a/drivers/target/target_core_cdb.c
++++ b/drivers/target/target_core_cdb.c
+@@ -116,7 +116,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
+ goto out;
+ }
+
+- buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
++ buf[7] = 0x2; /* CmdQue=1 */
+
+ /*
+ * Do not include vendor, product, reversion info in INQUIRY
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index 19f8aca..f8773ae 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -658,7 +658,9 @@ int target_report_luns(struct se_task *se_task)
+ unsigned char *buf;
+ u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
+
+- buf = (unsigned char *) transport_kmap_data_sg(se_cmd);
++ buf = transport_kmap_data_sg(se_cmd);
++ if (!buf)
++ return -ENOMEM;
+
+ /*
+ * If no struct se_session pointer is present, this struct se_cmd is
+@@ -696,12 +698,12 @@ int target_report_luns(struct se_task *se_task)
+ * See SPC3 r07, page 159.
+ */
+ done:
+- transport_kunmap_data_sg(se_cmd);
+ lun_count *= 8;
+ buf[0] = ((lun_count >> 24) & 0xff);
+ buf[1] = ((lun_count >> 16) & 0xff);
+ buf[2] = ((lun_count >> 8) & 0xff);
+ buf[3] = (lun_count & 0xff);
++ transport_kunmap_data_sg(se_cmd);
+
+ se_task->task_scsi_status = GOOD;
+ transport_complete_task(se_task, 1);
+diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
+index 71fc9ce..754b669 100644
+--- a/drivers/target/tcm_fc/tfc_cmd.c
++++ b/drivers/target/tcm_fc/tfc_cmd.c
+@@ -329,10 +329,12 @@ static void ft_send_resp_status(struct fc_lport *lport,
+
+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
+ sp = fr_seq(fp);
+- if (sp)
++ if (sp) {
+ lport->tt.seq_send(lport, sp, fp);
+- else
++ lport->tt.exch_done(sp);
++ } else {
+ lport->tt.frame_send(lport, fp);
++ }
+ }
+
+ /*
+diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
+index d15a071..0174d2d 100644
+--- a/drivers/tty/moxa.c
++++ b/drivers/tty/moxa.c
+@@ -1331,7 +1331,7 @@ static void moxa_start(struct tty_struct *tty)
+ if (ch == NULL)
+ return;
+
+- if (!(ch->statusflags & TXSTOPPED))
++ if (!test_bit(TXSTOPPED, &ch->statusflags))
+ return;
+
+ MoxaPortTxEnable(ch);
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index aff9d61..829e51a 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -1123,17 +1123,20 @@ static void sci_dma_tx_complete(void *arg)
+ port->icount.tx += sg_dma_len(&s->sg_tx);
+
+ async_tx_ack(s->desc_tx);
+- s->cookie_tx = -EINVAL;
+ s->desc_tx = NULL;
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (!uart_circ_empty(xmit)) {
++ s->cookie_tx = 0;
+ schedule_work(&s->work_tx);
+- } else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+- u16 ctrl = sci_in(port, SCSCR);
+- sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
++ } else {
++ s->cookie_tx = -EINVAL;
++ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
++ u16 ctrl = sci_in(port, SCSCR);
++ sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
++ }
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+@@ -1395,8 +1398,10 @@ static void sci_start_tx(struct uart_port *port)
+ }
+
+ if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
+- s->cookie_tx < 0)
++ s->cookie_tx < 0) {
++ s->cookie_tx = 0;
+ schedule_work(&s->work_tx);
++ }
+ #endif
+
+ if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
+index 45d3e80..f343808 100644
+--- a/drivers/tty/vt/consolemap.c
++++ b/drivers/tty/vt/consolemap.c
+@@ -516,6 +516,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
+ int err = 0, err1, i;
+ struct uni_pagedir *p, *q;
+
++ /* Save original vc_unipagdir_loc in case we allocate a new one */
+ p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
+ if (p->readonly) return -EIO;
+
+@@ -528,26 +529,57 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
+ err1 = con_clear_unimap(vc, NULL);
+ if (err1) return err1;
+
++ /*
++ * Since refcount was > 1, con_clear_unimap() allocated a
++ * a new uni_pagedir for this vc. Re: p != q
++ */
+ q = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc;
+- for (i = 0, l = 0; i < 32; i++)
++
++ /*
++ * uni_pgdir is a 32*32*64 table with rows allocated
++ * when its first entry is added. The unicode value must
++ * still be incremented for empty rows. We are copying
++ * entries from "p" (old) to "q" (new).
++ */
++ l = 0; /* unicode value */
++ for (i = 0; i < 32; i++)
+ if ((p1 = p->uni_pgdir[i]))
+ for (j = 0; j < 32; j++)
+- if ((p2 = p1[j]))
++ if ((p2 = p1[j])) {
+ for (k = 0; k < 64; k++, l++)
+ if (p2[k] != 0xffff) {
++ /*
++ * Found one, copy entry for unicode
++ * l with fontpos value p2[k].
++ */
+ err1 = con_insert_unipair(q, l, p2[k]);
+ if (err1) {
+ p->refcount++;
+ *vc->vc_uni_pagedir_loc = (unsigned long)p;
+ con_release_unimap(q);
+ kfree(q);
+- return err1;
++ return err1;
+ }
+- }
+- p = q;
+- } else if (p == dflt)
++ }
++ } else {
++ /* Account for row of 64 empty entries */
++ l += 64;
++ }
++ else
++ /* Account for empty table */
++ l += 32 * 64;
++
++ /*
++ * Finished copying font table, set vc_uni_pagedir to new table
++ */
++ p = q;
++ } else if (p == dflt) {
+ dflt = NULL;
+-
++ }
++
++ /*
++ * Insert user specified unicode pairs into new table.
++ */
+ while (ct--) {
+ unsigned short unicode, fontpos;
+ __get_user(unicode, &list->unicode);
+@@ -557,11 +589,14 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
+ list++;
+ }
+
++ /*
++ * Merge with fontmaps of any other virtual consoles.
++ */
+ if (con_unify_unimap(vc, p))
+ return err;
+
+ for (i = 0; i <= 3; i++)
+- set_inverse_transl(vc, p, i); /* Update all inverse translations */
++ set_inverse_transl(vc, p, i); /* Update inverse translations */
+ set_inverse_trans_unicode(vc, p);
+
+ return err;
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index fd4aee1..9eb71d8 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -397,7 +397,7 @@ outnl:
+ static ssize_t wdm_read
+ (struct file *file, char __user *buffer, size_t count, loff_t *ppos)
+ {
+- int rv, cntr = 0;
++ int rv, cntr;
+ int i = 0;
+ struct wdm_device *desc = file->private_data;
+
+@@ -406,7 +406,8 @@ static ssize_t wdm_read
+ if (rv < 0)
+ return -ERESTARTSYS;
+
+- if (desc->length == 0) {
++ cntr = ACCESS_ONCE(desc->length);
++ if (cntr == 0) {
+ desc->read = 0;
+ retry:
+ if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
+@@ -456,26 +457,30 @@ retry:
+ spin_unlock_irq(&desc->iuspin);
+ goto retry;
+ }
+- clear_bit(WDM_READ, &desc->flags);
++ cntr = desc->length;
+ spin_unlock_irq(&desc->iuspin);
+ }
+
+- cntr = count > desc->length ? desc->length : count;
++ if (cntr > count)
++ cntr = count;
+ rv = copy_to_user(buffer, desc->ubuf, cntr);
+ if (rv > 0) {
+ rv = -EFAULT;
+ goto err;
+ }
+
++ spin_lock_irq(&desc->iuspin);
++
+ for (i = 0; i < desc->length - cntr; i++)
+ desc->ubuf[i] = desc->ubuf[i + cntr];
+
+- spin_lock_irq(&desc->iuspin);
+ desc->length -= cntr;
+- spin_unlock_irq(&desc->iuspin);
+ /* in case we had outstanding data */
+ if (!desc->length)
+ clear_bit(WDM_READ, &desc->flags);
++
++ spin_unlock_irq(&desc->iuspin);
++
+ rv = cntr;
+
+ err:
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 25dbd86..3700aa6 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -449,16 +449,16 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep,
+
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+- strncat(dep->name, "-control", sizeof(dep->name));
++ strlcat(dep->name, "-control", sizeof(dep->name));
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+- strncat(dep->name, "-isoc", sizeof(dep->name));
++ strlcat(dep->name, "-isoc", sizeof(dep->name));
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+- strncat(dep->name, "-bulk", sizeof(dep->name));
++ strlcat(dep->name, "-bulk", sizeof(dep->name));
+ break;
+ case USB_ENDPOINT_XFER_INT:
+- strncat(dep->name, "-int", sizeof(dep->name));
++ strlcat(dep->name, "-int", sizeof(dep->name));
+ break;
+ default:
+ dev_err(dwc->dev, "invalid endpoint transfer type\n");
+@@ -1405,7 +1405,7 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
+ static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
+ struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
+ {
+- u32 uf;
++ u32 uf, mask;
+
+ if (list_empty(&dep->request_list)) {
+ dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
+@@ -1413,16 +1413,10 @@ static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
+ return;
+ }
+
+- if (event->parameters) {
+- u32 mask;
+-
+- mask = ~(dep->interval - 1);
+- uf = event->parameters & mask;
+- /* 4 micro frames in the future */
+- uf += dep->interval * 4;
+- } else {
+- uf = 0;
+- }
++ mask = ~(dep->interval - 1);
++ uf = event->parameters & mask;
++ /* 4 micro frames in the future */
++ uf += dep->interval * 4;
+
+ __dwc3_gadget_kick_transfer(dep, uf, 1);
+ }
+diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
+index c154064..21ab474 100644
+--- a/drivers/usb/gadget/f_subset.c
++++ b/drivers/usb/gadget/f_subset.c
+@@ -74,7 +74,7 @@ static inline struct f_gether *func_to_geth(struct usb_function *f)
+
+ /* interface descriptor: */
+
+-static struct usb_interface_descriptor subset_data_intf __initdata = {
++static struct usb_interface_descriptor subset_data_intf = {
+ .bLength = sizeof subset_data_intf,
+ .bDescriptorType = USB_DT_INTERFACE,
+
+@@ -87,7 +87,7 @@ static struct usb_interface_descriptor subset_data_intf __initdata = {
+ /* .iInterface = DYNAMIC */
+ };
+
+-static struct usb_cdc_header_desc mdlm_header_desc __initdata = {
++static struct usb_cdc_header_desc mdlm_header_desc = {
+ .bLength = sizeof mdlm_header_desc,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_HEADER_TYPE,
+@@ -95,7 +95,7 @@ static struct usb_cdc_header_desc mdlm_header_desc __initdata = {
+ .bcdCDC = cpu_to_le16(0x0110),
+ };
+
+-static struct usb_cdc_mdlm_desc mdlm_desc __initdata = {
++static struct usb_cdc_mdlm_desc mdlm_desc = {
+ .bLength = sizeof mdlm_desc,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_MDLM_TYPE,
+@@ -111,7 +111,7 @@ static struct usb_cdc_mdlm_desc mdlm_desc __initdata = {
+ * can't really use its struct. All we do here is say that we're using
+ * the submode of "SAFE" which directly matches the CDC Subset.
+ */
+-static u8 mdlm_detail_desc[] __initdata = {
++static u8 mdlm_detail_desc[] = {
+ 6,
+ USB_DT_CS_INTERFACE,
+ USB_CDC_MDLM_DETAIL_TYPE,
+@@ -121,7 +121,7 @@ static u8 mdlm_detail_desc[] __initdata = {
+ 0, /* network data capabilities ("raw" encapsulation) */
+ };
+
+-static struct usb_cdc_ether_desc ether_desc __initdata = {
++static struct usb_cdc_ether_desc ether_desc = {
+ .bLength = sizeof ether_desc,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubType = USB_CDC_ETHERNET_TYPE,
+@@ -136,7 +136,7 @@ static struct usb_cdc_ether_desc ether_desc __initdata = {
+
+ /* full speed support: */
+
+-static struct usb_endpoint_descriptor fs_subset_in_desc __initdata = {
++static struct usb_endpoint_descriptor fs_subset_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+@@ -144,7 +144,7 @@ static struct usb_endpoint_descriptor fs_subset_in_desc __initdata = {
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ };
+
+-static struct usb_endpoint_descriptor fs_subset_out_desc __initdata = {
++static struct usb_endpoint_descriptor fs_subset_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+@@ -152,7 +152,7 @@ static struct usb_endpoint_descriptor fs_subset_out_desc __initdata = {
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ };
+
+-static struct usb_descriptor_header *fs_eth_function[] __initdata = {
++static struct usb_descriptor_header *fs_eth_function[] = {
+ (struct usb_descriptor_header *) &subset_data_intf,
+ (struct usb_descriptor_header *) &mdlm_header_desc,
+ (struct usb_descriptor_header *) &mdlm_desc,
+@@ -165,7 +165,7 @@ static struct usb_descriptor_header *fs_eth_function[] __initdata = {
+
+ /* high speed support: */
+
+-static struct usb_endpoint_descriptor hs_subset_in_desc __initdata = {
++static struct usb_endpoint_descriptor hs_subset_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+@@ -173,7 +173,7 @@ static struct usb_endpoint_descriptor hs_subset_in_desc __initdata = {
+ .wMaxPacketSize = cpu_to_le16(512),
+ };
+
+-static struct usb_endpoint_descriptor hs_subset_out_desc __initdata = {
++static struct usb_endpoint_descriptor hs_subset_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+@@ -181,7 +181,7 @@ static struct usb_endpoint_descriptor hs_subset_out_desc __initdata = {
+ .wMaxPacketSize = cpu_to_le16(512),
+ };
+
+-static struct usb_descriptor_header *hs_eth_function[] __initdata = {
++static struct usb_descriptor_header *hs_eth_function[] = {
+ (struct usb_descriptor_header *) &subset_data_intf,
+ (struct usb_descriptor_header *) &mdlm_header_desc,
+ (struct usb_descriptor_header *) &mdlm_desc,
+@@ -194,7 +194,7 @@ static struct usb_descriptor_header *hs_eth_function[] __initdata = {
+
+ /* super speed support: */
+
+-static struct usb_endpoint_descriptor ss_subset_in_desc __initdata = {
++static struct usb_endpoint_descriptor ss_subset_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+@@ -202,7 +202,7 @@ static struct usb_endpoint_descriptor ss_subset_in_desc __initdata = {
+ .wMaxPacketSize = cpu_to_le16(1024),
+ };
+
+-static struct usb_endpoint_descriptor ss_subset_out_desc __initdata = {
++static struct usb_endpoint_descriptor ss_subset_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+@@ -210,7 +210,7 @@ static struct usb_endpoint_descriptor ss_subset_out_desc __initdata = {
+ .wMaxPacketSize = cpu_to_le16(1024),
+ };
+
+-static struct usb_ss_ep_comp_descriptor ss_subset_bulk_comp_desc __initdata = {
++static struct usb_ss_ep_comp_descriptor ss_subset_bulk_comp_desc = {
+ .bLength = sizeof ss_subset_bulk_comp_desc,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+@@ -219,7 +219,7 @@ static struct usb_ss_ep_comp_descriptor ss_subset_bulk_comp_desc __initdata = {
+ /* .bmAttributes = 0, */
+ };
+
+-static struct usb_descriptor_header *ss_eth_function[] __initdata = {
++static struct usb_descriptor_header *ss_eth_function[] = {
+ (struct usb_descriptor_header *) &subset_data_intf,
+ (struct usb_descriptor_header *) &mdlm_header_desc,
+ (struct usb_descriptor_header *) &mdlm_desc,
+@@ -290,7 +290,7 @@ static void geth_disable(struct usb_function *f)
+
+ /* serial function driver setup/binding */
+
+-static int __init
++static int
+ geth_bind(struct usb_configuration *c, struct usb_function *f)
+ {
+ struct usb_composite_dev *cdev = c->cdev;
+@@ -404,7 +404,7 @@ geth_unbind(struct usb_configuration *c, struct usb_function *f)
+ * Caller must have called @gether_setup(). Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+-int __init geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
++int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+ {
+ struct f_gether *geth;
+ int status;
+diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
+index dd28ef3..8e3e509 100644
+--- a/drivers/usb/gadget/fsl_udc_core.c
++++ b/drivers/usb/gadget/fsl_udc_core.c
+@@ -768,7 +768,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
+ * @is_last: return flag if it is the last dTD of the request
+ * return: pointer to the built dTD */
+ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
+- dma_addr_t *dma, int *is_last)
++ dma_addr_t *dma, int *is_last, gfp_t gfp_flags)
+ {
+ u32 swap_temp;
+ struct ep_td_struct *dtd;
+@@ -777,7 +777,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
+ *length = min(req->req.length - req->req.actual,
+ (unsigned)EP_MAX_LENGTH_TRANSFER);
+
+- dtd = dma_pool_alloc(udc_controller->td_pool, GFP_KERNEL, dma);
++ dtd = dma_pool_alloc(udc_controller->td_pool, gfp_flags, dma);
+ if (dtd == NULL)
+ return dtd;
+
+@@ -827,7 +827,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
+ }
+
+ /* Generate dtd chain for a request */
+-static int fsl_req_to_dtd(struct fsl_req *req)
++static int fsl_req_to_dtd(struct fsl_req *req, gfp_t gfp_flags)
+ {
+ unsigned count;
+ int is_last;
+@@ -836,7 +836,7 @@ static int fsl_req_to_dtd(struct fsl_req *req)
+ dma_addr_t dma;
+
+ do {
+- dtd = fsl_build_dtd(req, &count, &dma, &is_last);
++ dtd = fsl_build_dtd(req, &count, &dma, &is_last, gfp_flags);
+ if (dtd == NULL)
+ return -ENOMEM;
+
+@@ -910,13 +910,11 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+ req->req.actual = 0;
+ req->dtd_count = 0;
+
+- spin_lock_irqsave(&udc->lock, flags);
+-
+ /* build dtds and push them to device queue */
+- if (!fsl_req_to_dtd(req)) {
++ if (!fsl_req_to_dtd(req, gfp_flags)) {
++ spin_lock_irqsave(&udc->lock, flags);
+ fsl_queue_td(ep, req);
+ } else {
+- spin_unlock_irqrestore(&udc->lock, flags);
+ return -ENOMEM;
+ }
+
+@@ -1295,7 +1293,7 @@ static int ep0_prime_status(struct fsl_udc *udc, int direction)
+ ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->mapped = 1;
+
+- if (fsl_req_to_dtd(req) == 0)
++ if (fsl_req_to_dtd(req, GFP_ATOMIC) == 0)
+ fsl_queue_td(ep, req);
+ else
+ return -ENOMEM;
+@@ -1379,7 +1377,7 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
+ req->mapped = 1;
+
+ /* prime the data phase */
+- if ((fsl_req_to_dtd(req) == 0))
++ if ((fsl_req_to_dtd(req, GFP_ATOMIC) == 0))
+ fsl_queue_td(ep, req);
+ else /* no mem */
+ goto stall;
+diff --git a/drivers/usb/gadget/hid.c b/drivers/usb/gadget/hid.c
+index f888c3e..3493adf 100644
+--- a/drivers/usb/gadget/hid.c
++++ b/drivers/usb/gadget/hid.c
+@@ -60,9 +60,9 @@ static struct usb_device_descriptor device_desc = {
+ /* .bDeviceClass = USB_CLASS_COMM, */
+ /* .bDeviceSubClass = 0, */
+ /* .bDeviceProtocol = 0, */
+- .bDeviceClass = 0xEF,
+- .bDeviceSubClass = 2,
+- .bDeviceProtocol = 1,
++ .bDeviceClass = USB_CLASS_PER_INTERFACE,
++ .bDeviceSubClass = 0,
++ .bDeviceProtocol = 0,
+ /* .bMaxPacketSize0 = f(hardware) */
+
+ /* Vendor and product id can be overridden by module parameters. */
+diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
+index 6ccae27..7138540 100644
+--- a/drivers/usb/gadget/inode.c
++++ b/drivers/usb/gadget/inode.c
+@@ -1043,6 +1043,8 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ // FIXME don't call this with the spinlock held ...
+ if (copy_to_user (buf, dev->req->buf, len))
+ retval = -EFAULT;
++ else
++ retval = len;
+ clean_req (dev->gadget->ep0, dev->req);
+ /* NOTE userspace can't yet choose to stall */
+ }
+diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
+index b556a72..da487fd 100644
+--- a/drivers/usb/host/ehci-fsl.c
++++ b/drivers/usb/host/ehci-fsl.c
+@@ -216,6 +216,8 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci,
+ unsigned int port_offset)
+ {
+ u32 portsc;
++ struct usb_hcd *hcd = ehci_to_hcd(ehci);
++ void __iomem *non_ehci = hcd->regs;
+
+ portsc = ehci_readl(ehci, &ehci->regs->port_status[port_offset]);
+ portsc &= ~(PORT_PTS_MSK | PORT_PTS_PTW);
+@@ -231,6 +233,8 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci,
+ portsc |= PORT_PTS_PTW;
+ /* fall through */
+ case FSL_USB2_PHY_UTMI:
++ /* enable UTMI PHY */
++ setbits32(non_ehci + FSL_SOC_USB_CTRL, CTRL_UTMI_PHY_EN);
+ portsc |= PORT_PTS_UTMI;
+ break;
+ case FSL_USB2_PHY_NONE:
+diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
+index 4918062..bea5013 100644
+--- a/drivers/usb/host/ehci-fsl.h
++++ b/drivers/usb/host/ehci-fsl.h
+@@ -45,5 +45,6 @@
+ #define FSL_SOC_USB_PRICTRL 0x40c /* NOTE: big-endian */
+ #define FSL_SOC_USB_SICTRL 0x410 /* NOTE: big-endian */
+ #define FSL_SOC_USB_CTRL 0x500 /* NOTE: big-endian */
++#define CTRL_UTMI_PHY_EN (1<<9)
+ #define SNOOP_SIZE_2GB 0x1e
+ #endif /* _EHCI_FSL_H */
+diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
+index 9037035..5a42cf0 100644
+--- a/drivers/usb/host/fsl-mph-dr-of.c
++++ b/drivers/usb/host/fsl-mph-dr-of.c
+@@ -94,7 +94,6 @@ struct platform_device * __devinit fsl_usb2_device_register(
+ pdev->dev.parent = &ofdev->dev;
+
+ pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask;
+- pdev->dev.dma_mask = &pdev->archdata.dma_mask;
+ *pdev->dev.dma_mask = *ofdev->dev.dma_mask;
+
+ retval = platform_device_add_data(pdev, pdata, sizeof(*pdata));
+diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
+index 922148f..c860597 100644
+--- a/drivers/usb/musb/musb_gadget.c
++++ b/drivers/usb/musb/musb_gadget.c
+@@ -576,6 +576,15 @@ void musb_g_tx(struct musb *musb, u8 epnum)
+
+ if (request->actual == request->length) {
+ musb_g_giveback(musb_ep, request, 0);
++ /*
++ * In the giveback function the MUSB lock is
++ * released and acquired after sometime. During
++ * this time period the INDEX register could get
++ * changed by the gadget_queue function especially
++ * on SMP systems. Reselect the INDEX to be sure
++ * we are reading/modifying the right registers
++ */
++ musb_ep_select(mbase, epnum);
+ req = musb_ep->desc ? next_request(musb_ep) : NULL;
+ if (!req) {
+ dev_dbg(musb->controller, "%s idle now\n",
+@@ -985,6 +994,15 @@ void musb_g_rx(struct musb *musb, u8 epnum)
+ }
+ #endif
+ musb_g_giveback(musb_ep, request, 0);
++ /*
++ * In the giveback function the MUSB lock is
++ * released and acquired after sometime. During
++ * this time period the INDEX register could get
++ * changed by the gadget_queue function especially
++ * on SMP systems. Reselect the INDEX to be sure
++ * we are reading/modifying the right registers
++ */
++ musb_ep_select(mbase, epnum);
+
+ req = next_request(musb_ep);
+ if (!req)
+diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
+index 7f4e803..aa0d183 100644
+--- a/drivers/usb/renesas_usbhs/mod_gadget.c
++++ b/drivers/usb/renesas_usbhs/mod_gadget.c
+@@ -816,6 +816,11 @@ static int usbhsg_stop(struct usbhs_priv *priv)
+ return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED);
+ }
+
++static void usbhs_mod_gadget_release(struct device *pdev)
++{
++ /* do nothing */
++}
++
+ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
+ {
+ struct usbhsg_gpriv *gpriv;
+@@ -864,6 +869,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
+ */
+ dev_set_name(&gpriv->gadget.dev, "gadget");
+ gpriv->gadget.dev.parent = dev;
++ gpriv->gadget.dev.release = usbhs_mod_gadget_release;
+ gpriv->gadget.name = "renesas_usbhs_udc";
+ gpriv->gadget.ops = &usbhsg_gadget_ops;
+ gpriv->gadget.is_dualspeed = 1;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 33d25d4..4c12404 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -49,6 +49,7 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port,
+ unsigned int, unsigned int);
+ static void cp210x_break_ctl(struct tty_struct *, int);
+ static int cp210x_startup(struct usb_serial *);
++static void cp210x_release(struct usb_serial *);
+ static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
+
+ static int debug;
+@@ -121,6 +122,8 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
++ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
++ { USB_DEVICE(0x10C4, 0xEA80) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
+ { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
+ { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
+@@ -149,6 +152,10 @@ static const struct usb_device_id id_table[] = {
+
+ MODULE_DEVICE_TABLE(usb, id_table);
+
++struct cp210x_port_private {
++ __u8 bInterfaceNumber;
++};
++
+ static struct usb_driver cp210x_driver = {
+ .name = "cp210x",
+ .probe = usb_serial_probe,
+@@ -174,6 +181,7 @@ static struct usb_serial_driver cp210x_device = {
+ .tiocmget = cp210x_tiocmget,
+ .tiocmset = cp210x_tiocmset,
+ .attach = cp210x_startup,
++ .release = cp210x_release,
+ .dtr_rts = cp210x_dtr_rts
+ };
+
+@@ -261,6 +269,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
+ unsigned int *data, int size)
+ {
+ struct usb_serial *serial = port->serial;
++ struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
+ __le32 *buf;
+ int result, i, length;
+
+@@ -276,7 +285,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
+ /* Issue the request, attempting to read 'size' bytes */
+ result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ request, REQTYPE_DEVICE_TO_HOST, 0x0000,
+- 0, buf, size, 300);
++ port_priv->bInterfaceNumber, buf, size, 300);
+
+ /* Convert data into an array of integers */
+ for (i = 0; i < length; i++)
+@@ -304,6 +313,7 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
+ unsigned int *data, int size)
+ {
+ struct usb_serial *serial = port->serial;
++ struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
+ __le32 *buf;
+ int result, i, length;
+
+@@ -325,12 +335,12 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
+ result = usb_control_msg(serial->dev,
+ usb_sndctrlpipe(serial->dev, 0),
+ request, REQTYPE_HOST_TO_DEVICE, 0x0000,
+- 0, buf, size, 300);
++ port_priv->bInterfaceNumber, buf, size, 300);
+ } else {
+ result = usb_control_msg(serial->dev,
+ usb_sndctrlpipe(serial->dev, 0),
+ request, REQTYPE_HOST_TO_DEVICE, data[0],
+- 0, NULL, 0, 300);
++ port_priv->bInterfaceNumber, NULL, 0, 300);
+ }
+
+ kfree(buf);
+@@ -830,11 +840,39 @@ static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
+
+ static int cp210x_startup(struct usb_serial *serial)
+ {
++ struct cp210x_port_private *port_priv;
++ int i;
++
+ /* cp210x buffers behave strangely unless device is reset */
+ usb_reset_device(serial->dev);
++
++ for (i = 0; i < serial->num_ports; i++) {
++ port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL);
++ if (!port_priv)
++ return -ENOMEM;
++
++ memset(port_priv, 0x00, sizeof(*port_priv));
++ port_priv->bInterfaceNumber =
++ serial->interface->cur_altsetting->desc.bInterfaceNumber;
++
++ usb_set_serial_port_data(serial->port[i], port_priv);
++ }
++
+ return 0;
+ }
+
++static void cp210x_release(struct usb_serial *serial)
++{
++ struct cp210x_port_private *port_priv;
++ int i;
++
++ for (i = 0; i < serial->num_ports; i++) {
++ port_priv = usb_get_serial_port_data(serial->port[i]);
++ kfree(port_priv);
++ usb_set_serial_port_data(serial->port[i], NULL);
++ }
++}
++
+ static int __init cp210x_init(void)
+ {
+ int retval;
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index f030471..f2c9ef7 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -188,6 +188,7 @@ static struct usb_device_id id_table_combined [] = {
+ .driver_info = (kernel_ulong_t)&ftdi_8u2232c_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_232H_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_FTX_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
+@@ -536,6 +537,10 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_6_PID) },
+ { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_7_PID) },
+ { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) },
++ { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_1_PID) },
++ { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_2_PID) },
++ { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_3_PID) },
++ { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_4_PID) },
+ { USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) },
+ { USB_DEVICE(OCT_VID, OCT_US101_PID) },
+ { USB_DEVICE(OCT_VID, OCT_DK201_PID) },
+@@ -797,7 +802,7 @@ static struct usb_device_id id_table_combined [] = {
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+- { USB_DEVICE(HORNBY_VID, HORNBY_ELITE_PID) },
++ { USB_DEVICE(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID) },
+ { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
+ { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+@@ -846,6 +851,9 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_RF_R106) },
++ { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID),
++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++ { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
+ { }, /* Optional parameter entry */
+ { } /* Terminating entry */
+ };
+@@ -868,7 +876,8 @@ static const char *ftdi_chip_name[] = {
+ [FT232RL] = "FT232RL",
+ [FT2232H] = "FT2232H",
+ [FT4232H] = "FT4232H",
+- [FT232H] = "FT232H"
++ [FT232H] = "FT232H",
++ [FTX] = "FT-X"
+ };
+
+
+@@ -1169,7 +1178,8 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
+ break;
+ case FT232BM: /* FT232BM chip */
+ case FT2232C: /* FT2232C chip */
+- case FT232RL:
++ case FT232RL: /* FT232RL chip */
++ case FTX: /* FT-X series */
+ if (baud <= 3000000) {
+ __u16 product_id = le16_to_cpu(
+ port->serial->dev->descriptor.idProduct);
+@@ -1458,10 +1468,14 @@ static void ftdi_determine_type(struct usb_serial_port *port)
+ } else if (version < 0x900) {
+ /* Assume it's an FT232RL */
+ priv->chip_type = FT232RL;
+- } else {
++ } else if (version < 0x1000) {
+ /* Assume it's an FT232H */
+ priv->chip_type = FT232H;
++ } else {
++ /* Assume it's an FT-X series device */
++ priv->chip_type = FTX;
+ }
++
+ dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]);
+ }
+
+@@ -1589,7 +1603,8 @@ static int create_sysfs_attrs(struct usb_serial_port *port)
+ priv->chip_type == FT232RL ||
+ priv->chip_type == FT2232H ||
+ priv->chip_type == FT4232H ||
+- priv->chip_type == FT232H)) {
++ priv->chip_type == FT232H ||
++ priv->chip_type == FTX)) {
+ retval = device_create_file(&port->dev,
+ &dev_attr_latency_timer);
+ }
+@@ -1611,7 +1626,8 @@ static void remove_sysfs_attrs(struct usb_serial_port *port)
+ priv->chip_type == FT232RL ||
+ priv->chip_type == FT2232H ||
+ priv->chip_type == FT4232H ||
+- priv->chip_type == FT232H) {
++ priv->chip_type == FT232H ||
++ priv->chip_type == FTX) {
+ device_remove_file(&port->dev, &dev_attr_latency_timer);
+ }
+ }
+@@ -1763,7 +1779,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
+
+ dbg("%s", __func__);
+
+- if (strcmp(udev->manufacturer, "CALAO Systems") == 0)
++ if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) ||
++ (udev->product && !strcmp(udev->product, "BeagleBone/XDS100")))
+ return ftdi_jtag_probe(serial);
+
+ return 0;
+@@ -2284,6 +2301,7 @@ static int ftdi_tiocmget(struct tty_struct *tty)
+ case FT2232H:
+ case FT4232H:
+ case FT232H:
++ case FTX:
+ len = 2;
+ break;
+ default:
+diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
+index 19584fa..ed58c6f 100644
+--- a/drivers/usb/serial/ftdi_sio.h
++++ b/drivers/usb/serial/ftdi_sio.h
+@@ -157,7 +157,8 @@ enum ftdi_chip_type {
+ FT232RL = 5,
+ FT2232H = 6,
+ FT4232H = 7,
+- FT232H = 8
++ FT232H = 8,
++ FTX = 9,
+ };
+
+ enum ftdi_sio_baudrate {
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 4eb7715..c6dd18e 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -23,12 +23,15 @@
+ #define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
+ #define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
+ #define FTDI_232H_PID 0x6014 /* Single channel hi-speed device */
++#define FTDI_FTX_PID 0x6015 /* FT-X series (FT201X, FT230X, FT231X, etc) */
+ #define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
+ #define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
+
+
+ /*** third-party PIDs (using FTDI_VID) ***/
+
++#define FTDI_LUMEL_PD12_PID 0x6002
++
+ /*
+ * Marvell OpenRD Base, Client
+ * http://www.open-rd.org
+@@ -97,6 +100,8 @@
+ #define FTDI_TACTRIX_OPENPORT_13S_PID 0xCC49 /* OpenPort 1.3 Subaru */
+ #define FTDI_TACTRIX_OPENPORT_13U_PID 0xCC4A /* OpenPort 1.3 Universal */
+
++#define FTDI_DISTORTEC_JTAG_LOCK_PICK_PID 0xCFF8
++
+ /* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */
+ /* the VID is the standard ftdi vid (FTDI_VID) */
+ #define FTDI_SCS_DEVICE_0_PID 0xD010 /* SCS PTC-IIusb */
+@@ -532,10 +537,14 @@
+ #define ADI_GNICEPLUS_PID 0xF001
+
+ /*
+- * Hornby Elite
++ * Microchip Technology, Inc.
++ *
++ * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are also used by:
++ * Hornby Elite - Digital Command Control Console
++ * http://www.hornby.com/hornby-dcc/controllers/
+ */
+-#define HORNBY_VID 0x04D8
+-#define HORNBY_ELITE_PID 0x000A
++#define MICROCHIP_VID 0x04D8
++#define MICROCHIP_USB_BOARD_PID 0x000A /* CDC RS-232 Emulation Demo */
+
+ /*
+ * RATOC REX-USB60F
+@@ -680,6 +689,10 @@
+ #define SEALEVEL_2803_6_PID 0X2863 /* SeaLINK+8 (2803) Port 6 */
+ #define SEALEVEL_2803_7_PID 0X2873 /* SeaLINK+8 (2803) Port 7 */
+ #define SEALEVEL_2803_8_PID 0X2883 /* SeaLINK+8 (2803) Port 8 */
++#define SEALEVEL_2803R_1_PID 0Xa02a /* SeaLINK+8 (2803-ROHS) Port 1+2 */
++#define SEALEVEL_2803R_2_PID 0Xa02b /* SeaLINK+8 (2803-ROHS) Port 3+4 */
++#define SEALEVEL_2803R_3_PID 0Xa02c /* SeaLINK+8 (2803-ROHS) Port 5+6 */
++#define SEALEVEL_2803R_4_PID 0Xa02d /* SeaLINK+8 (2803-ROHS) Port 7+8 */
+
+ /*
+ * JETI SPECTROMETER SPECBOS 1201
+diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
+index e4db5ad..9f0b2bf 100644
+--- a/drivers/usb/serial/generic.c
++++ b/drivers/usb/serial/generic.c
+@@ -215,8 +215,10 @@ retry:
+ clear_bit(i, &port->write_urbs_free);
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result) {
+- dev_err(&port->dev, "%s - error submitting urb: %d\n",
++ if (!port->port.console) {
++ dev_err(&port->dev, "%s - error submitting urb: %d\n",
+ __func__, result);
++ }
+ set_bit(i, &port->write_urbs_free);
+ spin_lock_irqsave(&port->lock, flags);
+ port->tx_bytes -= count;
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index c72abd5..5c7d654 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -174,6 +174,7 @@
+
+ #define CLK_MULTI_REGISTER ((__u16)(0x02))
+ #define CLK_START_VALUE_REGISTER ((__u16)(0x03))
++#define GPIO_REGISTER ((__u16)(0x07))
+
+ #define SERIAL_LCR_DLAB ((__u16)(0x0080))
+
+@@ -1103,14 +1104,25 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ mos7840_port->read_urb = port->read_urb;
+
+ /* set up our bulk in urb */
+-
+- usb_fill_bulk_urb(mos7840_port->read_urb,
+- serial->dev,
+- usb_rcvbulkpipe(serial->dev,
+- port->bulk_in_endpointAddress),
+- port->bulk_in_buffer,
+- mos7840_port->read_urb->transfer_buffer_length,
+- mos7840_bulk_in_callback, mos7840_port);
++ if ((serial->num_ports == 2)
++ && ((((__u16)port->number -
++ (__u16)(port->serial->minor)) % 2) != 0)) {
++ usb_fill_bulk_urb(mos7840_port->read_urb,
++ serial->dev,
++ usb_rcvbulkpipe(serial->dev,
++ (port->bulk_in_endpointAddress) + 2),
++ port->bulk_in_buffer,
++ mos7840_port->read_urb->transfer_buffer_length,
++ mos7840_bulk_in_callback, mos7840_port);
++ } else {
++ usb_fill_bulk_urb(mos7840_port->read_urb,
++ serial->dev,
++ usb_rcvbulkpipe(serial->dev,
++ port->bulk_in_endpointAddress),
++ port->bulk_in_buffer,
++ mos7840_port->read_urb->transfer_buffer_length,
++ mos7840_bulk_in_callback, mos7840_port);
++ }
+
+ dbg("mos7840_open: bulkin endpoint is %d",
+ port->bulk_in_endpointAddress);
+@@ -1521,13 +1533,25 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
+ memcpy(urb->transfer_buffer, current_position, transfer_size);
+
+ /* fill urb with data and submit */
+- usb_fill_bulk_urb(urb,
+- serial->dev,
+- usb_sndbulkpipe(serial->dev,
+- port->bulk_out_endpointAddress),
+- urb->transfer_buffer,
+- transfer_size,
+- mos7840_bulk_out_data_callback, mos7840_port);
++ if ((serial->num_ports == 2)
++ && ((((__u16)port->number -
++ (__u16)(port->serial->minor)) % 2) != 0)) {
++ usb_fill_bulk_urb(urb,
++ serial->dev,
++ usb_sndbulkpipe(serial->dev,
++ (port->bulk_out_endpointAddress) + 2),
++ urb->transfer_buffer,
++ transfer_size,
++ mos7840_bulk_out_data_callback, mos7840_port);
++ } else {
++ usb_fill_bulk_urb(urb,
++ serial->dev,
++ usb_sndbulkpipe(serial->dev,
++ port->bulk_out_endpointAddress),
++ urb->transfer_buffer,
++ transfer_size,
++ mos7840_bulk_out_data_callback, mos7840_port);
++ }
+
+ data1 = urb->transfer_buffer;
+ dbg("bulkout endpoint is %d", port->bulk_out_endpointAddress);
+@@ -1840,7 +1864,7 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port,
+
+ } else {
+ #ifdef HW_flow_control
+- / *setting h/w flow control bit to 0 */
++ /* setting h/w flow control bit to 0 */
+ Data = 0xb;
+ mos7840_port->shadowMCR = Data;
+ status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
+@@ -2309,19 +2333,26 @@ static int mos7840_ioctl(struct tty_struct *tty,
+
+ static int mos7840_calc_num_ports(struct usb_serial *serial)
+ {
+- int mos7840_num_ports = 0;
+-
+- dbg("numberofendpoints: cur %d, alt %d",
+- (int)serial->interface->cur_altsetting->desc.bNumEndpoints,
+- (int)serial->interface->altsetting->desc.bNumEndpoints);
+- if (serial->interface->cur_altsetting->desc.bNumEndpoints == 5) {
+- mos7840_num_ports = serial->num_ports = 2;
+- } else if (serial->interface->cur_altsetting->desc.bNumEndpoints == 9) {
++ __u16 Data = 0x00;
++ int ret = 0;
++ int mos7840_num_ports;
++
++ ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
++ MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &Data,
++ VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
++
++ if ((Data & 0x01) == 0) {
++ mos7840_num_ports = 2;
++ serial->num_bulk_in = 2;
++ serial->num_bulk_out = 2;
++ serial->num_ports = 2;
++ } else {
++ mos7840_num_ports = 4;
+ serial->num_bulk_in = 4;
+ serial->num_bulk_out = 4;
+- mos7840_num_ports = serial->num_ports = 4;
++ serial->num_ports = 4;
+ }
+- dbg ("mos7840_num_ports = %d", mos7840_num_ports);
++
+ return mos7840_num_ports;
+ }
+
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 68fa8c7..54898c9 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -307,6 +307,9 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_VENDOR_ID 0x1bc7
+ #define TELIT_PRODUCT_UC864E 0x1003
+ #define TELIT_PRODUCT_UC864G 0x1004
++#define TELIT_PRODUCT_CC864_DUAL 0x1005
++#define TELIT_PRODUCT_CC864_SINGLE 0x1006
++#define TELIT_PRODUCT_DE910_DUAL 0x1010
+
+ /* ZTE PRODUCTS */
+ #define ZTE_VENDOR_ID 0x19d2
+@@ -484,6 +487,9 @@ static void option_instat_callback(struct urb *urb);
+ #define LG_VENDOR_ID 0x1004
+ #define LG_PRODUCT_L02C 0x618f
+
++/* MediaTek products */
++#define MEDIATEK_VENDOR_ID 0x0e8d
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+@@ -768,6 +774,9 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+@@ -892,6 +901,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
+@@ -1198,6 +1209,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+@@ -1360,6 +1375,7 @@ static int option_probe(struct usb_serial *serial,
+ serial->interface->cur_altsetting->desc.bInterfaceNumber,
+ OPTION_BLACKLIST_RESERVED_IF,
+ (const struct option_blacklist_info *) id->driver_info))
++ return -ENODEV;
+
+ /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */
+ if (serial->dev->descriptor.idVendor == SAMSUNG_VENDOR_ID &&
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index aa9367f..3187d8b 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -24,34 +24,44 @@
+
+ static int debug;
+
++#define DEVICE_G1K(v, p) \
++ USB_DEVICE(v, p), .driver_info = 1
++
+ static const struct usb_device_id id_table[] = {
+- {USB_DEVICE(0x05c6, 0x9211)}, /* Acer Gobi QDL device */
+- {USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
+- {USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
+- {USB_DEVICE(0x03f0, 0x201d)}, /* HP un2400 Gobi QDL Device */
+- {USB_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
+- {USB_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
+- {USB_DEVICE(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */
+- {USB_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */
+- {USB_DEVICE(0x413c, 0x8171)}, /* Dell Gobi QDL device */
+- {USB_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
+- {USB_DEVICE(0x1410, 0xa008)}, /* Novatel Gobi QDL device */
+- {USB_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
+- {USB_DEVICE(0x0b05, 0x1774)}, /* Asus Gobi QDL device */
+- {USB_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
+- {USB_DEVICE(0x19d2, 0xfff2)}, /* ONDA Gobi QDL device */
+- {USB_DEVICE(0x1557, 0x0a80)}, /* OQO Gobi QDL device */
+- {USB_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
+- {USB_DEVICE(0x05c6, 0x9002)}, /* Generic Gobi Modem device */
+- {USB_DEVICE(0x05c6, 0x9202)}, /* Generic Gobi Modem device */
+- {USB_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
+- {USB_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
+- {USB_DEVICE(0x05c6, 0x9008)}, /* Generic Gobi QDL device */
+- {USB_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
+- {USB_DEVICE(0x05c6, 0x9201)}, /* Generic Gobi QDL device */
+- {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
+- {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
+- {USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
++ /* Gobi 1000 devices */
++ {DEVICE_G1K(0x05c6, 0x9211)}, /* Acer Gobi QDL device */
++ {DEVICE_G1K(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
++ {DEVICE_G1K(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
++ {DEVICE_G1K(0x03f0, 0x201d)}, /* HP un2400 Gobi QDL Device */
++ {DEVICE_G1K(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
++ {DEVICE_G1K(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */
++ {DEVICE_G1K(0x413c, 0x8172)}, /* Dell Gobi Modem device */
++ {DEVICE_G1K(0x413c, 0x8171)}, /* Dell Gobi QDL device */
++ {DEVICE_G1K(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa008)}, /* Novatel Gobi QDL device */
++ {DEVICE_G1K(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
++ {DEVICE_G1K(0x0b05, 0x1774)}, /* Asus Gobi QDL device */
++ {DEVICE_G1K(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
++ {DEVICE_G1K(0x19d2, 0xfff2)}, /* ONDA Gobi QDL device */
++ {DEVICE_G1K(0x1557, 0x0a80)}, /* OQO Gobi QDL device */
++ {DEVICE_G1K(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
++ {DEVICE_G1K(0x05c6, 0x9002)}, /* Generic Gobi Modem device */
++ {DEVICE_G1K(0x05c6, 0x9202)}, /* Generic Gobi Modem device */
++ {DEVICE_G1K(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
++ {DEVICE_G1K(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
++ {DEVICE_G1K(0x05c6, 0x9008)}, /* Generic Gobi QDL device */
++ {DEVICE_G1K(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
++ {DEVICE_G1K(0x05c6, 0x9201)}, /* Generic Gobi QDL device */
++ {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
++ {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
++ {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
++
++ /* Gobi 2000 devices */
++ {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */
++ {USB_DEVICE(0x1410, 0xa011)}, /* Novatel Gobi 2000 QDL device */
++ {USB_DEVICE(0x1410, 0xa012)}, /* Novatel Gobi 2000 QDL device */
++ {USB_DEVICE(0x1410, 0xa013)}, /* Novatel Gobi 2000 QDL device */
++ {USB_DEVICE(0x1410, 0xa014)}, /* Novatel Gobi 2000 QDL device */
+ {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */
+ {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
+ {USB_DEVICE(0x05c6, 0x9208)}, /* Generic Gobi 2000 QDL device */
+@@ -86,7 +96,18 @@ static const struct usb_device_id id_table[] = {
+ {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
+ {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */
+ {USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
++
++ /* Gobi 3000 devices */
++ {USB_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Gobi 3000 QDL */
++ {USB_DEVICE(0x05c6, 0x920c)}, /* Gobi 3000 QDL */
++ {USB_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */
++ {USB_DEVICE(0x1410, 0xa020)}, /* Novatel Gobi 3000 QDL */
++ {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
++ {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
++ {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
+ {USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
++ {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */
++ {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+@@ -108,8 +129,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
+ int retval = -ENODEV;
+ __u8 nintf;
+ __u8 ifnum;
++ bool is_gobi1k = id->driver_info ? true : false;
+
+ dbg("%s", __func__);
++ dbg("Is Gobi 1000 = %d", is_gobi1k);
+
+ nintf = serial->dev->actconfig->desc.bNumInterfaces;
+ dbg("Num Interfaces = %d", nintf);
+@@ -157,15 +180,25 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
+
+ case 3:
+ case 4:
+- /* Composite mode */
+- /* ifnum == 0 is a broadband network adapter */
+- if (ifnum == 1) {
+- /*
+- * Diagnostics Monitor (serial line 9600 8N1)
+- * Qualcomm DM protocol
+- * use "libqcdm" (ModemManager) for communication
+- */
+- dbg("Diagnostics Monitor found");
++ /* Composite mode; don't bind to the QMI/net interface as that
++ * gets handled by other drivers.
++ */
++
++ /* Gobi 1K USB layout:
++ * 0: serial port (doesn't respond)
++ * 1: serial port (doesn't respond)
++ * 2: AT-capable modem port
++ * 3: QMI/net
++ *
++ * Gobi 2K+ USB layout:
++ * 0: QMI/net
++ * 1: DM/DIAG (use libqcdm from ModemManager for communication)
++ * 2: AT-capable modem port
++ * 3: NMEA
++ */
++
++ if (ifnum == 1 && !is_gobi1k) {
++ dbg("Gobi 2K+ DM/DIAG interface found");
+ retval = usb_set_interface(serial->dev, ifnum, 0);
+ if (retval < 0) {
+ dev_err(&serial->dev->dev,
+@@ -184,13 +217,13 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
+ retval = -ENODEV;
+ kfree(data);
+ }
+- } else if (ifnum==3) {
++ } else if (ifnum==3 && !is_gobi1k) {
+ /*
+ * NMEA (serial line 9600 8N1)
+ * # echo "\$GPS_START" > /dev/ttyUSBx
+ * # echo "\$GPS_STOP" > /dev/ttyUSBx
+ */
+- dbg("NMEA GPS interface found");
++ dbg("Gobi 2K+ NMEA GPS interface found");
+ retval = usb_set_interface(serial->dev, ifnum, 0);
+ if (retval < 0) {
+ dev_err(&serial->dev->dev,
+diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
+index 32c93d7..e39b188 100644
+--- a/drivers/usb/storage/realtek_cr.c
++++ b/drivers/usb/storage/realtek_cr.c
+@@ -509,9 +509,14 @@ static int __do_config_autodelink(struct us_data *us, u8 *data, u16 len)
+ int retval;
+ u16 addr = 0xFE47;
+ u8 cmnd[12] = {0};
++ u8 *buf;
+
+ US_DEBUGP("%s, addr = 0x%x, len = %d\n", __FUNCTION__, addr, len);
+
++ buf = kmemdup(data, len, GFP_NOIO);
++ if (!buf)
++ return USB_STOR_TRANSPORT_ERROR;
++
+ cmnd[0] = 0xF0;
+ cmnd[1] = 0x0E;
+ cmnd[2] = (u8)(addr >> 8);
+@@ -519,7 +524,8 @@ static int __do_config_autodelink(struct us_data *us, u8 *data, u16 len)
+ cmnd[4] = (u8)(len >> 8);
+ cmnd[5] = (u8)len;
+
+- retval = rts51x_bulk_transport_special(us, 0, cmnd, 12, data, len, DMA_TO_DEVICE, NULL);
++ retval = rts51x_bulk_transport_special(us, 0, cmnd, 12, buf, len, DMA_TO_DEVICE, NULL);
++ kfree(buf);
+ if (retval != USB_STOR_TRANSPORT_GOOD) {
+ return -EIO;
+ }
+diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
+index 772f601..6f54f74 100644
+--- a/drivers/video/backlight/tosa_lcd.c
++++ b/drivers/video/backlight/tosa_lcd.c
+@@ -271,7 +271,7 @@ static int tosa_lcd_resume(struct spi_device *spi)
+ }
+ #else
+ #define tosa_lcd_suspend NULL
+-#define tosa_lcd_reume NULL
++#define tosa_lcd_resume NULL
+ #endif
+
+ static struct spi_driver tosa_lcd_driver = {
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index ad93629..7a41220 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1651,6 +1651,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
+ if (ret)
+ return -EINVAL;
+
++ unlink_framebuffer(fb_info);
+ if (fb_info->pixmap.addr &&
+ (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
+ kfree(fb_info->pixmap.addr);
+@@ -1658,7 +1659,6 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
+ registered_fb[i] = NULL;
+ num_registered_fb--;
+ fb_cleanup_device(fb_info);
+- device_destroy(fb_class, MKDEV(FB_MAJOR, i));
+ event.info = fb_info;
+ fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
+
+@@ -1667,6 +1667,22 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
+ return 0;
+ }
+
++int unlink_framebuffer(struct fb_info *fb_info)
++{
++ int i;
++
++ i = fb_info->node;
++ if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
++ return -EINVAL;
++
++ if (fb_info->dev) {
++ device_destroy(fb_class, MKDEV(FB_MAJOR, i));
++ fb_info->dev = NULL;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(unlink_framebuffer);
++
+ void remove_conflicting_framebuffers(struct apertures_struct *a,
+ const char *name, bool primary)
+ {
+diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
+index 3473e75..41746bb 100644
+--- a/drivers/video/udlfb.c
++++ b/drivers/video/udlfb.c
+@@ -1739,7 +1739,7 @@ static void dlfb_usb_disconnect(struct usb_interface *interface)
+ for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
+ device_remove_file(info->dev, &fb_device_attrs[i]);
+ device_remove_bin_file(info->dev, &edid_attr);
+-
++ unlink_framebuffer(info);
+ usb_set_intfdata(interface, NULL);
+
+ /* if clients still have us open, will be freed on last close */
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 8f1fe32..b4c2c99 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -76,7 +76,7 @@ MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
+ unsigned int cifs_max_pending = CIFS_MAX_REQ;
+ module_param(cifs_max_pending, int, 0444);
+ MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
+- "Default: 50 Range: 2 to 256");
++ "Default: 32767 Range: 2 to 32767.");
+ unsigned short echo_retries = 5;
+ module_param(echo_retries, ushort, 0644);
+ MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and "
+@@ -1116,9 +1116,9 @@ init_cifs(void)
+ if (cifs_max_pending < 2) {
+ cifs_max_pending = 2;
+ cFYI(1, "cifs_max_pending set to min of 2");
+- } else if (cifs_max_pending > 256) {
+- cifs_max_pending = 256;
+- cFYI(1, "cifs_max_pending set to max of 256");
++ } else if (cifs_max_pending > CIFS_MAX_REQ) {
++ cifs_max_pending = CIFS_MAX_REQ;
++ cFYI(1, "cifs_max_pending set to max of %u", CIFS_MAX_REQ);
+ }
+
+ rc = cifs_fscache_register();
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 8238aa1..c467ac8 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -55,14 +55,9 @@
+
+ /*
+ * MAX_REQ is the maximum number of requests that WE will send
+- * on one socket concurrently. It also matches the most common
+- * value of max multiplex returned by servers. We may
+- * eventually want to use the negotiated value (in case
+- * future servers can handle more) when we are more confident that
+- * we will not have problems oveloading the socket with pending
+- * write data.
++ * on one socket concurrently.
+ */
+-#define CIFS_MAX_REQ 50
++#define CIFS_MAX_REQ 32767
+
+ #define RFC1001_NAME_LEN 15
+ #define RFC1001_NAME_LEN_WITH_NULL (RFC1001_NAME_LEN + 1)
+@@ -263,6 +258,7 @@ struct TCP_Server_Info {
+ bool session_estab; /* mark when very first sess is established */
+ u16 dialect; /* dialect index that server chose */
+ enum securityEnum secType;
++ bool oplocks:1; /* enable oplocks */
+ unsigned int maxReq; /* Clients should submit no more */
+ /* than maxReq distinct unanswered SMBs to the server when using */
+ /* multiplexed reads or writes */
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 6600aa2..0e6adac 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -458,7 +458,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
+ goto neg_err_exit;
+ }
+ server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode);
+- server->maxReq = le16_to_cpu(rsp->MaxMpxCount);
++ server->maxReq = min_t(unsigned int,
++ le16_to_cpu(rsp->MaxMpxCount),
++ cifs_max_pending);
++ server->oplocks = server->maxReq > 1 ? enable_oplocks : false;
+ server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
+ server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
+ /* even though we do not use raw we might as well set this
+@@ -564,7 +567,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
+
+ /* one byte, so no need to convert this or EncryptionKeyLen from
+ little endian */
+- server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount);
++ server->maxReq = min_t(unsigned int, le16_to_cpu(pSMBr->MaxMpxCount),
++ cifs_max_pending);
++ server->oplocks = server->maxReq > 1 ? enable_oplocks : false;
+ /* probably no need to store and check maxvcs */
+ server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
+ server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 720edf5..9e0675a 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -625,14 +625,10 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
+ spin_unlock(&GlobalMid_Lock);
+ wake_up_all(&server->response_q);
+
+- /*
+- * Check if we have blocked requests that need to free. Note that
+- * cifs_max_pending is normally 50, but can be set at module install
+- * time to as little as two.
+- */
++ /* Check if we have blocked requests that need to free. */
+ spin_lock(&GlobalMid_Lock);
+- if (atomic_read(&server->inFlight) >= cifs_max_pending)
+- atomic_set(&server->inFlight, cifs_max_pending - 1);
++ if (atomic_read(&server->inFlight) >= server->maxReq)
++ atomic_set(&server->inFlight, server->maxReq - 1);
+ /*
+ * We do not want to set the max_pending too low or we could end up
+ * with the counter going negative.
+@@ -1890,6 +1886,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
+ tcp_ses->noautotune = volume_info->noautotune;
+ tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
+ atomic_set(&tcp_ses->inFlight, 0);
++ tcp_ses->maxReq = 1; /* enough to send negotiate request */
+ init_waitqueue_head(&tcp_ses->response_q);
+ init_waitqueue_head(&tcp_ses->request_q);
+ INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
+@@ -3220,7 +3217,7 @@ cifs_ra_pages(struct cifs_sb_info *cifs_sb)
+ int
+ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
+ {
+- int rc = 0;
++ int rc;
+ int xid;
+ struct cifs_ses *pSesInfo;
+ struct cifs_tcon *tcon;
+@@ -3247,6 +3244,7 @@ try_mount_again:
+ FreeXid(xid);
+ }
+ #endif
++ rc = 0;
+ tcon = NULL;
+ pSesInfo = NULL;
+ srvTcp = NULL;
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index bf68b4f..6937e7c 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -171,7 +171,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
+ }
+ tcon = tlink_tcon(tlink);
+
+- if (enable_oplocks)
++ if (tcon->ses->server->oplocks)
+ oplock = REQ_OPLOCK;
+
+ if (nd)
+@@ -492,7 +492,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
+ {
+ int xid;
+ int rc = 0; /* to get around spurious gcc warning, set to zero here */
+- __u32 oplock = enable_oplocks ? REQ_OPLOCK : 0;
++ __u32 oplock;
+ __u16 fileHandle = 0;
+ bool posix_open = false;
+ struct cifs_sb_info *cifs_sb;
+@@ -518,6 +518,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
+ }
+ pTcon = tlink_tcon(tlink);
+
++ oplock = pTcon->ses->server->oplocks ? REQ_OPLOCK : 0;
++
+ /*
+ * Don't allow the separator character in a path component.
+ * The VFS will not allow "/", but "\" is allowed by posix.
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 5e64748..159fcc5 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -380,7 +380,7 @@ int cifs_open(struct inode *inode, struct file *file)
+ cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
+ inode, file->f_flags, full_path);
+
+- if (enable_oplocks)
++ if (tcon->ses->server->oplocks)
+ oplock = REQ_OPLOCK;
+ else
+ oplock = 0;
+@@ -505,7 +505,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
+ cFYI(1, "inode = 0x%p file flags 0x%x for %s",
+ inode, pCifsFile->f_flags, full_path);
+
+- if (enable_oplocks)
++ if (tcon->ses->server->oplocks)
+ oplock = REQ_OPLOCK;
+ else
+ oplock = 0;
+@@ -960,9 +960,9 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
+ INIT_LIST_HEAD(&locks_to_send);
+
+ /*
+- * Allocating count locks is enough because no locks can be added to
+- * the list while we are holding cinode->lock_mutex that protects
+- * locking operations of this inode.
++ * Allocating count locks is enough because no FL_POSIX locks can be
++ * added to the list while we are holding cinode->lock_mutex that
++ * protects locking operations of this inode.
+ */
+ for (; i < count; i++) {
+ lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
+@@ -973,18 +973,20 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
+ list_add_tail(&lck->llist, &locks_to_send);
+ }
+
+- i = 0;
+ el = locks_to_send.next;
+ lock_flocks();
+ cifs_for_each_lock(cfile->dentry->d_inode, before) {
++ flock = *before;
++ if ((flock->fl_flags & FL_POSIX) == 0)
++ continue;
+ if (el == &locks_to_send) {
+- /* something is really wrong */
++ /*
++ * The list ended. We don't have enough allocated
++ * structures - something is really wrong.
++ */
+ cERROR(1, "Can't push all brlocks!");
+ break;
+ }
+- flock = *before;
+- if ((flock->fl_flags & FL_POSIX) == 0)
+- continue;
+ length = 1 + flock->fl_end - flock->fl_start;
+ if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
+ type = CIFS_RDLCK;
+@@ -996,7 +998,6 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
+ lck->length = length;
+ lck->type = type;
+ lck->offset = flock->fl_start;
+- i++;
+ el = el->next;
+ }
+ unlock_flocks();
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 0cc9584..99a27cf 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -265,12 +265,12 @@ static int wait_for_free_request(struct TCP_Server_Info *server,
+
+ spin_lock(&GlobalMid_Lock);
+ while (1) {
+- if (atomic_read(&server->inFlight) >= cifs_max_pending) {
++ if (atomic_read(&server->inFlight) >= server->maxReq) {
+ spin_unlock(&GlobalMid_Lock);
+ cifs_num_waiters_inc(server);
+ wait_event(server->request_q,
+ atomic_read(&server->inFlight)
+- < cifs_max_pending);
++ < server->maxReq);
+ cifs_num_waiters_dec(server);
+ spin_lock(&GlobalMid_Lock);
+ } else {
+diff --git a/fs/dcache.c b/fs/dcache.c
+index f7908ae..eb723d3 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2357,6 +2357,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
+ if (d_ancestor(alias, dentry)) {
+ /* Check for loops */
+ actual = ERR_PTR(-ELOOP);
++ spin_unlock(&inode->i_lock);
+ } else if (IS_ROOT(alias)) {
+ /* Is this an anonymous mountpoint that we
+ * could splice into our tree? */
+@@ -2366,7 +2367,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
+ goto found;
+ } else {
+ /* Nope, but we must(!) avoid directory
+- * aliasing */
++ * aliasing. This drops inode->i_lock */
+ actual = __d_unalias(inode, dentry, alias);
+ }
+ write_sequnlock(&rename_lock);
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 5b0e26a..dbae4d9 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -184,6 +184,8 @@ struct mpage_da_data {
+ #define EXT4_IO_END_UNWRITTEN 0x0001
+ #define EXT4_IO_END_ERROR 0x0002
+ #define EXT4_IO_END_QUEUED 0x0004
++#define EXT4_IO_END_DIRECT 0x0008
++#define EXT4_IO_END_IN_FSYNC 0x0010
+
+ struct ext4_io_page {
+ struct page *p_page;
+diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
+index 5802fa1..95af6f8 100644
+--- a/fs/ext4/ext4_jbd2.h
++++ b/fs/ext4/ext4_jbd2.h
+@@ -261,43 +261,45 @@ static inline void ext4_update_inode_fsync_trans(handle_t *handle,
+ /* super.c */
+ int ext4_force_commit(struct super_block *sb);
+
+-static inline int ext4_should_journal_data(struct inode *inode)
++/*
++ * Ext4 inode journal modes
++ */
++#define EXT4_INODE_JOURNAL_DATA_MODE 0x01 /* journal data mode */
++#define EXT4_INODE_ORDERED_DATA_MODE 0x02 /* ordered data mode */
++#define EXT4_INODE_WRITEBACK_DATA_MODE 0x04 /* writeback data mode */
++
++static inline int ext4_inode_journal_mode(struct inode *inode)
+ {
+ if (EXT4_JOURNAL(inode) == NULL)
+- return 0;
+- if (!S_ISREG(inode->i_mode))
+- return 1;
+- if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
+- return 1;
+- if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
+- return 1;
+- return 0;
++ return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */
++ /* We do not support data journalling with delayed allocation */
++ if (!S_ISREG(inode->i_mode) ||
++ test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
++ return EXT4_INODE_JOURNAL_DATA_MODE; /* journal data */
++ if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
++ !test_opt(inode->i_sb, DELALLOC))
++ return EXT4_INODE_JOURNAL_DATA_MODE; /* journal data */
++ if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
++ return EXT4_INODE_ORDERED_DATA_MODE; /* ordered */
++ if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
++ return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */
++ else
++ BUG();
++}
++
++static inline int ext4_should_journal_data(struct inode *inode)
++{
++ return ext4_inode_journal_mode(inode) & EXT4_INODE_JOURNAL_DATA_MODE;
+ }
+
+ static inline int ext4_should_order_data(struct inode *inode)
+ {
+- if (EXT4_JOURNAL(inode) == NULL)
+- return 0;
+- if (!S_ISREG(inode->i_mode))
+- return 0;
+- if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
+- return 0;
+- if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
+- return 1;
+- return 0;
++ return ext4_inode_journal_mode(inode) & EXT4_INODE_ORDERED_DATA_MODE;
+ }
+
+ static inline int ext4_should_writeback_data(struct inode *inode)
+ {
+- if (EXT4_JOURNAL(inode) == NULL)
+- return 1;
+- if (!S_ISREG(inode->i_mode))
+- return 0;
+- if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
+- return 0;
+- if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
+- return 1;
+- return 0;
++ return ext4_inode_journal_mode(inode) & EXT4_INODE_WRITEBACK_DATA_MODE;
+ }
+
+ /*
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 607b155..7507036 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -301,6 +301,8 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
+ ext4_fsblk_t block = ext4_ext_pblock(ext);
+ int len = ext4_ext_get_actual_len(ext);
+
++ if (len == 0)
++ return 0;
+ return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
+ }
+
+diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
+index 00a2cb7..bb6c7d8 100644
+--- a/fs/ext4/fsync.c
++++ b/fs/ext4/fsync.c
+@@ -89,6 +89,7 @@ int ext4_flush_completed_IO(struct inode *inode)
+ io = list_entry(ei->i_completed_io_list.next,
+ ext4_io_end_t, list);
+ list_del_init(&io->list);
++ io->flag |= EXT4_IO_END_IN_FSYNC;
+ /*
+ * Calling ext4_end_io_nolock() to convert completed
+ * IO to written.
+@@ -108,6 +109,7 @@ int ext4_flush_completed_IO(struct inode *inode)
+ if (ret < 0)
+ ret2 = ret;
+ spin_lock_irqsave(&ei->i_completed_io_lock, flags);
++ io->flag &= ~EXT4_IO_END_IN_FSYNC;
+ }
+ spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+ return (ret2 < 0) ? ret2 : 0;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 92655fd..3ce7613 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -2480,13 +2480,14 @@ static int ext4_da_write_end(struct file *file,
+ int write_mode = (int)(unsigned long)fsdata;
+
+ if (write_mode == FALL_BACK_TO_NONDELALLOC) {
+- if (ext4_should_order_data(inode)) {
++ switch (ext4_inode_journal_mode(inode)) {
++ case EXT4_INODE_ORDERED_DATA_MODE:
+ return ext4_ordered_write_end(file, mapping, pos,
+ len, copied, page, fsdata);
+- } else if (ext4_should_writeback_data(inode)) {
++ case EXT4_INODE_WRITEBACK_DATA_MODE:
+ return ext4_writeback_write_end(file, mapping, pos,
+ len, copied, page, fsdata);
+- } else {
++ default:
+ BUG();
+ }
+ }
+@@ -2793,9 +2794,6 @@ out:
+
+ /* queue the work to convert unwritten extents to written */
+ queue_work(wq, &io_end->work);
+-
+- /* XXX: probably should move into the real I/O completion handler */
+- inode_dio_done(inode);
+ }
+
+ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
+@@ -2919,9 +2917,12 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
+ iocb->private = NULL;
+ EXT4_I(inode)->cur_aio_dio = NULL;
+ if (!is_sync_kiocb(iocb)) {
+- iocb->private = ext4_init_io_end(inode, GFP_NOFS);
+- if (!iocb->private)
++ ext4_io_end_t *io_end =
++ ext4_init_io_end(inode, GFP_NOFS);
++ if (!io_end)
+ return -ENOMEM;
++ io_end->flag |= EXT4_IO_END_DIRECT;
++ iocb->private = io_end;
+ /*
+ * we save the io structure for current async
+ * direct IO, so that later ext4_map_blocks()
+@@ -3084,18 +3085,25 @@ static const struct address_space_operations ext4_da_aops = {
+
+ void ext4_set_aops(struct inode *inode)
+ {
+- if (ext4_should_order_data(inode) &&
+- test_opt(inode->i_sb, DELALLOC))
+- inode->i_mapping->a_ops = &ext4_da_aops;
+- else if (ext4_should_order_data(inode))
+- inode->i_mapping->a_ops = &ext4_ordered_aops;
+- else if (ext4_should_writeback_data(inode) &&
+- test_opt(inode->i_sb, DELALLOC))
+- inode->i_mapping->a_ops = &ext4_da_aops;
+- else if (ext4_should_writeback_data(inode))
+- inode->i_mapping->a_ops = &ext4_writeback_aops;
+- else
++ switch (ext4_inode_journal_mode(inode)) {
++ case EXT4_INODE_ORDERED_DATA_MODE:
++ if (test_opt(inode->i_sb, DELALLOC))
++ inode->i_mapping->a_ops = &ext4_da_aops;
++ else
++ inode->i_mapping->a_ops = &ext4_ordered_aops;
++ break;
++ case EXT4_INODE_WRITEBACK_DATA_MODE:
++ if (test_opt(inode->i_sb, DELALLOC))
++ inode->i_mapping->a_ops = &ext4_da_aops;
++ else
++ inode->i_mapping->a_ops = &ext4_writeback_aops;
++ break;
++ case EXT4_INODE_JOURNAL_DATA_MODE:
+ inode->i_mapping->a_ops = &ext4_journalled_aops;
++ break;
++ default:
++ BUG();
++ }
+ }
+
+
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index 7e106c8..24feb1c 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -111,6 +111,8 @@ int ext4_end_io_nolock(ext4_io_end_t *io)
+ if (io->iocb)
+ aio_complete(io->iocb, io->result, 0);
+
++ if (io->flag & EXT4_IO_END_DIRECT)
++ inode_dio_done(inode);
+ /* Wake up anyone waiting on unwritten extent conversion */
+ if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten))
+ wake_up_all(ext4_ioend_wq(io->inode));
+@@ -128,12 +130,18 @@ static void ext4_end_io_work(struct work_struct *work)
+ unsigned long flags;
+
+ spin_lock_irqsave(&ei->i_completed_io_lock, flags);
++ if (io->flag & EXT4_IO_END_IN_FSYNC)
++ goto requeue;
+ if (list_empty(&io->list)) {
+ spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+ goto free;
+ }
+
+ if (!mutex_trylock(&inode->i_mutex)) {
++ bool was_queued;
++requeue:
++ was_queued = !!(io->flag & EXT4_IO_END_QUEUED);
++ io->flag |= EXT4_IO_END_QUEUED;
+ spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
+ /*
+ * Requeue the work instead of waiting so that the work
+@@ -146,9 +154,8 @@ static void ext4_end_io_work(struct work_struct *work)
+ * yield the cpu if it sees an end_io request that has already
+ * been requeued.
+ */
+- if (io->flag & EXT4_IO_END_QUEUED)
++ if (was_queued)
+ yield();
+- io->flag |= EXT4_IO_END_QUEUED;
+ return;
+ }
+ list_del_init(&io->list);
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 0be5a78..2d0ca24 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -238,17 +238,10 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
+ loff_t isize;
+ ssize_t retval = 0;
+
+- mutex_lock(&inode->i_mutex);
+-
+ /* validate length */
+ if (len == 0)
+ goto out;
+
+- isize = i_size_read(inode);
+- if (!isize)
+- goto out;
+-
+- end_index = (isize - 1) >> huge_page_shift(h);
+ for (;;) {
+ struct page *page;
+ unsigned long nr, ret;
+@@ -256,18 +249,21 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
+
+ /* nr is the maximum number of bytes to copy from this page */
+ nr = huge_page_size(h);
++ isize = i_size_read(inode);
++ if (!isize)
++ goto out;
++ end_index = (isize - 1) >> huge_page_shift(h);
+ if (index >= end_index) {
+ if (index > end_index)
+ goto out;
+ nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
+- if (nr <= offset) {
++ if (nr <= offset)
+ goto out;
+- }
+ }
+ nr = nr - offset;
+
+ /* Find the page */
+- page = find_get_page(mapping, index);
++ page = find_lock_page(mapping, index);
+ if (unlikely(page == NULL)) {
+ /*
+ * We have a HOLE, zero out the user-buffer for the
+@@ -279,17 +275,18 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
+ else
+ ra = 0;
+ } else {
++ unlock_page(page);
++
+ /*
+ * We have the page, copy it to user space buffer.
+ */
+ ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
+ ret = ra;
++ page_cache_release(page);
+ }
+ if (ra < 0) {
+ if (retval == 0)
+ retval = ra;
+- if (page)
+- page_cache_release(page);
+ goto out;
+ }
+
+@@ -299,16 +296,12 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
+ index += offset >> huge_page_shift(h);
+ offset &= ~huge_page_mask(h);
+
+- if (page)
+- page_cache_release(page);
+-
+ /* short read or no more work */
+ if ((ret != nr) || (len == 0))
+ break;
+ }
+ out:
+ *ppos = ((loff_t)index << huge_page_shift(h)) + offset;
+- mutex_unlock(&inode->i_mutex);
+ return retval;
+ }
+
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index a0e41a4..8267de5 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1948,6 +1948,8 @@ zap_buffer_unlocked:
+ clear_buffer_mapped(bh);
+ clear_buffer_req(bh);
+ clear_buffer_new(bh);
++ clear_buffer_delay(bh);
++ clear_buffer_unwritten(bh);
+ bh->b_bdev = NULL;
+ return may_free;
+ }
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index c061b9a..2444780 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -440,7 +440,7 @@ static int param_set_##name(const char *val, struct kernel_param *kp) \
+ __typeof__(type) num = which_strtol(val, &endp, 0); \
+ if (endp == val || *endp || num < (min) || num > (max)) \
+ return -EINVAL; \
+- *((int *) kp->arg) = num; \
++ *((type *) kp->arg) = num; \
+ return 0; \
+ }
+
+diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
+index 726e59a..168cb93 100644
+--- a/fs/nfs/callback_xdr.c
++++ b/fs/nfs/callback_xdr.c
+@@ -9,6 +9,8 @@
+ #include <linux/sunrpc/svc.h>
+ #include <linux/nfs4.h>
+ #include <linux/nfs_fs.h>
++#include <linux/ratelimit.h>
++#include <linux/printk.h>
+ #include <linux/slab.h>
+ #include <linux/sunrpc/bc_xprt.h>
+ #include "nfs4_fs.h"
+@@ -167,7 +169,7 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound
+ if (hdr->minorversion <= 1) {
+ hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 */
+ } else {
+- printk(KERN_WARNING "%s: NFSv4 server callback with "
++ pr_warn_ratelimited("NFS: %s: NFSv4 server callback with "
+ "illegal minor version %u!\n",
+ __func__, hdr->minorversion);
+ return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 7f26540..ac889af 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -466,6 +466,17 @@ static void nfs_delegation_run_state_manager(struct nfs_client *clp)
+ nfs4_schedule_state_manager(clp);
+ }
+
++void nfs_remove_bad_delegation(struct inode *inode)
++{
++ struct nfs_delegation *delegation;
++
++ delegation = nfs_detach_delegation(NFS_I(inode), NFS_SERVER(inode));
++ if (delegation) {
++ nfs_inode_find_state_and_recover(inode, &delegation->stateid);
++ nfs_free_delegation(delegation);
++ }
++}
++
+ /**
+ * nfs_expire_all_delegation_types
+ * @clp: client to process
+diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
+index d9322e4..691a796 100644
+--- a/fs/nfs/delegation.h
++++ b/fs/nfs/delegation.h
+@@ -45,6 +45,7 @@ void nfs_expire_unreferenced_delegations(struct nfs_client *clp);
+ void nfs_handle_cb_pathdown(struct nfs_client *clp);
+ int nfs_client_return_marked_delegations(struct nfs_client *clp);
+ int nfs_delegations_present(struct nfs_client *clp);
++void nfs_remove_bad_delegation(struct inode *inode);
+
+ void nfs_delegation_mark_reclaim(struct nfs_client *clp);
+ void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
+diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
+index 693ae22..0983b25 100644
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -191,6 +191,7 @@ struct nfs4_exception {
+ long timeout;
+ int retry;
+ struct nfs4_state *state;
++ struct inode *inode;
+ };
+
+ struct nfs4_state_recovery_ops {
+@@ -324,6 +325,8 @@ extern void nfs4_put_open_state(struct nfs4_state *);
+ extern void nfs4_close_state(struct nfs4_state *, fmode_t);
+ extern void nfs4_close_sync(struct nfs4_state *, fmode_t);
+ extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
++extern void nfs_inode_find_state_and_recover(struct inode *inode,
++ const nfs4_stateid *stateid);
+ extern void nfs4_schedule_lease_recovery(struct nfs_client *);
+ extern void nfs4_schedule_state_manager(struct nfs_client *);
+ extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index e527030..d945700 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -257,15 +257,28 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
+ {
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_state *state = exception->state;
++ struct inode *inode = exception->inode;
+ int ret = errorcode;
+
+ exception->retry = 0;
+ switch(errorcode) {
+ case 0:
+ return 0;
++ case -NFS4ERR_OPENMODE:
++ if (nfs_have_delegation(inode, FMODE_READ)) {
++ nfs_inode_return_delegation(inode);
++ exception->retry = 1;
++ return 0;
++ }
++ if (state == NULL)
++ break;
++ nfs4_schedule_stateid_recovery(server, state);
++ goto wait_on_recovery;
++ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+- case -NFS4ERR_OPENMODE:
++ if (state != NULL)
++ nfs_remove_bad_delegation(state->inode);
+ if (state == NULL)
+ break;
+ nfs4_schedule_stateid_recovery(server, state);
+@@ -1316,8 +1329,11 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
+ * The show must go on: exit, but mark the
+ * stateid as needing recovery.
+ */
++ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
++ nfs_inode_find_state_and_recover(state->inode,
++ stateid);
+ nfs4_schedule_stateid_recovery(server, state);
+ case -EKEYEXPIRED:
+ /*
+@@ -1822,7 +1838,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry,
+ * the user though...
+ */
+ if (status == -NFS4ERR_BAD_SEQID) {
+- printk(KERN_WARNING "NFS: v4 server %s "
++ pr_warn_ratelimited("NFS: v4 server %s "
+ " returned a bad sequence-id error!\n",
+ NFS_SERVER(dir)->nfs_client->cl_hostname);
+ exception.retry = 1;
+@@ -1893,7 +1909,10 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
+ struct nfs4_state *state)
+ {
+ struct nfs_server *server = NFS_SERVER(inode);
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {
++ .state = state,
++ .inode = inode,
++ };
+ int err;
+ do {
+ err = nfs4_handle_exception(server,
+@@ -2223,11 +2242,12 @@ static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
+ switch (err) {
+ case 0:
+ case -NFS4ERR_WRONGSEC:
+- break;
++ goto out;
+ default:
+ err = nfs4_handle_exception(server, err, &exception);
+ }
+ } while (exception.retry);
++out:
+ return err;
+ }
+
+@@ -3707,8 +3727,11 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
+ if (task->tk_status >= 0)
+ return 0;
+ switch(task->tk_status) {
++ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
++ if (state != NULL)
++ nfs_remove_bad_delegation(state->inode);
+ case -NFS4ERR_OPENMODE:
+ if (state == NULL)
+ break;
+@@ -4526,7 +4549,9 @@ out:
+
+ static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+ {
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {
++ .state = state,
++ };
+ int err;
+
+ do {
+@@ -4619,6 +4644,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
+ * The show must go on: exit, but mark the
+ * stateid as needing recovery.
+ */
++ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_OPENMODE:
+@@ -5957,21 +5983,22 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
+ return;
+
+ switch (task->tk_status) { /* Just ignore these failures */
+- case NFS4ERR_DELEG_REVOKED: /* layout was recalled */
+- case NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
+- case NFS4ERR_BADLAYOUT: /* no layout */
+- case NFS4ERR_GRACE: /* loca_recalim always false */
++ case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
++ case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
++ case -NFS4ERR_BADLAYOUT: /* no layout */
++ case -NFS4ERR_GRACE: /* loca_recalim always false */
+ task->tk_status = 0;
+- }
+-
+- if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
+- rpc_restart_call_prepare(task);
+- return;
+- }
+-
+- if (task->tk_status == 0)
++ break;
++ case 0:
+ nfs_post_op_update_inode_force_wcc(data->args.inode,
+ data->res.fattr);
++ break;
++ default:
++ if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
++ rpc_restart_call_prepare(task);
++ return;
++ }
++ }
+ }
+
+ static void nfs4_layoutcommit_release(void *calldata)
+@@ -6074,11 +6101,12 @@ nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
+ case 0:
+ case -NFS4ERR_WRONGSEC:
+ case -NFS4ERR_NOTSUPP:
+- break;
++ goto out;
+ default:
+ err = nfs4_handle_exception(server, err, &exception);
+ }
+ } while (exception.retry);
++out:
+ return err;
+ }
+
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index a58eed7..66020ac 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -935,7 +935,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
+ case -NFS4ERR_BAD_SEQID:
+ if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
+ return;
+- printk(KERN_WARNING "NFS: v4 server returned a bad"
++ pr_warn_ratelimited("NFS: v4 server returned a bad"
+ " sequence-id error on an"
+ " unconfirmed sequence %p!\n",
+ seqid->sequence);
+@@ -1071,12 +1071,37 @@ void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4
+ {
+ struct nfs_client *clp = server->nfs_client;
+
+- if (test_and_clear_bit(NFS_DELEGATED_STATE, &state->flags))
+- nfs_async_inode_return_delegation(state->inode, &state->stateid);
+ nfs4_state_mark_reclaim_nograce(clp, state);
+ nfs4_schedule_state_manager(clp);
+ }
+
++void nfs_inode_find_state_and_recover(struct inode *inode,
++ const nfs4_stateid *stateid)
++{
++ struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
++ struct nfs_inode *nfsi = NFS_I(inode);
++ struct nfs_open_context *ctx;
++ struct nfs4_state *state;
++ bool found = false;
++
++ spin_lock(&inode->i_lock);
++ list_for_each_entry(ctx, &nfsi->open_files, list) {
++ state = ctx->state;
++ if (state == NULL)
++ continue;
++ if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
++ continue;
++ if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
++ continue;
++ nfs4_state_mark_reclaim_nograce(clp, state);
++ found = true;
++ }
++ spin_unlock(&inode->i_lock);
++ if (found)
++ nfs4_schedule_state_manager(clp);
++}
++
++
+ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
+ {
+ struct inode *inode = state->inode;
+@@ -1739,7 +1764,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
+ } while (atomic_read(&clp->cl_count) > 1);
+ return;
+ out_error:
+- printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
++ pr_warn_ratelimited("NFS: state manager failed on NFSv4 server %s"
+ " with error %d\n", clp->cl_hostname, -status);
+ nfs4_end_drain_session(clp);
+ nfs4_clear_state_manager_bit(clp);
+diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
+index be177f7..d6c078e 100644
+--- a/fs/proc/namespaces.c
++++ b/fs/proc/namespaces.c
+@@ -54,7 +54,7 @@ static struct dentry *proc_ns_instantiate(struct inode *dir,
+ ei->ns_ops = ns_ops;
+ ei->ns = ns;
+
+- dentry->d_op = &pid_dentry_operations;
++ d_set_d_op(dentry, &pid_dentry_operations);
+ d_add(dentry, inode);
+ /* Close the race of the process dying before we return the dentry */
+ if (pid_revalidate(dentry, NULL))
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index a6b6217..53c3bce 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -188,20 +188,32 @@ static ssize_t proc_sys_write(struct file *filp, const char __user *buf,
+
+ static int proc_sys_open(struct inode *inode, struct file *filp)
+ {
++ struct ctl_table_header *head = grab_header(inode);
+ struct ctl_table *table = PROC_I(inode)->sysctl_entry;
+
++ /* sysctl was unregistered */
++ if (IS_ERR(head))
++ return PTR_ERR(head);
++
+ if (table->poll)
+ filp->private_data = proc_sys_poll_event(table->poll);
+
++ sysctl_head_finish(head);
++
+ return 0;
+ }
+
+ static unsigned int proc_sys_poll(struct file *filp, poll_table *wait)
+ {
+ struct inode *inode = filp->f_path.dentry->d_inode;
++ struct ctl_table_header *head = grab_header(inode);
+ struct ctl_table *table = PROC_I(inode)->sysctl_entry;
+- unsigned long event = (unsigned long)filp->private_data;
+ unsigned int ret = DEFAULT_POLLMASK;
++ unsigned long event;
++
++ /* sysctl was unregistered */
++ if (IS_ERR(head))
++ return POLLERR | POLLHUP;
+
+ if (!table->proc_handler)
+ goto out;
+@@ -209,6 +221,7 @@ static unsigned int proc_sys_poll(struct file *filp, poll_table *wait)
+ if (!table->poll)
+ goto out;
+
++ event = (unsigned long)filp->private_data;
+ poll_wait(filp, &table->poll->wait, wait);
+
+ if (event != atomic_read(&table->poll->event)) {
+@@ -217,6 +230,8 @@ static unsigned int proc_sys_poll(struct file *filp, poll_table *wait)
+ }
+
+ out:
++ sysctl_head_finish(head);
++
+ return ret;
+ }
+
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 7dcd2a2..3efa725 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ } else {
+ spin_unlock(&walk->mm->page_table_lock);
+ }
++
++ if (pmd_trans_unstable(pmd))
++ return 0;
+ /*
+ * The mmap_sem held all the way back in m_start() is what
+ * keeps khugepaged out of here and from collapsing things
+@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
+ struct page *page;
+
+ split_huge_page_pmd(walk->mm, pmd);
++ if (pmd_trans_unstable(pmd))
++ return 0;
+
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ for (; addr != end; pte++, addr += PAGE_SIZE) {
+@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ int err = 0;
+
+ split_huge_page_pmd(walk->mm, pmd);
++ if (pmd_trans_unstable(pmd))
++ return 0;
+
+ /* find the first VMA at or above 'addr' */
+ vma = find_vma(walk->mm, addr);
+@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
+ spin_unlock(&walk->mm->page_table_lock);
+ }
+
++ if (pmd_trans_unstable(pmd))
++ return 0;
+ orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ do {
+ struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
+diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
+index deb804b..9db61a4 100644
+--- a/fs/sysfs/inode.c
++++ b/fs/sysfs/inode.c
+@@ -136,12 +136,13 @@ static int sysfs_sd_setsecdata(struct sysfs_dirent *sd, void **secdata, u32 *sec
+ void *old_secdata;
+ size_t old_secdata_len;
+
+- iattrs = sd->s_iattr;
+- if (!iattrs)
+- iattrs = sysfs_init_inode_attrs(sd);
+- if (!iattrs)
+- return -ENOMEM;
++ if (!sd->s_iattr) {
++ sd->s_iattr = sysfs_init_inode_attrs(sd);
++ if (!sd->s_iattr)
++ return -ENOMEM;
++ }
+
++ iattrs = sd->s_iattr;
+ old_secdata = iattrs->ia_secdata;
+ old_secdata_len = iattrs->ia_secdata_len;
+
+diff --git a/fs/udf/file.c b/fs/udf/file.c
+index dca0c38..d567b84 100644
+--- a/fs/udf/file.c
++++ b/fs/udf/file.c
+@@ -201,12 +201,10 @@ out:
+ static int udf_release_file(struct inode *inode, struct file *filp)
+ {
+ if (filp->f_mode & FMODE_WRITE) {
+- mutex_lock(&inode->i_mutex);
+ down_write(&UDF_I(inode)->i_data_sem);
+ udf_discard_prealloc(inode);
+ udf_truncate_tail_extent(inode);
+ up_write(&UDF_I(inode)->i_data_sem);
+- mutex_unlock(&inode->i_mutex);
+ }
+ return 0;
+ }
+diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
+index 0fa98b1..cfc4277 100644
+--- a/fs/xfs/xfs_iget.c
++++ b/fs/xfs/xfs_iget.c
+@@ -353,9 +353,20 @@ xfs_iget_cache_miss(
+ BUG();
+ }
+
+- spin_lock(&pag->pag_ici_lock);
++ /*
++ * These values must be set before inserting the inode into the radix
++ * tree as the moment it is inserted a concurrent lookup (allowed by the
++ * RCU locking mechanism) can find it and that lookup must see that this
++ * is an inode currently under construction (i.e. that XFS_INEW is set).
++ * The ip->i_flags_lock that protects the XFS_INEW flag forms the
++ * memory barrier that ensures this detection works correctly at lookup
++ * time.
++ */
++ ip->i_udquot = ip->i_gdquot = NULL;
++ xfs_iflags_set(ip, XFS_INEW);
+
+ /* insert the new inode */
++ spin_lock(&pag->pag_ici_lock);
+ error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
+ if (unlikely(error)) {
+ WARN_ON(error != -EEXIST);
+@@ -363,11 +374,6 @@ xfs_iget_cache_miss(
+ error = EAGAIN;
+ goto out_preload_end;
+ }
+-
+- /* These values _must_ be set before releasing the radix tree lock! */
+- ip->i_udquot = ip->i_gdquot = NULL;
+- xfs_iflags_set(ip, XFS_INEW);
+-
+ spin_unlock(&pag->pag_ici_lock);
+ radix_tree_preload_end();
+
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 541a508..4f5d0ce 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -3161,37 +3161,26 @@ xlog_recover_process_iunlinks(
+ */
+ continue;
+ }
++ /*
++ * Unlock the buffer so that it can be acquired in the normal
++ * course of the transaction to truncate and free each inode.
++ * Because we are not racing with anyone else here for the AGI
++ * buffer, we don't even need to hold it locked to read the
++ * initial unlinked bucket entries out of the buffer. We keep
++ * buffer reference though, so that it stays pinned in memory
++ * while we need the buffer.
++ */
+ agi = XFS_BUF_TO_AGI(agibp);
++ xfs_buf_unlock(agibp);
+
+ for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
+ agino = be32_to_cpu(agi->agi_unlinked[bucket]);
+ while (agino != NULLAGINO) {
+- /*
+- * Release the agi buffer so that it can
+- * be acquired in the normal course of the
+- * transaction to truncate and free the inode.
+- */
+- xfs_buf_relse(agibp);
+-
+ agino = xlog_recover_process_one_iunlink(mp,
+ agno, agino, bucket);
+-
+- /*
+- * Reacquire the agibuffer and continue around
+- * the loop. This should never fail as we know
+- * the buffer was good earlier on.
+- */
+- error = xfs_read_agi(mp, NULL, agno, &agibp);
+- ASSERT(error == 0);
+- agi = XFS_BUF_TO_AGI(agibp);
+ }
+ }
+-
+- /*
+- * Release the buffer for the current agi so we can
+- * go on to the next one.
+- */
+- xfs_buf_relse(agibp);
++ xfs_buf_rele(agibp);
+ }
+
+ mp->m_dmevmask = mp_dmevmask;
+diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
+index 76bff2b..a03c098 100644
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -425,6 +425,8 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
+ unsigned long size);
+ #endif
+
++#ifdef CONFIG_MMU
++
+ #ifndef CONFIG_TRANSPARENT_HUGEPAGE
+ static inline int pmd_trans_huge(pmd_t pmd)
+ {
+@@ -441,7 +443,66 @@ static inline int pmd_write(pmd_t pmd)
+ return 0;
+ }
+ #endif /* __HAVE_ARCH_PMD_WRITE */
++#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
++
++/*
++ * This function is meant to be used by sites walking pagetables with
++ * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
++ * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
++ * into a null pmd and the transhuge page fault can convert a null pmd
++ * into an hugepmd or into a regular pmd (if the hugepage allocation
++ * fails). While holding the mmap_sem in read mode the pmd becomes
++ * stable and stops changing under us only if it's not null and not a
++ * transhuge pmd. When those races occurs and this function makes a
++ * difference vs the standard pmd_none_or_clear_bad, the result is
++ * undefined so behaving like if the pmd was none is safe (because it
++ * can return none anyway). The compiler level barrier() is critically
++ * important to compute the two checks atomically on the same pmdval.
++ */
++static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
++{
++ /* depend on compiler for an atomic pmd read */
++ pmd_t pmdval = *pmd;
++ /*
++ * The barrier will stabilize the pmdval in a register or on
++ * the stack so that it will stop changing under the code.
++ */
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++ barrier();
++#endif
++ if (pmd_none(pmdval))
++ return 1;
++ if (unlikely(pmd_bad(pmdval))) {
++ if (!pmd_trans_huge(pmdval))
++ pmd_clear_bad(pmd);
++ return 1;
++ }
++ return 0;
++}
++
++/*
++ * This is a noop if Transparent Hugepage Support is not built into
++ * the kernel. Otherwise it is equivalent to
++ * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
++ * places that already verified the pmd is not none and they want to
++ * walk ptes while holding the mmap sem in read mode (write mode don't
++ * need this). If THP is not enabled, the pmd can't go away under the
++ * code even if MADV_DONTNEED runs, but if THP is enabled we need to
++ * run a pmd_trans_unstable before walking the ptes after
++ * split_huge_page_pmd returns (because it may have run when the pmd
++ * become null, but then a page fault can map in a THP and not a
++ * regular page).
++ */
++static inline int pmd_trans_unstable(pmd_t *pmd)
++{
++#ifdef CONFIG_TRANSPARENT_HUGEPAGE
++ return pmd_none_or_trans_huge_or_clear_bad(pmd);
++#else
++ return 0;
+ #endif
++}
++
++#endif /* CONFIG_MMU */
+
+ #endif /* !__ASSEMBLY__ */
+
+diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
+index 2292d1a..991ef01 100644
+--- a/include/asm-generic/unistd.h
++++ b/include/asm-generic/unistd.h
+@@ -218,7 +218,7 @@ __SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
+
+ /* fs/sendfile.c */
+ #define __NR3264_sendfile 71
+-__SC_3264(__NR3264_sendfile, sys_sendfile64, sys_sendfile)
++__SYSCALL(__NR3264_sendfile, sys_sendfile64)
+
+ /* fs/select.c */
+ #define __NR_pselect6 72
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index 1d6836c..73845ce 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -997,6 +997,7 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
+ /* drivers/video/fbmem.c */
+ extern int register_framebuffer(struct fb_info *fb_info);
+ extern int unregister_framebuffer(struct fb_info *fb_info);
++extern int unlink_framebuffer(struct fb_info *fb_info);
+ extern void remove_conflicting_framebuffers(struct apertures_struct *a,
+ const char *name, bool primary);
+ extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
+diff --git a/include/linux/math64.h b/include/linux/math64.h
+index 23fcdfc..b8ba855 100644
+--- a/include/linux/math64.h
++++ b/include/linux/math64.h
+@@ -6,6 +6,8 @@
+
+ #if BITS_PER_LONG == 64
+
++#define div64_long(x,y) div64_s64((x),(y))
++
+ /**
+ * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
+ *
+@@ -45,6 +47,8 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
+
+ #elif BITS_PER_LONG == 32
+
++#define div64_long(x,y) div_s64((x),(y))
++
+ #ifndef div_u64_rem
+ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+ {
+diff --git a/include/linux/rtc.h b/include/linux/rtc.h
+index 93f4d03..fcabfb4 100644
+--- a/include/linux/rtc.h
++++ b/include/linux/rtc.h
+@@ -202,7 +202,8 @@ struct rtc_device
+ struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
+ int pie_enabled;
+ struct work_struct irqwork;
+-
++ /* Some hardware can't support UIE mode */
++ int uie_unsupported;
+
+ #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+ struct work_struct uie_task;
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 1614be2..0677023 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2641,6 +2641,16 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+ }
+
+ switch (cmd) {
++ case FUTEX_LOCK_PI:
++ case FUTEX_UNLOCK_PI:
++ case FUTEX_TRYLOCK_PI:
++ case FUTEX_WAIT_REQUEUE_PI:
++ case FUTEX_CMP_REQUEUE_PI:
++ if (!futex_cmpxchg_enabled)
++ return -ENOSYS;
++ }
++
++ switch (cmd) {
+ case FUTEX_WAIT:
+ val3 = FUTEX_BITSET_MATCH_ANY;
+ case FUTEX_WAIT_BITSET:
+@@ -2661,16 +2671,13 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+ ret = futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
+ break;
+ case FUTEX_LOCK_PI:
+- if (futex_cmpxchg_enabled)
+- ret = futex_lock_pi(uaddr, flags, val, timeout, 0);
++ ret = futex_lock_pi(uaddr, flags, val, timeout, 0);
+ break;
+ case FUTEX_UNLOCK_PI:
+- if (futex_cmpxchg_enabled)
+- ret = futex_unlock_pi(uaddr, flags);
++ ret = futex_unlock_pi(uaddr, flags);
+ break;
+ case FUTEX_TRYLOCK_PI:
+- if (futex_cmpxchg_enabled)
+- ret = futex_lock_pi(uaddr, flags, 0, timeout, 1);
++ ret = futex_lock_pi(uaddr, flags, 0, timeout, 1);
+ break;
+ case FUTEX_WAIT_REQUEUE_PI:
+ val3 = FUTEX_BITSET_MATCH_ANY;
+diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
+index fb7db75..25784d6 100644
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -61,8 +61,7 @@ int irq_set_irq_type(unsigned int irq, unsigned int type)
+ return -EINVAL;
+
+ type &= IRQ_TYPE_SENSE_MASK;
+- if (type != IRQ_TYPE_NONE)
+- ret = __irq_set_trigger(desc, irq, type);
++ ret = __irq_set_trigger(desc, irq, type);
+ irq_put_desc_busunlock(desc, flags);
+ return ret;
+ }
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index ae95cd2..7600092 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -773,7 +773,7 @@ static int irq_thread(void *data)
+ struct irqaction *action);
+ int wake;
+
+- if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
++ if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
+ &action->thread_flags))
+ handler_fn = irq_forced_thread_fn;
+ else
+diff --git a/kernel/module.c b/kernel/module.c
+index 178333c..6969ef0 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2341,8 +2341,7 @@ static int copy_and_check(struct load_info *info,
+ return -ENOEXEC;
+
+ /* Suck in entire file: we'll want most of it. */
+- /* vmalloc barfs on "unusual" numbers. Check here */
+- if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL)
++ if ((hdr = vmalloc(len)) == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(hdr, umod, len) != 0) {
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 624538a..7c0d578 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -648,7 +648,7 @@ int hibernate(void)
+ /* Allocate memory management structures */
+ error = create_basic_memory_bitmaps();
+ if (error)
+- goto Exit;
++ goto Enable_umh;
+
+ printk(KERN_INFO "PM: Syncing filesystems ... ");
+ sys_sync();
+@@ -656,7 +656,7 @@ int hibernate(void)
+
+ error = prepare_processes();
+ if (error)
+- goto Finish;
++ goto Free_bitmaps;
+
+ error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
+ if (error)
+@@ -689,8 +689,9 @@ int hibernate(void)
+
+ Thaw:
+ thaw_processes();
+- Finish:
++ Free_bitmaps:
+ free_basic_memory_bitmaps();
++ Enable_umh:
+ usermodehelper_enable();
+ Exit:
+ pm_notifier_call_chain(PM_POST_HIBERNATION);
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index f6117a4..4b85a7a 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -275,7 +275,7 @@ static inline s64 ntp_update_offset_fll(s64 offset64, long secs)
+
+ time_status |= STA_MODE;
+
+- return div_s64(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
++ return div64_long(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
+ }
+
+ static void ntp_update_offset(long offset)
+diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
+index ad72a03..6d40244 100644
+--- a/lib/kobject_uevent.c
++++ b/lib/kobject_uevent.c
+@@ -29,16 +29,17 @@
+
+ u64 uevent_seqnum;
+ char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
+-static DEFINE_SPINLOCK(sequence_lock);
+ #ifdef CONFIG_NET
+ struct uevent_sock {
+ struct list_head list;
+ struct sock *sk;
+ };
+ static LIST_HEAD(uevent_sock_list);
+-static DEFINE_MUTEX(uevent_sock_mutex);
+ #endif
+
++/* This lock protects uevent_seqnum and uevent_sock_list */
++static DEFINE_MUTEX(uevent_sock_mutex);
++
+ /* the strings here must match the enum in include/linux/kobject.h */
+ static const char *kobject_actions[] = {
+ [KOBJ_ADD] = "add",
+@@ -136,7 +137,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
+ struct kobject *top_kobj;
+ struct kset *kset;
+ const struct kset_uevent_ops *uevent_ops;
+- u64 seq;
+ int i = 0;
+ int retval = 0;
+ #ifdef CONFIG_NET
+@@ -243,17 +243,16 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
+ else if (action == KOBJ_REMOVE)
+ kobj->state_remove_uevent_sent = 1;
+
++ mutex_lock(&uevent_sock_mutex);
+ /* we will send an event, so request a new sequence number */
+- spin_lock(&sequence_lock);
+- seq = ++uevent_seqnum;
+- spin_unlock(&sequence_lock);
+- retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq);
+- if (retval)
++ retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum);
++ if (retval) {
++ mutex_unlock(&uevent_sock_mutex);
+ goto exit;
++ }
+
+ #if defined(CONFIG_NET)
+ /* send netlink message */
+- mutex_lock(&uevent_sock_mutex);
+ list_for_each_entry(ue_sk, &uevent_sock_list, list) {
+ struct sock *uevent_sock = ue_sk->sk;
+ struct sk_buff *skb;
+@@ -287,8 +286,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
+ } else
+ retval = -ENOMEM;
+ }
+- mutex_unlock(&uevent_sock_mutex);
+ #endif
++ mutex_unlock(&uevent_sock_mutex);
+
+ /* call uevent_helper, usually only enabled during early boot */
+ if (uevent_helper[0] && !kobj_usermode_filter(kobj)) {
+diff --git a/mm/bootmem.c b/mm/bootmem.c
+index 1a77012..b863822 100644
+--- a/mm/bootmem.c
++++ b/mm/bootmem.c
+@@ -768,14 +768,13 @@ void * __init alloc_bootmem_section(unsigned long size,
+ unsigned long section_nr)
+ {
+ bootmem_data_t *bdata;
+- unsigned long pfn, goal, limit;
++ unsigned long pfn, goal;
+
+ pfn = section_nr_to_pfn(section_nr);
+ goal = pfn << PAGE_SHIFT;
+- limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
+ bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
+
+- return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
++ return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, 0);
+ }
+ #endif
+
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index de67e91..778554f 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -5237,6 +5237,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
+ spinlock_t *ptl;
+
+ split_huge_page_pmd(walk->mm, pmd);
++ if (pmd_trans_unstable(pmd))
++ return 0;
+
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ for (; addr != end; pte++, addr += PAGE_SIZE)
+@@ -5398,6 +5400,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
+ spinlock_t *ptl;
+
+ split_huge_page_pmd(walk->mm, pmd);
++ if (pmd_trans_unstable(pmd))
++ return 0;
+ retry:
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ for (; addr != end; addr += PAGE_SIZE) {
+diff --git a/mm/memory.c b/mm/memory.c
+index 829d437..1b1ca17 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1228,16 +1228,24 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
+ do {
+ next = pmd_addr_end(addr, end);
+ if (pmd_trans_huge(*pmd)) {
+- if (next-addr != HPAGE_PMD_SIZE) {
++ if (next - addr != HPAGE_PMD_SIZE) {
+ VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
+ split_huge_page_pmd(vma->vm_mm, pmd);
+ } else if (zap_huge_pmd(tlb, vma, pmd))
+- continue;
++ goto next;
+ /* fall through */
+ }
+- if (pmd_none_or_clear_bad(pmd))
+- continue;
++ /*
++ * Here there can be other concurrent MADV_DONTNEED or
++ * trans huge page faults running, and if the pmd is
++ * none or trans huge it can change under us. This is
++ * because MADV_DONTNEED holds the mmap_sem in read
++ * mode.
++ */
++ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
++ goto next;
+ next = zap_pte_range(tlb, vma, pmd, addr, next, details);
++next:
+ cond_resched();
+ } while (pmd++, addr = next, addr != end);
+
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index c3fdbcb..b26aae2 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -512,7 +512,7 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+ do {
+ next = pmd_addr_end(addr, end);
+ split_huge_page_pmd(vma->vm_mm, pmd);
+- if (pmd_none_or_clear_bad(pmd))
++ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
+ continue;
+ if (check_pte_range(vma, pmd, addr, next, nodes,
+ flags, private))
+diff --git a/mm/mincore.c b/mm/mincore.c
+index 636a868..936b4ce 100644
+--- a/mm/mincore.c
++++ b/mm/mincore.c
+@@ -164,7 +164,7 @@ static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+ }
+ /* fall through */
+ }
+- if (pmd_none_or_clear_bad(pmd))
++ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
+ mincore_unmapped_range(vma, addr, next, vec);
+ else
+ mincore_pte_range(vma, pmd, addr, next, vec);
+diff --git a/mm/pagewalk.c b/mm/pagewalk.c
+index 2f5cf10..aa9701e 100644
+--- a/mm/pagewalk.c
++++ b/mm/pagewalk.c
+@@ -59,7 +59,7 @@ again:
+ continue;
+
+ split_huge_page_pmd(walk->mm, pmd);
+- if (pmd_none_or_clear_bad(pmd))
++ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
+ goto again;
+ err = walk_pte_range(pmd, addr, next, walk);
+ if (err)
+diff --git a/mm/slub.c b/mm/slub.c
+index 1a919f0..a99c785 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -3911,13 +3911,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+ if (kmem_cache_open(s, n,
+ size, align, flags, ctor)) {
+ list_add(&s->list, &slab_caches);
++ up_write(&slub_lock);
+ if (sysfs_slab_add(s)) {
++ down_write(&slub_lock);
+ list_del(&s->list);
+ kfree(n);
+ kfree(s);
+ goto err;
+ }
+- up_write(&slub_lock);
+ return s;
+ }
+ kfree(n);
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 61d7cde..a8bc7d3 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -353,29 +353,21 @@ static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
+
+ usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
+ usemap_count);
+- if (usemap) {
+- for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+- if (!present_section_nr(pnum))
+- continue;
+- usemap_map[pnum] = usemap;
+- usemap += size;
++ if (!usemap) {
++ usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
++ if (!usemap) {
++ printk(KERN_WARNING "%s: allocation failed\n", __func__);
++ return;
+ }
+- return;
+ }
+
+- usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
+- if (usemap) {
+- for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+- if (!present_section_nr(pnum))
+- continue;
+- usemap_map[pnum] = usemap;
+- usemap += size;
+- check_usemap_section_nr(nodeid, usemap_map[pnum]);
+- }
+- return;
++ for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
++ if (!present_section_nr(pnum))
++ continue;
++ usemap_map[pnum] = usemap;
++ usemap += size;
++ check_usemap_section_nr(nodeid, usemap_map[pnum]);
+ }
+-
+- printk(KERN_WARNING "%s: allocation failed\n", __func__);
+ }
+
+ #ifndef CONFIG_SPARSEMEM_VMEMMAP
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index b1cd120..2015a1e 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -931,9 +931,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+- if (unlikely(pmd_trans_huge(*pmd)))
+- continue;
+- if (pmd_none_or_clear_bad(pmd))
++ if (pmd_none_or_trans_huge_or_clear_bad(pmd))
+ continue;
+ ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
+ if (ret)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index c56cacf..55cd370 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3634,7 +3634,8 @@ EXPORT_SYMBOL(napi_gro_receive);
+ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+ {
+ __skb_pull(skb, skb_headlen(skb));
+- skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
++ /* restore the reserve we had after netdev_alloc_skb_ip_align() */
++ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
+ skb->vlan_tci = 0;
+ skb->dev = napi->dev;
+ skb->skb_iif = 0;
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 9083e82..2ef859a 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1116,6 +1116,8 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
+ .len = sizeof(struct ifla_vf_vlan) },
+ [IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
+ .len = sizeof(struct ifla_vf_tx_rate) },
++ [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY,
++ .len = sizeof(struct ifla_vf_spoofchk) },
+ };
+
+ static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index ec56271..f7f07e2 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1411,8 +1411,9 @@ alloc_new_skb:
+ */
+ skb->ip_summed = csummode;
+ skb->csum = 0;
+- /* reserve for fragmentation */
+- skb_reserve(skb, hh_len+sizeof(struct frag_hdr));
++ /* reserve for fragmentation and ipsec header */
++ skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
++ dst_exthdrlen);
+
+ if (sk->sk_type == SOCK_DGRAM)
+ skb_shinfo(skb)->tx_flags = tx_flags;
+@@ -1420,9 +1421,9 @@ alloc_new_skb:
+ /*
+ * Find where to start putting bytes
+ */
+- data = skb_put(skb, fraglen + dst_exthdrlen);
+- skb_set_network_header(skb, exthdrlen + dst_exthdrlen);
+- data += fragheaderlen + dst_exthdrlen;
++ data = skb_put(skb, fraglen);
++ skb_set_network_header(skb, exthdrlen);
++ data += fragheaderlen;
+ skb->transport_header = (skb->network_header +
+ fragheaderlen);
+ if (fraggap) {
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index b582a0a..059b9d9 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2446,8 +2446,12 @@ static int rt6_fill_node(struct net *net,
+
+ rcu_read_lock();
+ n = dst_get_neighbour(&rt->dst);
+- if (n)
+- NLA_PUT(skb, RTA_GATEWAY, 16, &n->primary_key);
++ if (n) {
++ if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) {
++ rcu_read_unlock();
++ goto nla_put_failure;
++ }
++ }
+ rcu_read_unlock();
+
+ if (rt->dst.dev)
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 8a90d75..b1bd16f 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -915,7 +915,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
+ goto end_put_sess;
+ }
+
+- inet = inet_sk(sk);
++ inet = inet_sk(tunnel->sock);
+ if (tunnel->version == 2) {
+ struct sockaddr_pppol2tp sp;
+ len = sizeof(sp);
+diff --git a/net/rds/send.c b/net/rds/send.c
+index e2d63c5..96531d4 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -935,7 +935,6 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+ /* Mirror Linux UDP mirror of BSD error message compatibility */
+ /* XXX: Perhaps MSG_MORE someday */
+ if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
+- printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 72ad836..4530a91 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -828,6 +828,8 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
+ {
+ ssize_t ret;
+
++ if (count == 0)
++ return -EINVAL;
+ if (copy_from_user(kaddr, buf, count))
+ return -EFAULT;
+ kaddr[count] = '\0';
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 00a1a2a..4e2b3b4 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -500,14 +500,18 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_next);
+ */
+ void rpc_wake_up(struct rpc_wait_queue *queue)
+ {
+- struct rpc_task *task, *next;
+ struct list_head *head;
+
+ spin_lock_bh(&queue->lock);
+ head = &queue->tasks[queue->maxpriority];
+ for (;;) {
+- list_for_each_entry_safe(task, next, head, u.tk_wait.list)
++ while (!list_empty(head)) {
++ struct rpc_task *task;
++ task = list_first_entry(head,
++ struct rpc_task,
++ u.tk_wait.list);
+ rpc_wake_up_task_queue_locked(queue, task);
++ }
+ if (head == &queue->tasks[0])
+ break;
+ head--;
+@@ -525,13 +529,16 @@ EXPORT_SYMBOL_GPL(rpc_wake_up);
+ */
+ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
+ {
+- struct rpc_task *task, *next;
+ struct list_head *head;
+
+ spin_lock_bh(&queue->lock);
+ head = &queue->tasks[queue->maxpriority];
+ for (;;) {
+- list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
++ while (!list_empty(head)) {
++ struct rpc_task *task;
++ task = list_first_entry(head,
++ struct rpc_task,
++ u.tk_wait.list);
+ task->tk_status = status;
+ rpc_wake_up_task_queue_locked(queue, task);
+ }
+diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
+index 39e02c5..2f6d11d 100644
+--- a/net/xfrm/xfrm_replay.c
++++ b/net/xfrm/xfrm_replay.c
+@@ -167,7 +167,7 @@ static void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq)
+ }
+
+ if (xfrm_aevent_is_on(xs_net(x)))
+- xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
++ x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ }
+
+ static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
+@@ -279,7 +279,7 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
+ replay_esn->bmp[nr] |= (1U << bitnr);
+
+ if (xfrm_aevent_is_on(xs_net(x)))
+- xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
++ x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ }
+
+ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
+@@ -473,7 +473,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
+ replay_esn->bmp[nr] |= (1U << bitnr);
+
+ if (xfrm_aevent_is_on(xs_net(x)))
+- xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
++ x->repl->notify(x, XFRM_REPLAY_UPDATE);
+ }
+
+ static struct xfrm_replay xfrm_replay_legacy = {
+diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
+index 4f554f2..063298a 100644
+--- a/security/integrity/ima/Kconfig
++++ b/security/integrity/ima/Kconfig
+@@ -9,7 +9,7 @@ config IMA
+ select CRYPTO_HMAC
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+- select TCG_TPM if !S390 && !UML
++ select TCG_TPM if HAS_IOMEM && !UML
+ select TCG_TIS if TCG_TPM
+ help
+ The Trusted Computing Group(TCG) runtime Integrity
+diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
+index c1da422..b58b4b1 100644
+--- a/sound/pci/hda/hda_eld.c
++++ b/sound/pci/hda/hda_eld.c
+@@ -385,8 +385,8 @@ error:
+ static void hdmi_print_pcm_rates(int pcm, char *buf, int buflen)
+ {
+ static unsigned int alsa_rates[] = {
+- 5512, 8000, 11025, 16000, 22050, 32000, 44100, 48000, 88200,
+- 96000, 176400, 192000, 384000
++ 5512, 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
++ 88200, 96000, 176400, 192000, 384000
+ };
+ int i, j;
+
+diff --git a/sound/soc/fsl/p1022_ds.c b/sound/soc/fsl/p1022_ds.c
+index 2c064a9..075677c 100644
+--- a/sound/soc/fsl/p1022_ds.c
++++ b/sound/soc/fsl/p1022_ds.c
+@@ -392,7 +392,8 @@ static int p1022_ds_probe(struct platform_device *pdev)
+ }
+
+ if (strcasecmp(sprop, "i2s-slave") == 0) {
+- mdata->dai_format = SND_SOC_DAIFMT_I2S;
++ mdata->dai_format = SND_SOC_DAIFMT_NB_NF |
++ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM;
+ mdata->codec_clk_direction = SND_SOC_CLOCK_OUT;
+ mdata->cpu_clk_direction = SND_SOC_CLOCK_IN;
+
+@@ -409,31 +410,38 @@ static int p1022_ds_probe(struct platform_device *pdev)
+ }
+ mdata->clk_frequency = be32_to_cpup(iprop);
+ } else if (strcasecmp(sprop, "i2s-master") == 0) {
+- mdata->dai_format = SND_SOC_DAIFMT_I2S;
++ mdata->dai_format = SND_SOC_DAIFMT_NB_NF |
++ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS;
+ mdata->codec_clk_direction = SND_SOC_CLOCK_IN;
+ mdata->cpu_clk_direction = SND_SOC_CLOCK_OUT;
+ } else if (strcasecmp(sprop, "lj-slave") == 0) {
+- mdata->dai_format = SND_SOC_DAIFMT_LEFT_J;
++ mdata->dai_format = SND_SOC_DAIFMT_NB_NF |
++ SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM;
+ mdata->codec_clk_direction = SND_SOC_CLOCK_OUT;
+ mdata->cpu_clk_direction = SND_SOC_CLOCK_IN;
+ } else if (strcasecmp(sprop, "lj-master") == 0) {
+- mdata->dai_format = SND_SOC_DAIFMT_LEFT_J;
++ mdata->dai_format = SND_SOC_DAIFMT_NB_NF |
++ SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBS_CFS;
+ mdata->codec_clk_direction = SND_SOC_CLOCK_IN;
+ mdata->cpu_clk_direction = SND_SOC_CLOCK_OUT;
+ } else if (strcasecmp(sprop, "rj-slave") == 0) {
+- mdata->dai_format = SND_SOC_DAIFMT_RIGHT_J;
++ mdata->dai_format = SND_SOC_DAIFMT_NB_NF |
++ SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBM_CFM;
+ mdata->codec_clk_direction = SND_SOC_CLOCK_OUT;
+ mdata->cpu_clk_direction = SND_SOC_CLOCK_IN;
+ } else if (strcasecmp(sprop, "rj-master") == 0) {
+- mdata->dai_format = SND_SOC_DAIFMT_RIGHT_J;
++ mdata->dai_format = SND_SOC_DAIFMT_NB_NF |
++ SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBS_CFS;
+ mdata->codec_clk_direction = SND_SOC_CLOCK_IN;
+ mdata->cpu_clk_direction = SND_SOC_CLOCK_OUT;
+ } else if (strcasecmp(sprop, "ac97-slave") == 0) {
+- mdata->dai_format = SND_SOC_DAIFMT_AC97;
++ mdata->dai_format = SND_SOC_DAIFMT_NB_NF |
++ SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBM_CFM;
+ mdata->codec_clk_direction = SND_SOC_CLOCK_OUT;
+ mdata->cpu_clk_direction = SND_SOC_CLOCK_IN;
+ } else if (strcasecmp(sprop, "ac97-master") == 0) {
+- mdata->dai_format = SND_SOC_DAIFMT_AC97;
++ mdata->dai_format = SND_SOC_DAIFMT_NB_NF |
++ SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBS_CFS;
+ mdata->codec_clk_direction = SND_SOC_CLOCK_IN;
+ mdata->cpu_clk_direction = SND_SOC_CLOCK_OUT;
+ } else {
+diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
+index 8ad93ee..b583e60 100644
+--- a/sound/soc/pxa/pxa-ssp.c
++++ b/sound/soc/pxa/pxa-ssp.c
+@@ -668,6 +668,38 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
+ return 0;
+ }
+
++static void pxa_ssp_set_running_bit(struct snd_pcm_substream *substream,
++ struct ssp_device *ssp, int value)
++{
++ uint32_t sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
++ uint32_t sscr1 = pxa_ssp_read_reg(ssp, SSCR1);
++ uint32_t sspsp = pxa_ssp_read_reg(ssp, SSPSP);
++ uint32_t sssr = pxa_ssp_read_reg(ssp, SSSR);
++
++ if (value && (sscr0 & SSCR0_SSE))
++ pxa_ssp_write_reg(ssp, SSCR0, sscr0 & ~SSCR0_SSE);
++
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++ if (value)
++ sscr1 |= SSCR1_TSRE;
++ else
++ sscr1 &= ~SSCR1_TSRE;
++ } else {
++ if (value)
++ sscr1 |= SSCR1_RSRE;
++ else
++ sscr1 &= ~SSCR1_RSRE;
++ }
++
++ pxa_ssp_write_reg(ssp, SSCR1, sscr1);
++
++ if (value) {
++ pxa_ssp_write_reg(ssp, SSSR, sssr);
++ pxa_ssp_write_reg(ssp, SSPSP, sspsp);
++ pxa_ssp_write_reg(ssp, SSCR0, sscr0 | SSCR0_SSE);
++ }
++}
++
+ static int pxa_ssp_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *cpu_dai)
+ {
+@@ -681,42 +713,21 @@ static int pxa_ssp_trigger(struct snd_pcm_substream *substream, int cmd,
+ pxa_ssp_enable(ssp);
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+- val = pxa_ssp_read_reg(ssp, SSCR1);
+- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+- val |= SSCR1_TSRE;
+- else
+- val |= SSCR1_RSRE;
+- pxa_ssp_write_reg(ssp, SSCR1, val);
++ pxa_ssp_set_running_bit(substream, ssp, 1);
+ val = pxa_ssp_read_reg(ssp, SSSR);
+ pxa_ssp_write_reg(ssp, SSSR, val);
+ break;
+ case SNDRV_PCM_TRIGGER_START:
+- val = pxa_ssp_read_reg(ssp, SSCR1);
+- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+- val |= SSCR1_TSRE;
+- else
+- val |= SSCR1_RSRE;
+- pxa_ssp_write_reg(ssp, SSCR1, val);
+- pxa_ssp_enable(ssp);
++ pxa_ssp_set_running_bit(substream, ssp, 1);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+- val = pxa_ssp_read_reg(ssp, SSCR1);
+- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+- val &= ~SSCR1_TSRE;
+- else
+- val &= ~SSCR1_RSRE;
+- pxa_ssp_write_reg(ssp, SSCR1, val);
++ pxa_ssp_set_running_bit(substream, ssp, 0);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ pxa_ssp_disable(ssp);
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+- val = pxa_ssp_read_reg(ssp, SSCR1);
+- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+- val &= ~SSCR1_TSRE;
+- else
+- val &= ~SSCR1_RSRE;
+- pxa_ssp_write_reg(ssp, SSCR1, val);
++ pxa_ssp_set_running_bit(substream, ssp, 0);
+ break;
+
+ default:
diff --git a/1014_linux-3.2.15.patch b/1014_linux-3.2.15.patch
new file mode 100644
index 00000000..b836a633
--- /dev/null
+++ b/1014_linux-3.2.15.patch
@@ -0,0 +1,2169 @@
+diff --git a/Makefile b/Makefile
+index afe4c7d..6195122 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
+index dbc59fa..379cdc7 100644
+--- a/arch/arm/mach-tegra/Kconfig
++++ b/arch/arm/mach-tegra/Kconfig
+@@ -61,11 +61,6 @@ config MACH_SEABOARD
+ config MACH_TEGRA_DT
+ bool "Generic Tegra board (FDT support)"
+ select USE_OF
+- select ARM_ERRATA_743622
+- select ARM_ERRATA_751472
+- select ARM_ERRATA_754322
+- select ARM_ERRATA_764369
+- select PL310_ERRATA_769419 if CACHE_L2X0
+ help
+ Support for generic nVidia Tegra boards using Flattened Device Tree
+
+diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
+index c247de0..1918d76 100644
+--- a/arch/m68k/mac/config.c
++++ b/arch/m68k/mac/config.c
+@@ -950,6 +950,9 @@ int __init mac_platform_init(void)
+ {
+ u8 *swim_base;
+
++ if (!MACH_IS_MAC)
++ return -ENODEV;
++
+ /*
+ * Serial devices
+ */
+diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
+index 431793e..34baa0e 100644
+--- a/arch/x86/include/asm/timer.h
++++ b/arch/x86/include/asm/timer.h
+@@ -57,14 +57,10 @@ DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
+
+ static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
+ {
+- unsigned long long quot;
+- unsigned long long rem;
+ int cpu = smp_processor_id();
+ unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
+- quot = (cyc >> CYC2NS_SCALE_FACTOR);
+- rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);
+- ns += quot * per_cpu(cyc2ns, cpu) +
+- ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR);
++ ns += mult_frac(cyc, per_cpu(cyc2ns, cpu),
++ (1UL << CYC2NS_SCALE_FACTOR));
+ return ns;
+ }
+
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index a25e276..6d939d7 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -3963,36 +3963,18 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi)
+ static __init int bad_ioapic(unsigned long address)
+ {
+ if (nr_ioapics >= MAX_IO_APICS) {
+- pr_warn("WARNING: Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
+- MAX_IO_APICS, nr_ioapics);
++ printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded "
++ "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
+ return 1;
+ }
+ if (!address) {
+- pr_warn("WARNING: Bogus (zero) I/O APIC address found in table, skipping!\n");
++ printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address"
++ " found in table, skipping!\n");
+ return 1;
+ }
+ return 0;
+ }
+
+-static __init int bad_ioapic_register(int idx)
+-{
+- union IO_APIC_reg_00 reg_00;
+- union IO_APIC_reg_01 reg_01;
+- union IO_APIC_reg_02 reg_02;
+-
+- reg_00.raw = io_apic_read(idx, 0);
+- reg_01.raw = io_apic_read(idx, 1);
+- reg_02.raw = io_apic_read(idx, 2);
+-
+- if (reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1) {
+- pr_warn("I/O APIC 0x%x registers return all ones, skipping!\n",
+- mpc_ioapic_addr(idx));
+- return 1;
+- }
+-
+- return 0;
+-}
+-
+ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
+ {
+ int idx = 0;
+@@ -4009,12 +3991,6 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
+ ioapics[idx].mp_config.apicaddr = address;
+
+ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
+-
+- if (bad_ioapic_register(idx)) {
+- clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
+- return;
+- }
+-
+ ioapics[idx].mp_config.apicid = io_apic_unique_id(id);
+ ioapics[idx].mp_config.apicver = io_apic_get_version(idx);
+
+@@ -4035,10 +4011,10 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
+ if (gsi_cfg->gsi_end >= gsi_top)
+ gsi_top = gsi_cfg->gsi_end + 1;
+
+- pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n",
+- idx, mpc_ioapic_id(idx),
+- mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
+- gsi_cfg->gsi_base, gsi_cfg->gsi_end);
++ printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
++ "GSI %d-%d\n", idx, mpc_ioapic_id(idx),
++ mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
++ gsi_cfg->gsi_base, gsi_cfg->gsi_end);
+
+ nr_ioapics++;
+ }
+diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
+index faba577..2f45c4c 100644
+--- a/arch/x86/kernel/kgdb.c
++++ b/arch/x86/kernel/kgdb.c
+@@ -43,6 +43,8 @@
+ #include <linux/smp.h>
+ #include <linux/nmi.h>
+ #include <linux/hw_breakpoint.h>
++#include <linux/uaccess.h>
++#include <linux/memory.h>
+
+ #include <asm/debugreg.h>
+ #include <asm/apicdef.h>
+@@ -740,6 +742,64 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+ regs->ip = ip;
+ }
+
++int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
++{
++ int err;
++ char opc[BREAK_INSTR_SIZE];
++
++ bpt->type = BP_BREAKPOINT;
++ err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
++ BREAK_INSTR_SIZE);
++ if (err)
++ return err;
++ err = probe_kernel_write((char *)bpt->bpt_addr,
++ arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
++#ifdef CONFIG_DEBUG_RODATA
++ if (!err)
++ return err;
++ /*
++ * It is safe to call text_poke() because normal kernel execution
++ * is stopped on all cores, so long as the text_mutex is not locked.
++ */
++ if (mutex_is_locked(&text_mutex))
++ return -EBUSY;
++ text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
++ BREAK_INSTR_SIZE);
++ err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
++ if (err)
++ return err;
++ if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
++ return -EINVAL;
++ bpt->type = BP_POKE_BREAKPOINT;
++#endif /* CONFIG_DEBUG_RODATA */
++ return err;
++}
++
++int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
++{
++#ifdef CONFIG_DEBUG_RODATA
++ int err;
++ char opc[BREAK_INSTR_SIZE];
++
++ if (bpt->type != BP_POKE_BREAKPOINT)
++ goto knl_write;
++ /*
++ * It is safe to call text_poke() because normal kernel execution
++ * is stopped on all cores, so long as the text_mutex is not locked.
++ */
++ if (mutex_is_locked(&text_mutex))
++ goto knl_write;
++ text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
++ err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
++ if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
++ goto knl_write;
++ return err;
++knl_write:
++#endif /* CONFIG_DEBUG_RODATA */
++ return probe_kernel_write((char *)bpt->bpt_addr,
++ (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
++}
++
+ struct kgdb_arch arch_kgdb_ops = {
+ /* Breakpoint instruction: */
+ .gdb_bpt_instr = { 0xcc },
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index 3fe298a..1ec515b 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -622,7 +622,8 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
+
+ if (cpu_khz) {
+ *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
+- *offset = ns_now - (tsc_now * *scale >> CYC2NS_SCALE_FACTOR);
++ *offset = ns_now - mult_frac(tsc_now, *scale,
++ (1UL << CYC2NS_SCALE_FACTOR));
+ }
+
+ sched_clock_idle_wakeup_event(0);
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 5671752..5a5b6e4 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -289,7 +289,7 @@ void bpf_jit_compile(struct sk_filter *fp)
+ EMIT2(0x24, K & 0xFF); /* and imm8,%al */
+ } else if (K >= 0xFFFF0000) {
+ EMIT2(0x66, 0x25); /* and imm16,%ax */
+- EMIT2(K, 2);
++ EMIT(K, 2);
+ } else {
+ EMIT1_off32(0x25, K); /* and imm32,%eax */
+ }
+diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
+index f8348ab..0ed97d8 100644
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -54,6 +54,16 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+ },
+ },
++ /* https://bugzilla.kernel.org/show_bug.cgi?id=42619 */
++ {
++ .callback = set_use_crs,
++ .ident = "MSI MS-7253",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
++ DMI_MATCH(DMI_BOARD_NAME, "MS-7253"),
++ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
++ },
++ },
+ {}
+ };
+
+diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
+index 6f5588e..4c531b4 100644
+--- a/drivers/acpi/acpica/tbfadt.c
++++ b/drivers/acpi/acpica/tbfadt.c
+@@ -350,10 +350,6 @@ static void acpi_tb_convert_fadt(void)
+ u32 address32;
+ u32 i;
+
+- /* Update the local FADT table header length */
+-
+- acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
+-
+ /*
+ * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary.
+ * Later code will always use the X 64-bit field. Also, check for an
+@@ -395,6 +391,10 @@ static void acpi_tb_convert_fadt(void)
+ acpi_gbl_FADT.boot_flags = 0;
+ }
+
++ /* Update the local FADT table header length */
++
++ acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
++
+ /*
+ * Expand the ACPI 1.0 32-bit addresses to the ACPI 2.0 64-bit "X"
+ * generic address structures as necessary. Later code will always use
+diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
+index 870550d..4da7d9d 100644
+--- a/drivers/acpi/processor_thermal.c
++++ b/drivers/acpi/processor_thermal.c
+@@ -58,6 +58,27 @@ ACPI_MODULE_NAME("processor_thermal");
+ static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
+ static unsigned int acpi_thermal_cpufreq_is_init = 0;
+
++#define reduction_pctg(cpu) \
++ per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
++
++/*
++ * Emulate "per package data" using per cpu data (which should really be
++ * provided elsewhere)
++ *
++ * Note we can lose a CPU on cpu hotunplug, in this case we forget the state
++ * temporarily. Fortunately that's not a big issue here (I hope)
++ */
++static int phys_package_first_cpu(int cpu)
++{
++ int i;
++ int id = topology_physical_package_id(cpu);
++
++ for_each_online_cpu(i)
++ if (topology_physical_package_id(i) == id)
++ return i;
++ return 0;
++}
++
+ static int cpu_has_cpufreq(unsigned int cpu)
+ {
+ struct cpufreq_policy policy;
+@@ -77,7 +98,7 @@ static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
+
+ max_freq = (
+ policy->cpuinfo.max_freq *
+- (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
++ (100 - reduction_pctg(policy->cpu) * 20)
+ ) / 100;
+
+ cpufreq_verify_within_limits(policy, 0, max_freq);
+@@ -103,16 +124,28 @@ static int cpufreq_get_cur_state(unsigned int cpu)
+ if (!cpu_has_cpufreq(cpu))
+ return 0;
+
+- return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
++ return reduction_pctg(cpu);
+ }
+
+ static int cpufreq_set_cur_state(unsigned int cpu, int state)
+ {
++ int i;
++
+ if (!cpu_has_cpufreq(cpu))
+ return 0;
+
+- per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
+- cpufreq_update_policy(cpu);
++ reduction_pctg(cpu) = state;
++
++ /*
++ * Update all the CPUs in the same package because they all
++ * contribute to the temperature and often share the same
++ * frequency.
++ */
++ for_each_online_cpu(i) {
++ if (topology_physical_package_id(i) ==
++ topology_physical_package_id(cpu))
++ cpufreq_update_policy(i);
++ }
+ return 0;
+ }
+
+@@ -120,10 +153,6 @@ void acpi_thermal_cpufreq_init(void)
+ {
+ int i;
+
+- for (i = 0; i < nr_cpu_ids; i++)
+- if (cpu_present(i))
+- per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;
+-
+ i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
+ CPUFREQ_POLICY_NOTIFIER);
+ if (!i)
+diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
+index a4d6cb0..6595180 100644
+--- a/drivers/dma/ioat/dma.c
++++ b/drivers/dma/ioat/dma.c
+@@ -548,9 +548,9 @@ void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
+ PCI_DMA_TODEVICE, flags, 0);
+ }
+
+-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
++dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
+ {
+- unsigned long phys_complete;
++ dma_addr_t phys_complete;
+ u64 completion;
+
+ completion = *chan->completion;
+@@ -571,7 +571,7 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
+ }
+
+ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
+- unsigned long *phys_complete)
++ dma_addr_t *phys_complete)
+ {
+ *phys_complete = ioat_get_current_completion(chan);
+ if (*phys_complete == chan->last_completion)
+@@ -582,14 +582,14 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
+ return true;
+ }
+
+-static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
++static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
+ {
+ struct ioat_chan_common *chan = &ioat->base;
+ struct list_head *_desc, *n;
+ struct dma_async_tx_descriptor *tx;
+
+- dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
+- __func__, phys_complete);
++ dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
++ __func__, (unsigned long long) phys_complete);
+ list_for_each_safe(_desc, n, &ioat->used_desc) {
+ struct ioat_desc_sw *desc;
+
+@@ -655,7 +655,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
+ static void ioat1_cleanup(struct ioat_dma_chan *ioat)
+ {
+ struct ioat_chan_common *chan = &ioat->base;
+- unsigned long phys_complete;
++ dma_addr_t phys_complete;
+
+ prefetch(chan->completion);
+
+@@ -701,7 +701,7 @@ static void ioat1_timer_event(unsigned long data)
+ mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+ spin_unlock_bh(&ioat->desc_lock);
+ } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
+- unsigned long phys_complete;
++ dma_addr_t phys_complete;
+
+ spin_lock_bh(&ioat->desc_lock);
+ /* if we haven't made progress and we have already
+diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
+index 5216c8a..8bebddd 100644
+--- a/drivers/dma/ioat/dma.h
++++ b/drivers/dma/ioat/dma.h
+@@ -88,7 +88,7 @@ struct ioatdma_device {
+ struct ioat_chan_common {
+ struct dma_chan common;
+ void __iomem *reg_base;
+- unsigned long last_completion;
++ dma_addr_t last_completion;
+ spinlock_t cleanup_lock;
+ dma_cookie_t completed_cookie;
+ unsigned long state;
+@@ -333,7 +333,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device);
+ void __devexit ioat_dma_remove(struct ioatdma_device *device);
+ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
+ void __iomem *iobase);
+-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
++dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
+ void ioat_init_channel(struct ioatdma_device *device,
+ struct ioat_chan_common *chan, int idx);
+ enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+@@ -341,7 +341,7 @@ enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+ void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
+ size_t len, struct ioat_dma_descriptor *hw);
+ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
+- unsigned long *phys_complete);
++ dma_addr_t *phys_complete);
+ void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
+ void ioat_kobject_del(struct ioatdma_device *device);
+ extern const struct sysfs_ops ioat_sysfs_ops;
+diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
+index 5d65f83..cb8864d 100644
+--- a/drivers/dma/ioat/dma_v2.c
++++ b/drivers/dma/ioat/dma_v2.c
+@@ -126,7 +126,7 @@ static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
+ spin_unlock_bh(&ioat->prep_lock);
+ }
+
+-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
++static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
+ {
+ struct ioat_chan_common *chan = &ioat->base;
+ struct dma_async_tx_descriptor *tx;
+@@ -178,7 +178,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
+ {
+ struct ioat_chan_common *chan = &ioat->base;
+- unsigned long phys_complete;
++ dma_addr_t phys_complete;
+
+ spin_lock_bh(&chan->cleanup_lock);
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+@@ -259,7 +259,7 @@ int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
+ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
+ {
+ struct ioat_chan_common *chan = &ioat->base;
+- unsigned long phys_complete;
++ dma_addr_t phys_complete;
+
+ ioat2_quiesce(chan, 0);
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+@@ -274,7 +274,7 @@ void ioat2_timer_event(unsigned long data)
+ struct ioat_chan_common *chan = &ioat->base;
+
+ if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
+- unsigned long phys_complete;
++ dma_addr_t phys_complete;
+ u64 status;
+
+ status = ioat_chansts(chan);
+diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
+index f519c93..2dbf32b 100644
+--- a/drivers/dma/ioat/dma_v3.c
++++ b/drivers/dma/ioat/dma_v3.c
+@@ -256,7 +256,7 @@ static bool desc_has_ext(struct ioat_ring_ent *desc)
+ * The difference from the dma_v2.c __cleanup() is that this routine
+ * handles extended descriptors and dma-unmapping raid operations.
+ */
+-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
++static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
+ {
+ struct ioat_chan_common *chan = &ioat->base;
+ struct ioat_ring_ent *desc;
+@@ -314,7 +314,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
+ {
+ struct ioat_chan_common *chan = &ioat->base;
+- unsigned long phys_complete;
++ dma_addr_t phys_complete;
+
+ spin_lock_bh(&chan->cleanup_lock);
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+@@ -333,7 +333,7 @@ static void ioat3_cleanup_event(unsigned long data)
+ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
+ {
+ struct ioat_chan_common *chan = &ioat->base;
+- unsigned long phys_complete;
++ dma_addr_t phys_complete;
+
+ ioat2_quiesce(chan, 0);
+ if (ioat_cleanup_preamble(chan, &phys_complete))
+@@ -348,7 +348,7 @@ static void ioat3_timer_event(unsigned long data)
+ struct ioat_chan_common *chan = &ioat->base;
+
+ if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
+- unsigned long phys_complete;
++ dma_addr_t phys_complete;
+ u64 status;
+
+ status = ioat_chansts(chan);
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 80fe39d..dd58373 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -610,9 +610,13 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+ return -EINVAL;
+
+ /* Need to resize the fb object !!! */
+- if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
++ if (var->bits_per_pixel > fb->bits_per_pixel ||
++ var->xres > fb->width || var->yres > fb->height ||
++ var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
+ DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
+- "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
++ "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
++ var->xres, var->yres, var->bits_per_pixel,
++ var->xres_virtual, var->yres_virtual,
+ fb->width, fb->height, fb->bits_per_pixel);
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index d04597d..e52b705 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -508,7 +508,9 @@ static int i915_drm_thaw(struct drm_device *dev)
+ drm_irq_install(dev);
+
+ /* Resume the modeset for every activated CRTC */
++ mutex_lock(&dev->mode_config.mutex);
+ drm_helper_resume_force_mode(dev);
++ mutex_unlock(&dev->mode_config.mutex);
+
+ if (IS_IRONLAKE_M(dev))
+ ironlake_enable_rc6(dev);
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 1608d2a..2f99fd4 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2312,6 +2312,7 @@
+ #define PIPECONF_DISABLE 0
+ #define PIPECONF_DOUBLE_WIDE (1<<30)
+ #define I965_PIPECONF_ACTIVE (1<<30)
++#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
+ #define PIPECONF_SINGLE_WIDE 0
+ #define PIPECONF_PIPE_UNLOCKED 0
+ #define PIPECONF_PIPE_LOCKED (1<<25)
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 63880e2..22efb08 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -24,6 +24,7 @@
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
++#include <linux/dmi.h>
+ #include <drm/drm_dp_helper.h>
+ #include "drmP.h"
+ #include "drm.h"
+@@ -621,6 +622,26 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
+ dev_priv->edp.bpp = 18;
+ }
+
++static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
++{
++ DRM_DEBUG_KMS("Falling back to manually reading VBT from "
++ "VBIOS ROM for %s\n",
++ id->ident);
++ return 1;
++}
++
++static const struct dmi_system_id intel_no_opregion_vbt[] = {
++ {
++ .callback = intel_no_opregion_vbt_callback,
++ .ident = "ThinkCentre A57",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "97027RG"),
++ },
++ },
++ { }
++};
++
+ /**
+ * intel_parse_bios - find VBT and initialize settings from the BIOS
+ * @dev: DRM device
+@@ -641,7 +662,7 @@ intel_parse_bios(struct drm_device *dev)
+ init_vbt_defaults(dev_priv);
+
+ /* XXX Should this validation be moved to intel_opregion.c? */
+- if (dev_priv->opregion.vbt) {
++ if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt) {
+ struct vbt_header *vbt = dev_priv->opregion.vbt;
+ if (memcmp(vbt->signature, "$VBT", 4) == 0) {
+ DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 9ec9755..9011f48 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -7278,6 +7278,12 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg, val;
+
++ /* Clear any frame start delays used for debugging left by the BIOS */
++ for_each_pipe(pipe) {
++ reg = PIPECONF(pipe);
++ I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
++ }
++
+ if (HAS_PCH_SPLIT(dev))
+ return;
+
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index b83f745..583c2d0 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -731,6 +731,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+ },
+ },
++ {
++ .callback = intel_no_lvds_dmi_callback,
++ .ident = "MSI Wind Box DC500",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
++ DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
++ },
++ },
+
+ { } /* terminating entry */
+ };
+diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
+index 14cc88a..3a05cdb 100644
+--- a/drivers/gpu/drm/radeon/atom.c
++++ b/drivers/gpu/drm/radeon/atom.c
+@@ -1304,8 +1304,11 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
+
+ int atom_asic_init(struct atom_context *ctx)
+ {
++ struct radeon_device *rdev = ctx->card->dev->dev_private;
+ int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
+ uint32_t ps[16];
++ int ret;
++
+ memset(ps, 0, 64);
+
+ ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
+@@ -1315,7 +1318,17 @@ int atom_asic_init(struct atom_context *ctx)
+
+ if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
+ return 1;
+- return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
++ ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
++ if (ret)
++ return ret;
++
++ memset(ps, 0, 64);
++
++ if (rdev->family < CHIP_R600) {
++ if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
++ atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
++ }
++ return ret;
+ }
+
+ void atom_destroy(struct atom_context *ctx)
+diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
+index 93cfe20..25fea63 100644
+--- a/drivers/gpu/drm/radeon/atom.h
++++ b/drivers/gpu/drm/radeon/atom.h
+@@ -44,6 +44,7 @@
+ #define ATOM_CMD_SETSCLK 0x0A
+ #define ATOM_CMD_SETMCLK 0x0B
+ #define ATOM_CMD_SETPCLK 0x0C
++#define ATOM_CMD_SPDFANCNTL 0x39
+
+ #define ATOM_DATA_FWI_PTR 0xC
+ #define ATOM_DATA_IIO_PTR 0x32
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 5c74179..20d5852 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -1033,8 +1033,9 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
+ {
+ int r;
+
+- if (pci_enable_msi(iommu->dev))
+- return 1;
++ r = pci_enable_msi(iommu->dev);
++ if (r)
++ return r;
+
+ r = request_threaded_irq(iommu->dev->irq,
+ amd_iommu_int_handler,
+@@ -1044,24 +1045,33 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
+
+ if (r) {
+ pci_disable_msi(iommu->dev);
+- return 1;
++ return r;
+ }
+
+ iommu->int_enabled = true;
+- iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
+
+ return 0;
+ }
+
+ static int iommu_init_msi(struct amd_iommu *iommu)
+ {
++ int ret;
++
+ if (iommu->int_enabled)
+- return 0;
++ goto enable_faults;
+
+ if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
+- return iommu_setup_msi(iommu);
++ ret = iommu_setup_msi(iommu);
++ else
++ ret = -ENODEV;
+
+- return 1;
++ if (ret)
++ return ret;
++
++enable_faults:
++ iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
++
++ return 0;
+ }
+
+ /****************************************************************************
+diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
+index 3eee45f..83f4988 100644
+--- a/drivers/mfd/twl6030-irq.c
++++ b/drivers/mfd/twl6030-irq.c
+@@ -187,8 +187,17 @@ static int twl6030_irq_thread(void *data)
+ }
+ local_irq_enable();
+ }
+- ret = twl_i2c_write(TWL_MODULE_PIH, sts.bytes,
+- REG_INT_STS_A, 3); /* clear INT_STS_A */
++
++ /*
++ * NOTE:
++ * Simulation confirms that documentation is wrong w.r.t the
++ * interrupt status clear operation. A single *byte* write to
++ * any one of STS_A to STS_C register results in all three
++ * STS registers being reset. Since it does not matter which
++ * value is written, all three registers are cleared on a
++ * single byte write, so we just use 0x0 to clear.
++ */
++ ret = twl_i2c_write_u8(TWL_MODULE_PIH, 0x00, REG_INT_STS_A);
+ if (ret)
+ pr_warning("twl6030: I2C error in clearing PIH ISR\n");
+
+diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
+index 3f7ad83..3aa9a96 100644
+--- a/drivers/misc/kgdbts.c
++++ b/drivers/misc/kgdbts.c
+@@ -134,12 +134,17 @@ static int force_hwbrks;
+ static int hwbreaks_ok;
+ static int hw_break_val;
+ static int hw_break_val2;
++static int cont_instead_of_sstep;
++static unsigned long cont_thread_id;
++static unsigned long sstep_thread_id;
+ #if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC)
+ static int arch_needs_sstep_emulation = 1;
+ #else
+ static int arch_needs_sstep_emulation;
+ #endif
++static unsigned long cont_addr;
+ static unsigned long sstep_addr;
++static int restart_from_top_after_write;
+ static int sstep_state;
+
+ /* Storage for the registers, in GDB format. */
+@@ -187,7 +192,8 @@ static int kgdbts_unreg_thread(void *ptr)
+ */
+ while (!final_ack)
+ msleep_interruptible(1500);
+-
++ /* Pause for any other threads to exit after final ack. */
++ msleep_interruptible(1000);
+ if (configured)
+ kgdb_unregister_io_module(&kgdbts_io_ops);
+ configured = 0;
+@@ -211,7 +217,7 @@ static unsigned long lookup_addr(char *arg)
+ if (!strcmp(arg, "kgdbts_break_test"))
+ addr = (unsigned long)kgdbts_break_test;
+ else if (!strcmp(arg, "sys_open"))
+- addr = (unsigned long)sys_open;
++ addr = (unsigned long)do_sys_open;
+ else if (!strcmp(arg, "do_fork"))
+ addr = (unsigned long)do_fork;
+ else if (!strcmp(arg, "hw_break_val"))
+@@ -283,6 +289,16 @@ static void hw_break_val_write(void)
+ hw_break_val++;
+ }
+
++static int get_thread_id_continue(char *put_str, char *arg)
++{
++ char *ptr = &put_str[11];
++
++ if (put_str[1] != 'T' || put_str[2] != '0')
++ return 1;
++ kgdb_hex2long(&ptr, &cont_thread_id);
++ return 0;
++}
++
+ static int check_and_rewind_pc(char *put_str, char *arg)
+ {
+ unsigned long addr = lookup_addr(arg);
+@@ -299,13 +315,21 @@ static int check_and_rewind_pc(char *put_str, char *arg)
+ if (addr + BREAK_INSTR_SIZE == ip)
+ offset = -BREAK_INSTR_SIZE;
+ #endif
+- if (strcmp(arg, "silent") && ip + offset != addr) {
++
++ if (arch_needs_sstep_emulation && sstep_addr &&
++ ip + offset == sstep_addr &&
++ ((!strcmp(arg, "sys_open") || !strcmp(arg, "do_fork")))) {
++ /* This is special case for emulated single step */
++ v2printk("Emul: rewind hit single step bp\n");
++ restart_from_top_after_write = 1;
++ } else if (strcmp(arg, "silent") && ip + offset != addr) {
+ eprintk("kgdbts: BP mismatch %lx expected %lx\n",
+ ip + offset, addr);
+ return 1;
+ }
+ /* Readjust the instruction pointer if needed */
+ ip += offset;
++ cont_addr = ip;
+ #ifdef GDB_ADJUSTS_BREAK_OFFSET
+ instruction_pointer_set(&kgdbts_regs, ip);
+ #endif
+@@ -315,6 +339,8 @@ static int check_and_rewind_pc(char *put_str, char *arg)
+ static int check_single_step(char *put_str, char *arg)
+ {
+ unsigned long addr = lookup_addr(arg);
++ static int matched_id;
++
+ /*
+ * From an arch indepent point of view the instruction pointer
+ * should be on a different instruction
+@@ -324,6 +350,29 @@ static int check_single_step(char *put_str, char *arg)
+ gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs);
+ v2printk("Singlestep stopped at IP: %lx\n",
+ instruction_pointer(&kgdbts_regs));
++
++ if (sstep_thread_id != cont_thread_id) {
++ /*
++ * Ensure we stopped in the same thread id as before, else the
++ * debugger should continue until the original thread that was
++ * single stepped is scheduled again, emulating gdb's behavior.
++ */
++ v2printk("ThrID does not match: %lx\n", cont_thread_id);
++ if (arch_needs_sstep_emulation) {
++ if (matched_id &&
++ instruction_pointer(&kgdbts_regs) != addr)
++ goto continue_test;
++ matched_id++;
++ ts.idx -= 2;
++ sstep_state = 0;
++ return 0;
++ }
++ cont_instead_of_sstep = 1;
++ ts.idx -= 4;
++ return 0;
++ }
++continue_test:
++ matched_id = 0;
+ if (instruction_pointer(&kgdbts_regs) == addr) {
+ eprintk("kgdbts: SingleStep failed at %lx\n",
+ instruction_pointer(&kgdbts_regs));
+@@ -365,10 +414,40 @@ static int got_break(char *put_str, char *arg)
+ return 1;
+ }
+
++static void get_cont_catch(char *arg)
++{
++ /* Always send detach because the test is completed at this point */
++ fill_get_buf("D");
++}
++
++static int put_cont_catch(char *put_str, char *arg)
++{
++ /* This is at the end of the test and we catch any and all input */
++ v2printk("kgdbts: cleanup task: %lx\n", sstep_thread_id);
++ ts.idx--;
++ return 0;
++}
++
++static int emul_reset(char *put_str, char *arg)
++{
++ if (strncmp(put_str, "$OK", 3))
++ return 1;
++ if (restart_from_top_after_write) {
++ restart_from_top_after_write = 0;
++ ts.idx = -1;
++ }
++ return 0;
++}
++
+ static void emul_sstep_get(char *arg)
+ {
+ if (!arch_needs_sstep_emulation) {
+- fill_get_buf(arg);
++ if (cont_instead_of_sstep) {
++ cont_instead_of_sstep = 0;
++ fill_get_buf("c");
++ } else {
++ fill_get_buf(arg);
++ }
+ return;
+ }
+ switch (sstep_state) {
+@@ -398,9 +477,11 @@ static void emul_sstep_get(char *arg)
+ static int emul_sstep_put(char *put_str, char *arg)
+ {
+ if (!arch_needs_sstep_emulation) {
+- if (!strncmp(put_str+1, arg, 2))
+- return 0;
+- return 1;
++ char *ptr = &put_str[11];
++ if (put_str[1] != 'T' || put_str[2] != '0')
++ return 1;
++ kgdb_hex2long(&ptr, &sstep_thread_id);
++ return 0;
+ }
+ switch (sstep_state) {
+ case 1:
+@@ -411,8 +492,7 @@ static int emul_sstep_put(char *put_str, char *arg)
+ v2printk("Stopped at IP: %lx\n",
+ instruction_pointer(&kgdbts_regs));
+ /* Want to stop at IP + break instruction size by default */
+- sstep_addr = instruction_pointer(&kgdbts_regs) +
+- BREAK_INSTR_SIZE;
++ sstep_addr = cont_addr + BREAK_INSTR_SIZE;
+ break;
+ case 2:
+ if (strncmp(put_str, "$OK", 3)) {
+@@ -424,6 +504,9 @@ static int emul_sstep_put(char *put_str, char *arg)
+ if (strncmp(put_str, "$T0", 3)) {
+ eprintk("kgdbts: failed continue sstep\n");
+ return 1;
++ } else {
++ char *ptr = &put_str[11];
++ kgdb_hex2long(&ptr, &sstep_thread_id);
+ }
+ break;
+ case 4:
+@@ -502,10 +585,10 @@ static struct test_struct bad_read_test[] = {
+ static struct test_struct singlestep_break_test[] = {
+ { "?", "S0*" }, /* Clear break points */
+ { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
+- { "c", "T0*", }, /* Continue */
++ { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
++ { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */
+ { "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
+ { "write", "OK", write_regs }, /* Write registers */
+- { "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */
+ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
+ { "g", "kgdbts_break_test", NULL, check_single_step },
+ { "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
+@@ -523,16 +606,16 @@ static struct test_struct singlestep_break_test[] = {
+ static struct test_struct do_fork_test[] = {
+ { "?", "S0*" }, /* Clear break points */
+ { "do_fork", "OK", sw_break, }, /* set sw breakpoint */
+- { "c", "T0*", }, /* Continue */
+- { "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */
+- { "write", "OK", write_regs }, /* Write registers */
++ { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
+ { "do_fork", "OK", sw_rem_break }, /*remove breakpoint */
++ { "g", "do_fork", NULL, check_and_rewind_pc }, /* check location */
++ { "write", "OK", write_regs, emul_reset }, /* Write registers */
+ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
+ { "g", "do_fork", NULL, check_single_step },
+ { "do_fork", "OK", sw_break, }, /* set sw breakpoint */
+ { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
+ { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
+- { "", "" },
++ { "", "", get_cont_catch, put_cont_catch },
+ };
+
+ /* Test for hitting a breakpoint at sys_open for what ever the number
+@@ -541,16 +624,16 @@ static struct test_struct do_fork_test[] = {
+ static struct test_struct sys_open_test[] = {
+ { "?", "S0*" }, /* Clear break points */
+ { "sys_open", "OK", sw_break, }, /* set sw breakpoint */
+- { "c", "T0*", }, /* Continue */
+- { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */
+- { "write", "OK", write_regs }, /* Write registers */
++ { "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
+ { "sys_open", "OK", sw_rem_break }, /*remove breakpoint */
++ { "g", "sys_open", NULL, check_and_rewind_pc }, /* check location */
++ { "write", "OK", write_regs, emul_reset }, /* Write registers */
+ { "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
+ { "g", "sys_open", NULL, check_single_step },
+ { "sys_open", "OK", sw_break, }, /* set sw breakpoint */
+ { "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
+ { "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
+- { "", "" },
++ { "", "", get_cont_catch, put_cont_catch },
+ };
+
+ /*
+@@ -693,8 +776,8 @@ static int run_simple_test(int is_get_char, int chr)
+ /* This callback is a put char which is when kgdb sends data to
+ * this I/O module.
+ */
+- if (ts.tst[ts.idx].get[0] == '\0' &&
+- ts.tst[ts.idx].put[0] == '\0') {
++ if (ts.tst[ts.idx].get[0] == '\0' && ts.tst[ts.idx].put[0] == '\0' &&
++ !ts.tst[ts.idx].get_handler) {
+ eprintk("kgdbts: ERROR: beyond end of test on"
+ " '%s' line %i\n", ts.name, ts.idx);
+ return 0;
+@@ -907,6 +990,17 @@ static void kgdbts_run_tests(void)
+ if (ptr)
+ sstep_test = simple_strtol(ptr+1, NULL, 10);
+
++ /* All HW break point tests */
++ if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) {
++ hwbreaks_ok = 1;
++ v1printk("kgdbts:RUN hw breakpoint test\n");
++ run_breakpoint_test(1);
++ v1printk("kgdbts:RUN hw write breakpoint test\n");
++ run_hw_break_test(1);
++ v1printk("kgdbts:RUN access write breakpoint test\n");
++ run_hw_break_test(0);
++ }
++
+ /* required internal KGDB tests */
+ v1printk("kgdbts:RUN plant and detach test\n");
+ run_plant_and_detach_test(0);
+@@ -924,35 +1018,11 @@ static void kgdbts_run_tests(void)
+
+ /* ===Optional tests=== */
+
+- /* All HW break point tests */
+- if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) {
+- hwbreaks_ok = 1;
+- v1printk("kgdbts:RUN hw breakpoint test\n");
+- run_breakpoint_test(1);
+- v1printk("kgdbts:RUN hw write breakpoint test\n");
+- run_hw_break_test(1);
+- v1printk("kgdbts:RUN access write breakpoint test\n");
+- run_hw_break_test(0);
+- }
+-
+ if (nmi_sleep) {
+ v1printk("kgdbts:RUN NMI sleep %i seconds test\n", nmi_sleep);
+ run_nmi_sleep_test(nmi_sleep);
+ }
+
+-#ifdef CONFIG_DEBUG_RODATA
+- /* Until there is an api to write to read-only text segments, use
+- * HW breakpoints for the remainder of any tests, else print a
+- * failure message if hw breakpoints do not work.
+- */
+- if (!(arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT && hwbreaks_ok)) {
+- eprintk("kgdbts: HW breakpoints do not work,"
+- "skipping remaining tests\n");
+- return;
+- }
+- force_hwbrks = 1;
+-#endif /* CONFIG_DEBUG_RODATA */
+-
+ /* If the do_fork test is run it will be the last test that is
+ * executed because a kernel thread will be spawned at the very
+ * end to unregister the debug hooks.
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index 9896933..0932024 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -480,7 +480,14 @@ err:
+ static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
+ unsigned int ns)
+ {
+- return (ns * (host->bus_hz / 1000000) + 999) / 1000;
++ /*
++ * It is easier here to use us instead of ns for the timeout,
++ * it prevents from overflows during calculation.
++ */
++ unsigned int us = DIV_ROUND_UP(ns, 1000);
++
++ /* Maximum clock frequency is host->bus_hz/2 */
++ return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
+ }
+
+ static void atmci_set_timeout(struct atmel_mci *host,
+diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
+index a81312c..31acb70 100644
+--- a/drivers/mmc/host/sdhci-dove.c
++++ b/drivers/mmc/host/sdhci-dove.c
+@@ -20,6 +20,7 @@
+ */
+
+ #include <linux/io.h>
++#include <linux/module.h>
+ #include <linux/mmc/host.h>
+
+ #include "sdhci-pltfm.h"
+diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
+index b78f231..8cd983c 100644
+--- a/drivers/mtd/devices/block2mtd.c
++++ b/drivers/mtd/devices/block2mtd.c
+@@ -284,6 +284,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
+ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+ dev->mtd.erasesize = erase_size;
+ dev->mtd.writesize = 1;
++ dev->mtd.writebufsize = PAGE_SIZE;
+ dev->mtd.type = MTD_RAM;
+ dev->mtd.flags = MTD_CAP_RAM;
+ dev->mtd.erase = block2mtd_erase;
+diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
+index 3a11ea6..5f12668 100644
+--- a/drivers/mtd/devices/lart.c
++++ b/drivers/mtd/devices/lart.c
+@@ -630,6 +630,7 @@ static int __init lart_flash_init (void)
+ mtd.name = module_name;
+ mtd.type = MTD_NORFLASH;
+ mtd.writesize = 1;
++ mtd.writebufsize = 4;
+ mtd.flags = MTD_CAP_NORFLASH;
+ mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN;
+ mtd.erasesize = FLASH_BLOCKSIZE_MAIN;
+diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
+index 884904d..9f9982f 100644
+--- a/drivers/mtd/devices/m25p80.c
++++ b/drivers/mtd/devices/m25p80.c
+@@ -932,6 +932,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
+ ppdata.of_node = spi->dev.of_node;
+ flash->mtd.dev.parent = &spi->dev;
+ flash->page_size = info->page_size;
++ flash->mtd.writebufsize = flash->page_size;
+
+ if (info->addr_width)
+ flash->addr_width = info->addr_width;
+diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
+index d38ef3b..9c35250 100644
+--- a/drivers/mtd/devices/sst25l.c
++++ b/drivers/mtd/devices/sst25l.c
+@@ -402,6 +402,7 @@ static int __devinit sst25l_probe(struct spi_device *spi)
+ flash->mtd.flags = MTD_CAP_NORFLASH;
+ flash->mtd.erasesize = flash_info->erase_size;
+ flash->mtd.writesize = flash_info->page_size;
++ flash->mtd.writebufsize = flash_info->page_size;
+ flash->mtd.size = flash_info->page_size * flash_info->nr_pages;
+ flash->mtd.erase = sst25l_erase;
+ flash->mtd.read = sst25l_read;
+diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
+index 3040901..696372f 100644
+--- a/drivers/mtd/maps/ixp4xx.c
++++ b/drivers/mtd/maps/ixp4xx.c
+@@ -182,6 +182,9 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
+ {
+ struct flash_platform_data *plat = dev->dev.platform_data;
+ struct ixp4xx_flash_info *info;
++ struct mtd_part_parser_data ppdata = {
++ .origin = dev->resource->start,
++ };
+ int err = -1;
+
+ if (!plat)
+@@ -247,7 +250,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
+ /* Use the fast version */
+ info->map.write = ixp4xx_write16;
+
+- err = mtd_device_parse_register(info->mtd, probes, dev->resource->start,
++ err = mtd_device_parse_register(info->mtd, probes, &ppdata,
+ plat->parts, plat->nr_parts);
+ if (err) {
+ printk(KERN_ERR "Could not parse partitions\n");
+diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
+index 4f10e27..764d468 100644
+--- a/drivers/mtd/maps/lantiq-flash.c
++++ b/drivers/mtd/maps/lantiq-flash.c
+@@ -45,6 +45,7 @@ struct ltq_mtd {
+ };
+
+ static char ltq_map_name[] = "ltq_nor";
++static const char *ltq_probe_types[] __devinitconst = { "cmdlinepart", NULL };
+
+ static map_word
+ ltq_read16(struct map_info *map, unsigned long adr)
+@@ -168,7 +169,7 @@ ltq_mtd_probe(struct platform_device *pdev)
+ cfi->addr_unlock1 ^= 1;
+ cfi->addr_unlock2 ^= 1;
+
+- err = mtd_device_parse_register(ltq_mtd->mtd, NULL, 0,
++ err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types, 0,
+ ltq_mtd_data->parts, ltq_mtd_data->nr_parts);
+ if (err) {
+ dev_err(&pdev->dev, "failed to add partitions\n");
+diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+index 493ec2f..f39f83e 100644
+--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
++++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+@@ -1124,7 +1124,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
+ chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
+
+ /* Do we have a flash based bad block table ? */
+- if (chip->options & NAND_BBT_USE_FLASH)
++ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ ret = nand_update_bbt(mtd, ofs);
+ else {
+ chipnr = (int)(ofs >> chip->chip_shift);
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index bf40741..3d55883 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -2794,7 +2794,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
+- (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
++ (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
++ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
++ !tp->pci_fn))
+ return;
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
+diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+index 4d9f84b..ada234a 100644
+--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
++++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+@@ -356,16 +356,15 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
+
+ if (prop)
+ tbiaddr = *prop;
+- }
+-
+- if (tbiaddr == -1) {
+- err = -EBUSY;
+
+- goto err_free_irqs;
++ if (tbiaddr == -1) {
++ err = -EBUSY;
++ goto err_free_irqs;
++ } else {
++ out_be32(tbipa, tbiaddr);
++ }
+ }
+
+- out_be32(tbipa, tbiaddr);
+-
+ err = of_mdiobus_register(new_bus, np);
+ if (err) {
+ printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index f612b35..7eb8a00 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -1766,13 +1766,14 @@ static int sky2_open(struct net_device *dev)
+
+ sky2_hw_up(sky2);
+
++ /* Enable interrupts from phy/mac for port */
++ imask = sky2_read32(hw, B0_IMSK);
++
+ if (hw->chip_id == CHIP_ID_YUKON_OPT ||
+ hw->chip_id == CHIP_ID_YUKON_PRM ||
+ hw->chip_id == CHIP_ID_YUKON_OP_2)
+ imask |= Y2_IS_PHY_QLNK; /* enable PHY Quick Link */
+
+- /* Enable interrupts from phy/mac for port */
+- imask = sky2_read32(hw, B0_IMSK);
+ imask |= portirq_msk[port];
+ sky2_write32(hw, B0_IMSK, imask);
+ sky2_read32(hw, B0_IMSK);
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 0cf2351..697cae3 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -6194,6 +6194,9 @@ static void rtl_shutdown(struct pci_dev *pdev)
+ {
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
++ struct device *d = &pdev->dev;
++
++ pm_runtime_get_sync(d);
+
+ rtl8169_net_suspend(dev);
+
+@@ -6215,6 +6218,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
++
++ pm_runtime_put_noidle(d);
+ }
+
+ static struct pci_driver rtl8169_pci_driver = {
+diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
+index 882f53f..82d43b2 100644
+--- a/drivers/net/usb/cdc_eem.c
++++ b/drivers/net/usb/cdc_eem.c
+@@ -93,6 +93,7 @@ static int eem_bind(struct usbnet *dev, struct usb_interface *intf)
+ /* no jumbogram (16K) support for now */
+
+ dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN;
++ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+ return 0;
+ }
+diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
+index 246b3bb..c1e6a44 100644
+--- a/drivers/net/usb/zaurus.c
++++ b/drivers/net/usb/zaurus.c
+@@ -332,6 +332,11 @@ static const struct usb_device_id products [] = {
+ .driver_info = ZAURUS_PXA_INFO,
+ },
+ {
++ /* Motorola Rokr E6 */
++ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6027, USB_CLASS_COMM,
++ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
++ .driver_info = (unsigned long) &bogus_mdlm_info,
++}, {
+ /* Motorola MOTOMAGX phones */
+ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6425, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+index 1f07558..f20678a 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+@@ -1968,7 +1968,7 @@ void rtl92c_phy_set_io(struct ieee80211_hw *hw)
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
+- dm_digtable.cur_igvalue = 0x17;
++ dm_digtable.cur_igvalue = 0x37;
+ rtl92c_dm_write_dig(hw);
+ break;
+ default:
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+index 0883349..2cf4c5f 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+@@ -3192,7 +3192,7 @@ static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ rtlphy->initgain_backup.xaagccore1 = de_digtable.cur_igvalue;
+- de_digtable.cur_igvalue = 0x17;
++ de_digtable.cur_igvalue = 0x37;
+ rtl92d_dm_write_dig(hw);
+ break;
+ default:
+diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
+index 1e5290b..110e4af 100644
+--- a/drivers/platform/x86/acer-wmi.c
++++ b/drivers/platform/x86/acer-wmi.c
+@@ -692,6 +692,7 @@ static const struct acpi_device_id norfkill_ids[] = {
+ { "VPC2004", 0},
+ { "IBM0068", 0},
+ { "LEN0068", 0},
++ { "SNY5001", 0}, /* sony-laptop in charge */
+ { "", 0},
+ };
+
+diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
+index b00c176..d21e8f5 100644
+--- a/drivers/pnp/pnpacpi/core.c
++++ b/drivers/pnp/pnpacpi/core.c
+@@ -321,9 +321,14 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
+ {
+ struct acpi_device *acpi = to_acpi_device(dev);
+ struct pnp_dev *pnp = _pnp;
++ struct device *physical_device;
++
++ physical_device = acpi_get_physical_device(acpi->handle);
++ if (physical_device)
++ put_device(physical_device);
+
+ /* true means it matched */
+- return !acpi_get_physical_device(acpi->handle)
++ return !physical_device
+ && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
+ }
+
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index cdb774b..5660916 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2666,7 +2666,7 @@ static int transport_generic_cmd_sequencer(
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+
+ if (target_check_write_same_discard(&cdb[10], dev) < 0)
+- goto out_invalid_cdb_field;
++ goto out_unsupported_cdb;
+ if (!passthrough)
+ cmd->execute_task = target_emulate_write_same;
+ break;
+@@ -2949,7 +2949,7 @@ static int transport_generic_cmd_sequencer(
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+
+ if (target_check_write_same_discard(&cdb[1], dev) < 0)
+- goto out_invalid_cdb_field;
++ goto out_unsupported_cdb;
+ if (!passthrough)
+ cmd->execute_task = target_emulate_write_same;
+ break;
+@@ -2972,7 +2972,7 @@ static int transport_generic_cmd_sequencer(
+ * of byte 1 bit 3 UNMAP instead of original reserved field
+ */
+ if (target_check_write_same_discard(&cdb[1], dev) < 0)
+- goto out_invalid_cdb_field;
++ goto out_unsupported_cdb;
+ if (!passthrough)
+ cmd->execute_task = target_emulate_write_same;
+ break;
+diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
+index e05c551..be7ed12 100644
+--- a/drivers/target/tcm_fc/tcm_fc.h
++++ b/drivers/target/tcm_fc/tcm_fc.h
+@@ -124,6 +124,7 @@ struct ft_cmd {
+ /* Local sense buffer */
+ unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];
+ u32 was_ddp_setup:1; /* Set only if ddp is setup */
++ u32 aborted:1; /* Set if aborted by reset or timeout */
+ struct scatterlist *sg; /* Set only if DDP is setup */
+ u32 sg_cnt; /* No. of item in scatterlist */
+ };
+diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
+index 754b669..d95cfe2 100644
+--- a/drivers/target/tcm_fc/tfc_cmd.c
++++ b/drivers/target/tcm_fc/tfc_cmd.c
+@@ -130,6 +130,8 @@ int ft_queue_status(struct se_cmd *se_cmd)
+ struct fc_exch *ep;
+ size_t len;
+
++ if (cmd->aborted)
++ return 0;
+ ft_dump_cmd(cmd, __func__);
+ ep = fc_seq_exch(cmd->seq);
+ lport = ep->lp;
+@@ -196,6 +198,8 @@ int ft_write_pending(struct se_cmd *se_cmd)
+
+ ft_dump_cmd(cmd, __func__);
+
++ if (cmd->aborted)
++ return 0;
+ ep = fc_seq_exch(cmd->seq);
+ lport = ep->lp;
+ fp = fc_frame_alloc(lport, sizeof(*txrdy));
+@@ -266,10 +270,10 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
+ struct ft_cmd *cmd = arg;
+ struct fc_frame_header *fh;
+
+- if (IS_ERR(fp)) {
++ if (unlikely(IS_ERR(fp))) {
+ /* XXX need to find cmd if queued */
+ cmd->seq = NULL;
+- transport_generic_free_cmd(&cmd->se_cmd, 0);
++ cmd->aborted = true;
+ return;
+ }
+
+@@ -447,6 +451,8 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
+ struct se_tmr_req *tmr = se_cmd->se_tmr_req;
+ enum fcp_resp_rsp_codes code;
+
++ if (cmd->aborted)
++ return 0;
+ switch (tmr->response) {
+ case TMR_FUNCTION_COMPLETE:
+ code = FCP_TMF_CMPL;
+diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
+index 9402b73..7962325 100644
+--- a/drivers/target/tcm_fc/tfc_conf.c
++++ b/drivers/target/tcm_fc/tfc_conf.c
+@@ -304,6 +304,7 @@ static struct se_portal_group *ft_add_tpg(
+ {
+ struct ft_lport_acl *lacl;
+ struct ft_tpg *tpg;
++ struct workqueue_struct *wq;
+ unsigned long index;
+ int ret;
+
+@@ -325,18 +326,20 @@ static struct se_portal_group *ft_add_tpg(
+ tpg->lport_acl = lacl;
+ INIT_LIST_HEAD(&tpg->lun_list);
+
+- ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
+- tpg, TRANSPORT_TPG_TYPE_NORMAL);
+- if (ret < 0) {
++ wq = alloc_workqueue("tcm_fc", 0, 1);
++ if (!wq) {
+ kfree(tpg);
+ return NULL;
+ }
+
+- tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1);
+- if (!tpg->workqueue) {
++ ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
++ tpg, TRANSPORT_TPG_TYPE_NORMAL);
++ if (ret < 0) {
++ destroy_workqueue(wq);
+ kfree(tpg);
+ return NULL;
+ }
++ tpg->workqueue = wq;
+
+ mutex_lock(&ft_lport_lock);
+ list_add_tail(&tpg->list, &lacl->tpg_list);
+diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
+index 1369b1c..ada131c 100644
+--- a/drivers/target/tcm_fc/tfc_io.c
++++ b/drivers/target/tcm_fc/tfc_io.c
+@@ -84,6 +84,8 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
+ void *from;
+ void *to = NULL;
+
++ if (cmd->aborted)
++ return 0;
+ ep = fc_seq_exch(cmd->seq);
+ lport = ep->lp;
+ cmd->seq = lport->tt.seq_start_next(cmd->seq);
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 159fcc5..0f7dc22 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -835,13 +835,21 @@ cifs_posix_lock_set(struct file *file, struct file_lock *flock)
+ if ((flock->fl_flags & FL_POSIX) == 0)
+ return rc;
+
++try_again:
+ mutex_lock(&cinode->lock_mutex);
+ if (!cinode->can_cache_brlcks) {
+ mutex_unlock(&cinode->lock_mutex);
+ return rc;
+ }
+- rc = posix_lock_file_wait(file, flock);
++
++ rc = posix_lock_file(file, flock, NULL);
+ mutex_unlock(&cinode->lock_mutex);
++ if (rc == FILE_LOCK_DEFERRED) {
++ rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
++ if (!rc)
++ goto try_again;
++ locks_delete_block(flock);
++ }
+ return rc;
+ }
+
+diff --git a/fs/locks.c b/fs/locks.c
+index 637694b..0d68f1f 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -510,12 +510,13 @@ static void __locks_delete_block(struct file_lock *waiter)
+
+ /*
+ */
+-static void locks_delete_block(struct file_lock *waiter)
++void locks_delete_block(struct file_lock *waiter)
+ {
+ lock_flocks();
+ __locks_delete_block(waiter);
+ unlock_flocks();
+ }
++EXPORT_SYMBOL(locks_delete_block);
+
+ /* Insert waiter into blocker's block list.
+ * We use a circular list so that processes can be easily woken up in
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index d945700..757293b 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3618,7 +3618,7 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
+ if (acl_len > buflen)
+ goto out_free;
+ _copy_from_pages(buf, pages, res.acl_data_offset,
+- res.acl_len);
++ acl_len);
+ }
+ ret = acl_len;
+ out_free:
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 10b2288..11f1951 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1203,6 +1203,7 @@ extern int vfs_setlease(struct file *, long, struct file_lock **);
+ extern int lease_modify(struct file_lock **, int);
+ extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
+ extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
++extern void locks_delete_block(struct file_lock *waiter);
+ extern void lock_flocks(void);
+ extern void unlock_flocks(void);
+ #else /* !CONFIG_FILE_LOCKING */
+@@ -1347,6 +1348,10 @@ static inline int lock_may_write(struct inode *inode, loff_t start,
+ return 1;
+ }
+
++static inline void locks_delete_block(struct file_lock *waiter)
++{
++}
++
+ static inline void lock_flocks(void)
+ {
+ }
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index e8b1597..a70783d 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -85,6 +85,19 @@
+ } \
+ )
+
++/*
++ * Multiplies an integer by a fraction, while avoiding unnecessary
++ * overflow or loss of precision.
++ */
++#define mult_frac(x, numer, denom)( \
++{ \
++ typeof(x) quot = (x) / (denom); \
++ typeof(x) rem = (x) % (denom); \
++ (quot * (numer)) + ((rem * (numer)) / (denom)); \
++} \
++)
++
++
+ #define _RET_IP_ (unsigned long)__builtin_return_address(0)
+ #define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+
+diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
+index fa39183..c4d2fc1 100644
+--- a/include/linux/kgdb.h
++++ b/include/linux/kgdb.h
+@@ -63,7 +63,8 @@ enum kgdb_bptype {
+ BP_HARDWARE_BREAKPOINT,
+ BP_WRITE_WATCHPOINT,
+ BP_READ_WATCHPOINT,
+- BP_ACCESS_WATCHPOINT
++ BP_ACCESS_WATCHPOINT,
++ BP_POKE_BREAKPOINT,
+ };
+
+ enum kgdb_bpstate {
+@@ -207,8 +208,8 @@ extern void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc);
+
+ /* Optional functions. */
+ extern int kgdb_validate_break_address(unsigned long addr);
+-extern int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr);
+-extern int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle);
++extern int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt);
++extern int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt);
+
+ /**
+ * kgdb_arch_late - Perform any architecture specific initalization.
+diff --git a/kernel/cred.c b/kernel/cred.c
+index 5791612..48c6fd3 100644
+--- a/kernel/cred.c
++++ b/kernel/cred.c
+@@ -385,6 +385,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
+ struct cred *new;
+ int ret;
+
++ p->replacement_session_keyring = NULL;
++
+ if (
+ #ifdef CONFIG_KEYS
+ !p->cred->thread_keyring &&
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index 0d7c087..7fda904 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -157,37 +157,39 @@ early_param("nokgdbroundup", opt_nokgdbroundup);
+ * Weak aliases for breakpoint management,
+ * can be overriden by architectures when needed:
+ */
+-int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
++int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+ {
+ int err;
+
+- err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
++ err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
++ BREAK_INSTR_SIZE);
+ if (err)
+ return err;
+-
+- return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
+- BREAK_INSTR_SIZE);
++ err = probe_kernel_write((char *)bpt->bpt_addr,
++ arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
++ return err;
+ }
+
+-int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
++int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+ {
+- return probe_kernel_write((char *)addr,
+- (char *)bundle, BREAK_INSTR_SIZE);
++ return probe_kernel_write((char *)bpt->bpt_addr,
++ (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
+ }
+
+ int __weak kgdb_validate_break_address(unsigned long addr)
+ {
+- char tmp_variable[BREAK_INSTR_SIZE];
++ struct kgdb_bkpt tmp;
+ int err;
+- /* Validate setting the breakpoint and then removing it. In the
++ /* Validate setting the breakpoint and then removing it. If the
+ * remove fails, the kernel needs to emit a bad message because we
+ * are deep trouble not being able to put things back the way we
+ * found them.
+ */
+- err = kgdb_arch_set_breakpoint(addr, tmp_variable);
++ tmp.bpt_addr = addr;
++ err = kgdb_arch_set_breakpoint(&tmp);
+ if (err)
+ return err;
+- err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
++ err = kgdb_arch_remove_breakpoint(&tmp);
+ if (err)
+ printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
+ "memory destroyed at: %lx", addr);
+@@ -231,7 +233,6 @@ static void kgdb_flush_swbreak_addr(unsigned long addr)
+ */
+ int dbg_activate_sw_breakpoints(void)
+ {
+- unsigned long addr;
+ int error;
+ int ret = 0;
+ int i;
+@@ -240,16 +241,15 @@ int dbg_activate_sw_breakpoints(void)
+ if (kgdb_break[i].state != BP_SET)
+ continue;
+
+- addr = kgdb_break[i].bpt_addr;
+- error = kgdb_arch_set_breakpoint(addr,
+- kgdb_break[i].saved_instr);
++ error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
+ if (error) {
+ ret = error;
+- printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
++ printk(KERN_INFO "KGDB: BP install failed: %lx",
++ kgdb_break[i].bpt_addr);
+ continue;
+ }
+
+- kgdb_flush_swbreak_addr(addr);
++ kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
+ kgdb_break[i].state = BP_ACTIVE;
+ }
+ return ret;
+@@ -298,7 +298,6 @@ int dbg_set_sw_break(unsigned long addr)
+
+ int dbg_deactivate_sw_breakpoints(void)
+ {
+- unsigned long addr;
+ int error;
+ int ret = 0;
+ int i;
+@@ -306,15 +305,14 @@ int dbg_deactivate_sw_breakpoints(void)
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if (kgdb_break[i].state != BP_ACTIVE)
+ continue;
+- addr = kgdb_break[i].bpt_addr;
+- error = kgdb_arch_remove_breakpoint(addr,
+- kgdb_break[i].saved_instr);
++ error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
+ if (error) {
+- printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
++ printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
++ kgdb_break[i].bpt_addr);
+ ret = error;
+ }
+
+- kgdb_flush_swbreak_addr(addr);
++ kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
+ kgdb_break[i].state = BP_SET;
+ }
+ return ret;
+@@ -348,7 +346,6 @@ int kgdb_isremovedbreak(unsigned long addr)
+
+ int dbg_remove_all_break(void)
+ {
+- unsigned long addr;
+ int error;
+ int i;
+
+@@ -356,12 +353,10 @@ int dbg_remove_all_break(void)
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if (kgdb_break[i].state != BP_ACTIVE)
+ goto setundefined;
+- addr = kgdb_break[i].bpt_addr;
+- error = kgdb_arch_remove_breakpoint(addr,
+- kgdb_break[i].saved_instr);
++ error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
+ if (error)
+ printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
+- addr);
++ kgdb_break[i].bpt_addr);
+ setundefined:
+ kgdb_break[i].state = BP_UNDEFINED;
+ }
+diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
+index 4742090..c3c8975 100644
+--- a/kernel/irq/migration.c
++++ b/kernel/irq/migration.c
+@@ -43,12 +43,16 @@ void irq_move_masked_irq(struct irq_data *idata)
+ * masking the irqs.
+ */
+ if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
+- < nr_cpu_ids))
+- if (!chip->irq_set_affinity(&desc->irq_data,
+- desc->pending_mask, false)) {
++ < nr_cpu_ids)) {
++ int ret = chip->irq_set_affinity(&desc->irq_data,
++ desc->pending_mask, false);
++ switch (ret) {
++ case IRQ_SET_MASK_OK:
+ cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
++ case IRQ_SET_MASK_OK_NOCOPY:
+ irq_set_thread_affinity(desc);
+ }
++ }
+
+ cpumask_clear(desc->pending_mask);
+ }
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index ae27196..ea7ec7f 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -166,7 +166,7 @@ static int proc_taint(struct ctl_table *table, int write,
+ #endif
+
+ #ifdef CONFIG_PRINTK
+-static int proc_dmesg_restrict(struct ctl_table *table, int write,
++static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+ #endif
+
+@@ -713,7 +713,7 @@ static struct ctl_table kern_table[] = {
+ .data = &dmesg_restrict,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_dointvec_minmax,
++ .proc_handler = proc_dointvec_minmax_sysadmin,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+@@ -722,7 +722,7 @@ static struct ctl_table kern_table[] = {
+ .data = &kptr_restrict,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_dmesg_restrict,
++ .proc_handler = proc_dointvec_minmax_sysadmin,
+ .extra1 = &zero,
+ .extra2 = &two,
+ },
+@@ -2422,7 +2422,7 @@ static int proc_taint(struct ctl_table *table, int write,
+ }
+
+ #ifdef CONFIG_PRINTK
+-static int proc_dmesg_restrict(struct ctl_table *table, int write,
++static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+ if (write && !capable(CAP_SYS_ADMIN))
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index f2bd275..697e49d 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1642,6 +1642,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
+ int cpu_file = iter->cpu_file;
+ u64 next_ts = 0, ts;
+ int next_cpu = -1;
++ int next_size = 0;
+ int cpu;
+
+ /*
+@@ -1673,9 +1674,12 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
+ next_cpu = cpu;
+ next_ts = ts;
+ next_lost = lost_events;
++ next_size = iter->ent_size;
+ }
+ }
+
++ iter->ent_size = next_size;
++
+ if (ent_cpu)
+ *ent_cpu = next_cpu;
+
+diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
+index 9336590..205dcac 100644
+--- a/kernel/trace/trace_entries.h
++++ b/kernel/trace/trace_entries.h
+@@ -156,6 +156,12 @@ FTRACE_ENTRY_DUP(wakeup, ctx_switch_entry,
+
+ #define FTRACE_STACK_ENTRIES 8
+
++#ifndef CONFIG_64BIT
++# define IP_FMT "%08lx"
++#else
++# define IP_FMT "%016lx"
++#endif
++
+ FTRACE_ENTRY(kernel_stack, stack_entry,
+
+ TRACE_STACK,
+@@ -165,8 +171,9 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
+ __dynamic_array(unsigned long, caller )
+ ),
+
+- F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
+- "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n",
++ F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
++ "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
++ "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n",
+ __entry->caller[0], __entry->caller[1], __entry->caller[2],
+ __entry->caller[3], __entry->caller[4], __entry->caller[5],
+ __entry->caller[6], __entry->caller[7])
+@@ -181,8 +188,9 @@ FTRACE_ENTRY(user_stack, userstack_entry,
+ __array( unsigned long, caller, FTRACE_STACK_ENTRIES )
+ ),
+
+- F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n"
+- "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n",
++ F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
++ "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
++ "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n",
+ __entry->caller[0], __entry->caller[1], __entry->caller[2],
+ __entry->caller[3], __entry->caller[4], __entry->caller[5],
+ __entry->caller[6], __entry->caller[7])
+diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
+index bbeec31..ad4000c 100644
+--- a/kernel/trace/trace_export.c
++++ b/kernel/trace/trace_export.c
+@@ -150,7 +150,7 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
+ #define __dynamic_array(type, item)
+
+ #undef F_printk
+-#define F_printk(fmt, args...) #fmt ", " __stringify(args)
++#define F_printk(fmt, args...) __stringify(fmt) ", " __stringify(args)
+
+ #undef FTRACE_ENTRY
+ #define FTRACE_ENTRY(call, struct_name, etype, tstruct, print) \
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index c1c597e..98bfbd5 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -608,6 +608,10 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
+
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+ struct hci_cp_auth_requested cp;
++
++ /* encrypt must be pending if auth is also pending */
++ set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
++
+ cp.handle = cpu_to_le16(conn->handle);
+ hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
+ sizeof(cp), &cp);
+diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
+index 41c2310..aea1559 100644
+--- a/net/mac80211/agg-rx.c
++++ b/net/mac80211/agg-rx.c
+@@ -49,6 +49,8 @@ static void ieee80211_free_tid_rx(struct rcu_head *h)
+ container_of(h, struct tid_ampdu_rx, rcu_head);
+ int i;
+
++ del_timer_sync(&tid_rx->reorder_timer);
++
+ for (i = 0; i < tid_rx->buf_size; i++)
+ dev_kfree_skb(tid_rx->reorder_buf[i]);
+ kfree(tid_rx->reorder_buf);
+@@ -88,7 +90,6 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
+ tid, 0, reason);
+
+ del_timer_sync(&tid_rx->session_timer);
+- del_timer_sync(&tid_rx->reorder_timer);
+
+ call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
+ }
+diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
+index 178ff4f..2679507 100644
+--- a/net/rose/rose_dev.c
++++ b/net/rose/rose_dev.c
+@@ -96,11 +96,11 @@ static int rose_set_mac_address(struct net_device *dev, void *addr)
+ struct sockaddr *sa = addr;
+ int err;
+
+- if (!memcpy(dev->dev_addr, sa->sa_data, dev->addr_len))
++ if (!memcmp(dev->dev_addr, sa->sa_data, dev->addr_len))
+ return 0;
+
+ if (dev->flags & IFF_UP) {
+- err = rose_add_loopback_node((rose_address *)dev->dev_addr);
++ err = rose_add_loopback_node((rose_address *)sa->sa_data);
+ if (err)
+ return err;
+
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index 2bd594e..619228d 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -132,8 +132,10 @@ static struct module *new_module(char *modname)
+ /* strip trailing .o */
+ s = strrchr(p, '.');
+ if (s != NULL)
+- if (strcmp(s, ".o") == 0)
++ if (strcmp(s, ".o") == 0) {
+ *s = '\0';
++ mod->is_dot_o = 1;
++ }
+
+ /* add to list */
+ mod->name = p;
+@@ -587,7 +589,8 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
+ unsigned int crc;
+ enum export export;
+
+- if (!is_vmlinux(mod->name) && strncmp(symname, "__ksymtab", 9) == 0)
++ if ((!is_vmlinux(mod->name) || mod->is_dot_o) &&
++ strncmp(symname, "__ksymtab", 9) == 0)
+ export = export_from_secname(info, get_secindex(info, sym));
+ else
+ export = export_from_sec(info, get_secindex(info, sym));
+@@ -849,7 +852,7 @@ static void check_section(const char *modname, struct elf_info *elf,
+
+ #define ALL_INIT_DATA_SECTIONS \
+ ".init.setup$", ".init.rodata$", \
+- ".devinit.rodata$", ".cpuinit.rodata$", ".meminit.rodata$" \
++ ".devinit.rodata$", ".cpuinit.rodata$", ".meminit.rodata$", \
+ ".init.data$", ".devinit.data$", ".cpuinit.data$", ".meminit.data$"
+ #define ALL_EXIT_DATA_SECTIONS \
+ ".exit.data$", ".devexit.data$", ".cpuexit.data$", ".memexit.data$"
+diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
+index 2031119..51207e4 100644
+--- a/scripts/mod/modpost.h
++++ b/scripts/mod/modpost.h
+@@ -113,6 +113,7 @@ struct module {
+ int has_cleanup;
+ struct buffer dev_table_buf;
+ char srcversion[25];
++ int is_dot_o;
+ };
+
+ struct elf_info {
+diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
+index bee09d0..fe00cdf 100644
+--- a/security/tomoyo/mount.c
++++ b/security/tomoyo/mount.c
+@@ -199,30 +199,32 @@ int tomoyo_mount_permission(char *dev_name, struct path *path,
+ if (flags & MS_REMOUNT) {
+ type = tomoyo_mounts[TOMOYO_MOUNT_REMOUNT];
+ flags &= ~MS_REMOUNT;
+- }
+- if (flags & MS_MOVE) {
+- type = tomoyo_mounts[TOMOYO_MOUNT_MOVE];
+- flags &= ~MS_MOVE;
+- }
+- if (flags & MS_BIND) {
++ } else if (flags & MS_BIND) {
+ type = tomoyo_mounts[TOMOYO_MOUNT_BIND];
+ flags &= ~MS_BIND;
+- }
+- if (flags & MS_UNBINDABLE) {
+- type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE];
+- flags &= ~MS_UNBINDABLE;
+- }
+- if (flags & MS_PRIVATE) {
++ } else if (flags & MS_SHARED) {
++ if (flags & (MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
++ return -EINVAL;
++ type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED];
++ flags &= ~MS_SHARED;
++ } else if (flags & MS_PRIVATE) {
++ if (flags & (MS_SHARED | MS_SLAVE | MS_UNBINDABLE))
++ return -EINVAL;
+ type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_PRIVATE];
+ flags &= ~MS_PRIVATE;
+- }
+- if (flags & MS_SLAVE) {
++ } else if (flags & MS_SLAVE) {
++ if (flags & (MS_SHARED | MS_PRIVATE | MS_UNBINDABLE))
++ return -EINVAL;
+ type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SLAVE];
+ flags &= ~MS_SLAVE;
+- }
+- if (flags & MS_SHARED) {
+- type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED];
+- flags &= ~MS_SHARED;
++ } else if (flags & MS_UNBINDABLE) {
++ if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE))
++ return -EINVAL;
++ type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE];
++ flags &= ~MS_UNBINDABLE;
++ } else if (flags & MS_MOVE) {
++ type = tomoyo_mounts[TOMOYO_MOUNT_MOVE];
++ flags &= ~MS_MOVE;
+ }
+ if (!type)
+ type = "<NULL>";
+diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
+index 12c1bde..1c4999d 100644
+--- a/sound/soc/codecs/ak4642.c
++++ b/sound/soc/codecs/ak4642.c
+@@ -144,7 +144,7 @@
+ * min : 0xFE : -115.0 dB
+ * mute: 0xFF
+ */
+-static const DECLARE_TLV_DB_SCALE(out_tlv, -11500, 50, 1);
++static const DECLARE_TLV_DB_SCALE(out_tlv, -11550, 50, 1);
+
+ static const struct snd_kcontrol_new ak4642_snd_controls[] = {
+
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 6e502af..2f1f5f8 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -3190,7 +3190,7 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
+ case 2:
+ case 3:
+ wm8994->hubs.dcs_codes_l = -9;
+- wm8994->hubs.dcs_codes_r = -5;
++ wm8994->hubs.dcs_codes_r = -7;
+ break;
+ default:
+ break;
diff --git a/1015_linux-3.2.16.patch b/1015_linux-3.2.16.patch
new file mode 100644
index 00000000..d9378ae1
--- /dev/null
+++ b/1015_linux-3.2.16.patch
@@ -0,0 +1,2005 @@
+diff --git a/Makefile b/Makefile
+index 6195122..3da29cb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
+index c2effc9..9c18ebd 100644
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -273,7 +273,7 @@ restart: adr r0, LC0
+ add r0, r0, #0x100
+ mov r1, r6
+ sub r2, sp, r6
+- blne atags_to_fdt
++ bleq atags_to_fdt
+
+ ldmfd sp!, {r0-r3, ip, lr}
+ sub sp, sp, #0x10000
+diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
+index 82ef81d..785365e 100644
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -382,6 +382,18 @@ __v7_setup:
+ mcr p15, 0, r5, c10, c2, 0 @ write PRRR
+ mcr p15, 0, r6, c10, c2, 1 @ write NMRR
+ #endif
++#ifndef CONFIG_ARM_THUMBEE
++ mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
++ and r0, r0, #(0xf << 12) @ ThumbEE enabled field
++ teq r0, #(1 << 12) @ check if ThumbEE is present
++ bne 1f
++ mov r5, #0
++ mcr p14, 6, r5, c1, c0, 0 @ Initialize TEEHBR to 0
++ mrc p14, 6, r0, c0, c0, 0 @ load TEECR
++ orr r0, r0, #1 @ set the 1st bit in order to
++ mcr p14, 6, r0, c0, c0, 0 @ stop userspace TEEHBR access
++1:
++#endif
+ adr r5, v7_crval
+ ldmia r5, {r5, r6}
+ #ifdef CONFIG_CPU_ENDIAN_BE8
+diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
+index 8428525..21ab376 100644
+--- a/arch/ia64/include/asm/futex.h
++++ b/arch/ia64/include/asm/futex.h
+@@ -107,15 +107,16 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ return -EFAULT;
+
+ {
+- register unsigned long r8 __asm ("r8") = 0;
++ register unsigned long r8 __asm ("r8");
+ unsigned long prev;
+ __asm__ __volatile__(
+ " mf;; \n"
+- " mov ar.ccv=%3;; \n"
+- "[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n"
++ " mov %0=r0 \n"
++ " mov ar.ccv=%4;; \n"
++ "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n"
+ " .xdata4 \"__ex_table\", 1b-., 2f-. \n"
+ "[2:]"
+- : "=r" (prev)
++ : "=r" (r8), "=r" (prev)
+ : "r" (uaddr), "r" (newval),
+ "rO" ((long) (unsigned) oldval)
+ : "memory");
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index f929db9..a3c40e8 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -90,7 +90,6 @@ config S390
+ select HAVE_GET_USER_PAGES_FAST
+ select HAVE_ARCH_MUTEX_CPU_RELAX
+ select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
+- select HAVE_RCU_TABLE_FREE if SMP
+ select ARCH_SAVE_PAGE_KEYS if HIBERNATION
+ select ARCH_INLINE_SPIN_TRYLOCK
+ select ARCH_INLINE_SPIN_TRYLOCK_BH
+diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
+index 8eef9b5..78e3041 100644
+--- a/arch/s390/include/asm/pgalloc.h
++++ b/arch/s390/include/asm/pgalloc.h
+@@ -22,10 +22,7 @@ void crst_table_free(struct mm_struct *, unsigned long *);
+
+ unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
+ void page_table_free(struct mm_struct *, unsigned long *);
+-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ void page_table_free_rcu(struct mmu_gather *, unsigned long *);
+-void __tlb_remove_table(void *_table);
+-#endif
+
+ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
+ {
+diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
+index c687a2c..775a5ee 100644
+--- a/arch/s390/include/asm/tlb.h
++++ b/arch/s390/include/asm/tlb.h
+@@ -30,14 +30,10 @@
+
+ struct mmu_gather {
+ struct mm_struct *mm;
+-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ struct mmu_table_batch *batch;
+-#endif
+ unsigned int fullmm;
+- unsigned int need_flush;
+ };
+
+-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ struct mmu_table_batch {
+ struct rcu_head rcu;
+ unsigned int nr;
+@@ -49,7 +45,6 @@ struct mmu_table_batch {
+
+ extern void tlb_table_flush(struct mmu_gather *tlb);
+ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+-#endif
+
+ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
+ struct mm_struct *mm,
+@@ -57,29 +52,20 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
+ {
+ tlb->mm = mm;
+ tlb->fullmm = full_mm_flush;
+- tlb->need_flush = 0;
+-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ tlb->batch = NULL;
+-#endif
+ if (tlb->fullmm)
+ __tlb_flush_mm(mm);
+ }
+
+ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+ {
+- if (!tlb->need_flush)
+- return;
+- tlb->need_flush = 0;
+- __tlb_flush_mm(tlb->mm);
+-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ tlb_table_flush(tlb);
+-#endif
+ }
+
+ static inline void tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end)
+ {
+- tlb_flush_mmu(tlb);
++ tlb_table_flush(tlb);
+ }
+
+ /*
+@@ -105,10 +91,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+ unsigned long address)
+ {
+-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ if (!tlb->fullmm)
+ return page_table_free_rcu(tlb, (unsigned long *) pte);
+-#endif
+ page_table_free(tlb->mm, (unsigned long *) pte);
+ }
+
+@@ -125,10 +109,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+ #ifdef __s390x__
+ if (tlb->mm->context.asce_limit <= (1UL << 31))
+ return;
+-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ if (!tlb->fullmm)
+ return tlb_remove_table(tlb, pmd);
+-#endif
+ crst_table_free(tlb->mm, (unsigned long *) pmd);
+ #endif
+ }
+@@ -146,10 +128,8 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+ #ifdef __s390x__
+ if (tlb->mm->context.asce_limit <= (1UL << 42))
+ return;
+-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ if (!tlb->fullmm)
+ return tlb_remove_table(tlb, pud);
+-#endif
+ crst_table_free(tlb->mm, (unsigned long *) pud);
+ #endif
+ }
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index 301c84d..f8ceac4 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -687,8 +687,6 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
+ }
+ }
+
+-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+-
+ static void __page_table_free_rcu(void *table, unsigned bit)
+ {
+ struct page *page;
+@@ -742,7 +740,66 @@ void __tlb_remove_table(void *_table)
+ free_pages((unsigned long) table, ALLOC_ORDER);
+ }
+
+-#endif
++static void tlb_remove_table_smp_sync(void *arg)
++{
++ /* Simply deliver the interrupt */
++}
++
++static void tlb_remove_table_one(void *table)
++{
++ /*
++ * This isn't an RCU grace period and hence the page-tables cannot be
++ * assumed to be actually RCU-freed.
++ *
++ * It is however sufficient for software page-table walkers that rely
++ * on IRQ disabling. See the comment near struct mmu_table_batch.
++ */
++ smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
++ __tlb_remove_table(table);
++}
++
++static void tlb_remove_table_rcu(struct rcu_head *head)
++{
++ struct mmu_table_batch *batch;
++ int i;
++
++ batch = container_of(head, struct mmu_table_batch, rcu);
++
++ for (i = 0; i < batch->nr; i++)
++ __tlb_remove_table(batch->tables[i]);
++
++ free_page((unsigned long)batch);
++}
++
++void tlb_table_flush(struct mmu_gather *tlb)
++{
++ struct mmu_table_batch **batch = &tlb->batch;
++
++ if (*batch) {
++ __tlb_flush_mm(tlb->mm);
++ call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
++ *batch = NULL;
++ }
++}
++
++void tlb_remove_table(struct mmu_gather *tlb, void *table)
++{
++ struct mmu_table_batch **batch = &tlb->batch;
++
++ if (*batch == NULL) {
++ *batch = (struct mmu_table_batch *)
++ __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
++ if (*batch == NULL) {
++ __tlb_flush_mm(tlb->mm);
++ tlb_remove_table_one(table);
++ return;
++ }
++ (*batch)->nr = 0;
++ }
++ (*batch)->tables[(*batch)->nr++] = table;
++ if ((*batch)->nr == MAX_TABLE_BATCH)
++ tlb_table_flush(tlb);
++}
+
+ /*
+ * switch on pgstes for its userspace process (for kvm)
+diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
+index 381edcd..27728e1 100644
+--- a/arch/sparc/kernel/ds.c
++++ b/arch/sparc/kernel/ds.c
+@@ -1267,4 +1267,4 @@ static int __init ds_init(void)
+ return vio_register_driver(&ds_driver);
+ }
+
+-subsys_initcall(ds_init);
++fs_initcall(ds_init);
+diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
+index 77f1b95..9171fc2 100644
+--- a/arch/sparc/kernel/rtrap_64.S
++++ b/arch/sparc/kernel/rtrap_64.S
+@@ -20,11 +20,6 @@
+
+ .text
+ .align 32
+-__handle_softirq:
+- call do_softirq
+- nop
+- ba,a,pt %xcc, __handle_softirq_continue
+- nop
+ __handle_preemption:
+ call schedule
+ wrpr %g0, RTRAP_PSTATE, %pstate
+@@ -89,9 +84,7 @@ rtrap:
+ cmp %l1, 0
+
+ /* mm/ultra.S:xcall_report_regs KNOWS about this load. */
+- bne,pn %icc, __handle_softirq
+ ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+-__handle_softirq_continue:
+ rtrap_xcall:
+ sethi %hi(0xf << 20), %l4
+ and %l1, %l4, %l4
+diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
+index 1055769..6d276c2 100644
+--- a/drivers/acpi/acpica/acobject.h
++++ b/drivers/acpi/acpica/acobject.h
+@@ -358,6 +358,7 @@ typedef enum {
+ */
+ struct acpi_object_extra {
+ ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */
++ struct acpi_namespace_node *scope_node;
+ void *region_context; /* Region-specific data */
+ u8 *aml_start;
+ u32 aml_length;
+diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
+index 42163d8..d69e4a5 100644
+--- a/drivers/acpi/acpica/dsargs.c
++++ b/drivers/acpi/acpica/dsargs.c
+@@ -384,7 +384,7 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
+
+ /* Execute the argument AML */
+
+- status = acpi_ds_execute_arguments(node, node->parent,
++ status = acpi_ds_execute_arguments(node, extra_desc->extra.scope_node,
+ extra_desc->extra.aml_length,
+ extra_desc->extra.aml_start);
+ if (ACPI_FAILURE(status)) {
+diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
+index 110711a..8a06dc5 100644
+--- a/drivers/acpi/acpica/excreate.c
++++ b/drivers/acpi/acpica/excreate.c
+@@ -330,6 +330,12 @@ acpi_ex_create_region(u8 * aml_start,
+ region_obj2 = obj_desc->common.next_object;
+ region_obj2->extra.aml_start = aml_start;
+ region_obj2->extra.aml_length = aml_length;
++ if (walk_state->scope_info) {
++ region_obj2->extra.scope_node =
++ walk_state->scope_info->scope.node;
++ } else {
++ region_obj2->extra.scope_node = node;
++ }
+
+ /* Init the region from the operands */
+
+diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
+index e820b68..acda773 100644
+--- a/drivers/block/cciss_scsi.c
++++ b/drivers/block/cciss_scsi.c
+@@ -866,6 +866,7 @@ cciss_scsi_detect(ctlr_info_t *h)
+ sh->can_queue = cciss_tape_cmds;
+ sh->sg_tablesize = h->maxsgentries;
+ sh->max_cmd_len = MAX_COMMAND_SIZE;
++ sh->max_sectors = h->cciss_max_sectors;
+
+ ((struct cciss_scsi_adapter_data_t *)
+ h->scsi_ctlr)->scsi_host = sh;
+@@ -1410,7 +1411,7 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c,
+ /* track how many SG entries we are using */
+ if (request_nsgs > h->maxSG)
+ h->maxSG = request_nsgs;
+- c->Header.SGTotal = (__u8) request_nsgs + chained;
++ c->Header.SGTotal = (u16) request_nsgs + chained;
+ if (request_nsgs > h->max_cmd_sgentries)
+ c->Header.SGList = h->max_cmd_sgentries;
+ else
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index db811d2..003cd8d 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -71,6 +71,8 @@ static struct usb_device_id ath3k_table[] = {
+
+ /* Atheros AR3012 with sflash firmware*/
+ { USB_DEVICE(0x0CF3, 0x3004) },
++ { USB_DEVICE(0x0CF3, 0x311D) },
++ { USB_DEVICE(0x13d3, 0x3375) },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE02C) },
+@@ -87,6 +89,8 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+
+ /* Atheros AR3012 with sflash firmware*/
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index c16c750..db44ad5 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -101,6 +101,7 @@ static struct usb_device_id btusb_table[] = {
+ { USB_DEVICE(0x0c10, 0x0000) },
+
+ /* Broadcom BCM20702A0 */
++ { USB_DEVICE(0x0a5c, 0x21e3) },
+ { USB_DEVICE(0x413c, 0x8197) },
+
+ { } /* Terminating entry */
+@@ -126,6 +127,8 @@ static struct usb_device_id blacklist_table[] = {
+
+ /* Atheros 3012 with sflash firmware */
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
+index 48ad2a7..8f3d6db 100644
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -237,7 +237,6 @@ static void hci_uart_destruct(struct hci_dev *hdev)
+ return;
+
+ BT_DBG("%s", hdev->name);
+- kfree(hdev->driver_data);
+ }
+
+ /* ------ LDISC part ------ */
+@@ -310,12 +309,13 @@ static void hci_uart_tty_close(struct tty_struct *tty)
+ hci_uart_close(hdev);
+
+ if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
+- hu->proto->close(hu);
+ if (hdev) {
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+ }
++ hu->proto->close(hu);
+ }
++ kfree(hu);
+ }
+ }
+
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index 5a99bb3..da85c0d 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -201,18 +201,17 @@ config PL330_DMA
+ platform_data for a dma-pl330 device.
+
+ config PCH_DMA
+- tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
++ tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223/ML7831) DMA support"
+ depends on PCI && X86
+ select DMA_ENGINE
+ help
+ Enable support for Intel EG20T PCH DMA engine.
+-
+ This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
+- Output Hub), ML7213 and ML7223.
+- ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
+- for MP(Media Phone) use.
+- ML7213/ML7223 is companion chip for Intel Atom E6xx series.
+- ML7213/ML7223 is completely compatible for Intel EG20T PCH.
++ Output Hub), ML7213, ML7223 and ML7831.
++ ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
++ for MP(Media Phone) use and ML7831 IOH is for general purpose use.
++ ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
++ ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
+
+ config IMX_SDMA
+ tristate "i.MX SDMA support"
+diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
+index a6d0e3d..629c430 100644
+--- a/drivers/dma/pch_dma.c
++++ b/drivers/dma/pch_dma.c
+@@ -1018,6 +1018,8 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
+ #define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E
+ #define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017
+ #define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B
++#define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810
++#define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815
+
+ DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
+@@ -1030,6 +1032,8 @@ DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
++ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
++ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
+ { 0, },
+ };
+
+@@ -1057,7 +1061,7 @@ static void __exit pch_dma_exit(void)
+ module_init(pch_dma_init);
+ module_exit(pch_dma_exit);
+
+-MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
+- "DMA controller driver");
++MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICON ML7213/ML7223/ML7831 IOH"
++ "DMA controller driver");
+ MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index 8482a23..4e04157 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -387,7 +387,7 @@ config GPIO_LANGWELL
+ Say Y here to support Intel Langwell/Penwell GPIO.
+
+ config GPIO_PCH
+- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GPIO"
++ tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7223/ML7831) GPIO"
+ depends on PCI && X86
+ select GENERIC_IRQ_CHIP
+ help
+@@ -395,11 +395,12 @@ config GPIO_PCH
+ which is an IOH(Input/Output Hub) for x86 embedded processor.
+ This driver can access PCH GPIO device.
+
+- This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
+- Output Hub), ML7223.
++ This driver also can be used for LAPIS Semiconductor IOH(Input/
++ Output Hub), ML7223 and ML7831.
+ ML7223 IOH is for MP(Media Phone) use.
+- ML7223 is companion chip for Intel Atom E6xx series.
+- ML7223 is completely compatible for Intel EG20T PCH.
++ ML7831 IOH is for general purpose use.
++ ML7223/ML7831 is companion chip for Intel Atom E6xx series.
++ ML7223/ML7831 is completely compatible for Intel EG20T PCH.
+
+ config GPIO_ML_IOH
+ tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
+diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
+index a6008e1..779ff70 100644
+--- a/drivers/gpio/gpio-pch.c
++++ b/drivers/gpio/gpio-pch.c
+@@ -392,6 +392,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
+ chip->reg = chip->base;
+ pci_set_drvdata(pdev, chip);
+ mutex_init(&chip->lock);
++ spin_lock_init(&chip->spinlock);
+ pch_gpio_setup(chip);
+ ret = gpiochip_add(&chip->gpio);
+ if (ret) {
+@@ -524,6 +525,7 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8043) },
++ { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8803) },
+ { 0, }
+ };
+ MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index e52b705..d0f8830 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -64,7 +64,7 @@ MODULE_PARM_DESC(semaphores,
+ "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
+
+ int i915_enable_rc6 __read_mostly = -1;
+-module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
++module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
+ MODULE_PARM_DESC(i915_enable_rc6,
+ "Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
+
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 9011f48..390768f 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -5646,12 +5646,15 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ if (is_lvds) {
+ temp = I915_READ(PCH_LVDS);
+ temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+- if (HAS_PCH_CPT(dev))
++ if (HAS_PCH_CPT(dev)) {
++ temp &= ~PORT_TRANS_SEL_MASK;
+ temp |= PORT_TRANS_SEL_CPT(pipe);
+- else if (pipe == 1)
+- temp |= LVDS_PIPEB_SELECT;
+- else
+- temp &= ~LVDS_PIPEB_SELECT;
++ } else {
++ if (pipe == 1)
++ temp |= LVDS_PIPEB_SELECT;
++ else
++ temp &= ~LVDS_PIPEB_SELECT;
++ }
+
+ /* set the corresponsding LVDS_BORDER bit */
+ temp |= dev_priv->lvds_border_bits;
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 94f860c..12eb789 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -219,14 +219,38 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
+ return (max_link_clock * max_lanes * 8) / 10;
+ }
+
++static bool
++intel_dp_adjust_dithering(struct intel_dp *intel_dp,
++ struct drm_display_mode *mode,
++ struct drm_display_mode *adjusted_mode)
++{
++ int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
++ int max_lanes = intel_dp_max_lane_count(intel_dp);
++ int max_rate, mode_rate;
++
++ mode_rate = intel_dp_link_required(mode->clock, 24);
++ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
++
++ if (mode_rate > max_rate) {
++ mode_rate = intel_dp_link_required(mode->clock, 18);
++ if (mode_rate > max_rate)
++ return false;
++
++ if (adjusted_mode)
++ adjusted_mode->private_flags
++ |= INTEL_MODE_DP_FORCE_6BPC;
++
++ return true;
++ }
++
++ return true;
++}
++
+ static int
+ intel_dp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+ {
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+- int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
+- int max_lanes = intel_dp_max_lane_count(intel_dp);
+- int max_rate, mode_rate;
+
+ if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
+ if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
+@@ -236,16 +260,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
+ return MODE_PANEL;
+ }
+
+- mode_rate = intel_dp_link_required(mode->clock, 24);
+- max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+-
+- if (mode_rate > max_rate) {
+- mode_rate = intel_dp_link_required(mode->clock, 18);
+- if (mode_rate > max_rate)
+- return MODE_CLOCK_HIGH;
+- else
+- mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
+- }
++ if (!intel_dp_adjust_dithering(intel_dp, mode, NULL))
++ return MODE_CLOCK_HIGH;
+
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+@@ -673,7 +689,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ int lane_count, clock;
+ int max_lane_count = intel_dp_max_lane_count(intel_dp);
+ int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+- int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
++ int bpp;
+ static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+
+ if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
+@@ -687,6 +703,11 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ mode->clock = intel_dp->panel_fixed_mode->clock;
+ }
+
++ if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
++ return false;
++
++ bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
++
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ for (clock = 0; clock <= max_clock; clock++) {
+ int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 30a9af9..8673581 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -1052,7 +1052,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
+ * of the buffer.
+ */
+ ring->effective_size = ring->size;
+- if (IS_I830(ring->dev))
++ if (IS_I830(ring->dev) || IS_845G(ring->dev))
+ ring->effective_size -= 128;
+
+ return 0;
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 0f8eb48..5351ee1 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -246,6 +246,10 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action)
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
++ /* some R4xx chips have the wrong frev */
++ if (rdev->family <= CHIP_RV410)
++ frev = 1;
++
+ switch (frev) {
+ case 1:
+ switch (crev) {
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index f7d39ac..4a4493f 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -946,7 +946,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
+
+ encoder = obj_to_encoder(obj);
+
+- if (encoder->encoder_type != DRM_MODE_ENCODER_DAC ||
++ if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
+ encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
+ continue;
+
+@@ -976,6 +976,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
+ * cases the DVI port is actually a virtual KVM port connected to the service
+ * processor.
+ */
++out:
+ if ((!rdev->is_atom_bios) &&
+ (ret == connector_status_disconnected) &&
+ rdev->mode_info.bios_hardcoded_edid_size) {
+@@ -983,7 +984,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
+ ret = connector_status_connected;
+ }
+
+-out:
+ /* updated in get modes as well since we need to know if it's analog or digital */
+ radeon_connector_update_scratch_regs(connector, ret);
+ return ret;
+diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
+index 7bb1b07..1441b00 100644
+--- a/drivers/gpu/drm/radeon/radeon_i2c.c
++++ b/drivers/gpu/drm/radeon/radeon_i2c.c
+@@ -890,6 +890,10 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+ struct radeon_i2c_chan *i2c;
+ int ret;
+
++ /* don't add the mm_i2c bus unless hw_i2c is enabled */
++ if (rec->mm_i2c && (radeon_hw_i2c == 0))
++ return NULL;
++
+ i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
+ if (i2c == NULL)
+ return NULL;
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index e7ddb49..baa019e 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -143,6 +143,12 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
+ (rdev->pdev->subsystem_device == 0x01fd))
+ return true;
+
++ /* RV515 seems to have MSI issues where it loses
++ * MSI rearms occasionally. This leads to lockups and freezes.
++ * disable it by default.
++ */
++ if (rdev->family == CHIP_RV515)
++ return false;
+ if (rdev->flags & RADEON_IS_IGP) {
+ /* APUs work fine with MSIs */
+ if (rdev->family >= CHIP_PALM)
+diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
+index 2a8722b..62a8e68 100644
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -1819,7 +1819,9 @@ int bitmap_load(struct mddev *mddev)
+ * re-add of a missing device */
+ start = mddev->recovery_cp;
+
++ mutex_lock(&mddev->bitmap_info.mutex);
+ err = bitmap_init_from_disk(bitmap, start);
++ mutex_unlock(&mddev->bitmap_info.mutex);
+
+ if (err)
+ goto out;
+diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+index 48406ca..43c7b25 100644
+--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
++++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+@@ -1745,6 +1745,12 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
+ struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
+ int err;
+
++ /* Ensure we have a valid MAC */
++ if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
++ pr_err("Error: Invalid MAC address\n");
++ return -EINVAL;
++ }
++
+ /* hardware has been reset, we need to reload some things */
+ pch_gbe_set_multi(netdev);
+
+@@ -2467,9 +2473,14 @@ static int pch_gbe_probe(struct pci_dev *pdev,
+
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+- dev_err(&pdev->dev, "Invalid MAC Address\n");
+- ret = -EIO;
+- goto err_free_adapter;
++ /*
++ * If the MAC is invalid (or just missing), display a warning
++ * but do not abort setting up the device. pch_gbe_up will
++ * prevent the interface from being brought up until a valid MAC
++ * is set.
++ */
++ dev_err(&pdev->dev, "Invalid MAC address, "
++ "interface disabled.\n");
+ }
+ setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
+ (unsigned long)adapter);
+diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+index 9cb5f91..29e23be 100644
+--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
++++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+@@ -321,10 +321,10 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter)
+ pr_debug("AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n");
+ hw->phy.autoneg_advertised = opt.def;
+ } else {
+- hw->phy.autoneg_advertised = AutoNeg;
+- pch_gbe_validate_option(
+- (int *)(&hw->phy.autoneg_advertised),
+- &opt, adapter);
++ int tmp = AutoNeg;
++
++ pch_gbe_validate_option(&tmp, &opt, adapter);
++ hw->phy.autoneg_advertised = tmp;
+ }
+ }
+
+@@ -495,9 +495,10 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
+ .arg = { .l = { .nr = (int)ARRAY_SIZE(fc_list),
+ .p = fc_list } }
+ };
+- hw->mac.fc = FlowControl;
+- pch_gbe_validate_option((int *)(&hw->mac.fc),
+- &opt, adapter);
++ int tmp = FlowControl;
++
++ pch_gbe_validate_option(&tmp, &opt, adapter);
++ hw->mac.fc = tmp;
+ }
+
+ pch_gbe_check_copper_options(adapter);
+diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
+index 8ddef3e..d771de5 100644
+--- a/drivers/net/wireless/ath/ath9k/calib.c
++++ b/drivers/net/wireless/ath/ath9k/calib.c
+@@ -20,7 +20,6 @@
+
+ /* Common calibration code */
+
+-#define ATH9K_NF_TOO_HIGH -60
+
+ static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
+ {
+@@ -348,10 +347,10 @@ static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
+ "NF calibrated [%s] [chain %d] is %d\n",
+ (i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);
+
+- if (nf[i] > ATH9K_NF_TOO_HIGH) {
++ if (nf[i] > limit->max) {
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "NF[%d] (%d) > MAX (%d), correcting to MAX\n",
+- i, nf[i], ATH9K_NF_TOO_HIGH);
++ i, nf[i], limit->max);
+ nf[i] = limit->max;
+ } else if (nf[i] < limit->min) {
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
+index 9afcad3..d44d398 100644
+--- a/drivers/net/wireless/rtlwifi/pci.c
++++ b/drivers/net/wireless/rtlwifi/pci.c
+@@ -926,8 +926,13 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+ ring = &rtlpci->tx_ring[BEACON_QUEUE];
+ pskb = __skb_dequeue(&ring->queue);
+- if (pskb)
++ if (pskb) {
++ struct rtl_tx_desc *entry = &ring->desc[ring->idx];
++ pci_unmap_single(rtlpci->pdev, rtlpriv->cfg->ops->get_desc(
++ (u8 *) entry, true, HW_DESC_TXBUFF_ADDR),
++ pskb->len, PCI_DMA_TODEVICE);
+ kfree_skb(pskb);
++ }
+
+ /*NB: the beacon data buffer must be 32-bit aligned. */
+ pskb = ieee80211_beacon_get(hw, mac->vif);
+diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
+index ff1b84b..e86edfc 100644
+--- a/drivers/rtc/rtc-pl031.c
++++ b/drivers/rtc/rtc-pl031.c
+@@ -339,8 +339,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
+ dev_dbg(&adev->dev, "revision = 0x%01x\n", ldata->hw_revision);
+
+ /* Enable the clockwatch on ST Variants */
+- if ((ldata->hw_designer == AMBA_VENDOR_ST) &&
+- (ldata->hw_revision > 1))
++ if (ldata->hw_designer == AMBA_VENDOR_ST)
+ writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
+ ldata->base + RTC_CR);
+
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index 8ba4510..7587796 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -346,14 +346,14 @@ config SPI_TI_SSP
+ serial port.
+
+ config SPI_TOPCLIFF_PCH
+- tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH SPI controller"
++ tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) SPI"
+ depends on PCI
+ help
+ SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus
+ used in some x86 embedded processors.
+
+- This driver also supports the ML7213, a companion chip for the
+- Atom E6xx series and compatible with the Intel EG20T PCH.
++ This driver also supports the ML7213/ML7223/ML7831, a companion chip
++ for the Atom E6xx series and compatible with the Intel EG20T PCH.
+
+ config SPI_TXX9
+ tristate "Toshiba TXx9 SPI controller"
+diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
+index 027b6d0..54b9d2e 100644
+--- a/drivers/spi/spi-topcliff-pch.c
++++ b/drivers/spi/spi-topcliff-pch.c
+@@ -95,16 +95,18 @@
+ #define PCH_CLOCK_HZ 50000000
+ #define PCH_MAX_SPBR 1023
+
+-/* Definition for ML7213 by OKI SEMICONDUCTOR */
++/* Definition for ML7213/ML7831 by OKI SEMICONDUCTOR */
+ #define PCI_VENDOR_ID_ROHM 0x10DB
+ #define PCI_DEVICE_ID_ML7213_SPI 0x802c
+ #define PCI_DEVICE_ID_ML7223_SPI 0x800F
++#define PCI_DEVICE_ID_ML7831_SPI 0x8816
+
+ /*
+ * Set the number of SPI instance max
+ * Intel EG20T PCH : 1ch
+ * OKI SEMICONDUCTOR ML7213 IOH : 2ch
+ * OKI SEMICONDUCTOR ML7223 IOH : 1ch
++ * OKI SEMICONDUCTOR ML7831 IOH : 1ch
+ */
+ #define PCH_SPI_MAX_DEV 2
+
+@@ -218,6 +220,7 @@ static struct pci_device_id pch_spi_pcidev_id[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
++ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_SPI), 1, },
+ { }
+ };
+
+@@ -315,22 +318,23 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
+ data->tx_index = tx_index;
+ data->rx_index = rx_index;
+
+- }
+-
+- /* if transfer complete interrupt */
+- if (reg_spsr_val & SPSR_FI_BIT) {
+- if ((tx_index == bpw_len) && (rx_index == tx_index)) {
+- /* disable interrupts */
+- pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+-
+- /* transfer is completed;
+- inform pch_spi_process_messages */
+- data->transfer_complete = true;
+- data->transfer_active = false;
+- wake_up(&data->wait);
+- } else {
+- dev_err(&data->master->dev,
+- "%s : Transfer is not completed", __func__);
++ /* if transfer complete interrupt */
++ if (reg_spsr_val & SPSR_FI_BIT) {
++ if ((tx_index == bpw_len) && (rx_index == tx_index)) {
++ /* disable interrupts */
++ pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
++ PCH_ALL);
++
++ /* transfer is completed;
++ inform pch_spi_process_messages */
++ data->transfer_complete = true;
++ data->transfer_active = false;
++ wake_up(&data->wait);
++ } else {
++ dev_err(&data->master->dev,
++ "%s : Transfer is not completed",
++ __func__);
++ }
+ }
+ }
+ }
+diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
+index fc9ee97..870db4f 100644
+--- a/drivers/staging/iio/magnetometer/hmc5843.c
++++ b/drivers/staging/iio/magnetometer/hmc5843.c
+@@ -521,7 +521,9 @@ static int hmc5843_detect(struct i2c_client *client,
+ /* Called when we have found a new HMC5843. */
+ static void hmc5843_init_client(struct i2c_client *client)
+ {
+- struct hmc5843_data *data = i2c_get_clientdata(client);
++ struct iio_dev *indio_dev = i2c_get_clientdata(client);
++ struct hmc5843_data *data = iio_priv(indio_dev);
++
+ hmc5843_set_meas_conf(client, data->meas_conf);
+ hmc5843_set_rate(client, data->rate);
+ hmc5843_configure(client, data->operating_mode);
+diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
+index 1d04c50..5ba0898 100644
+--- a/drivers/tty/serial/altera_uart.c
++++ b/drivers/tty/serial/altera_uart.c
+@@ -555,7 +555,7 @@ static int __devinit altera_uart_probe(struct platform_device *pdev)
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_mem)
+ port->mapbase = res_mem->start;
+- else if (platp->mapbase)
++ else if (platp)
+ port->mapbase = platp->mapbase;
+ else
+ return -EINVAL;
+@@ -563,7 +563,7 @@ static int __devinit altera_uart_probe(struct platform_device *pdev)
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res_irq)
+ port->irq = res_irq->start;
+- else if (platp->irq)
++ else if (platp)
+ port->irq = platp->irq;
+
+ /* Check platform data first so we can override device node data */
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 8e00926..6da8cf8 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1380,6 +1380,10 @@ static int pl011_startup(struct uart_port *port)
+
+ uap->port.uartclk = clk_get_rate(uap->clk);
+
++ /* Clear pending error and receive interrupts */
++ writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
++ UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);
++
+ /*
+ * Allocate the IRQ
+ */
+@@ -1414,10 +1418,6 @@ static int pl011_startup(struct uart_port *port)
+ cr = UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
+ writew(cr, uap->port.membase + UART011_CR);
+
+- /* Clear pending error interrupts */
+- writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
+- uap->port.membase + UART011_ICR);
+-
+ /*
+ * initialise the old status of the modem signals
+ */
+@@ -1432,6 +1432,9 @@ static int pl011_startup(struct uart_port *port)
+ * as well.
+ */
+ spin_lock_irq(&uap->port.lock);
++ /* Clear out any spuriously appearing RX interrupts */
++ writew(UART011_RTIS | UART011_RXIS,
++ uap->port.membase + UART011_ICR);
+ uap->im = UART011_RTIM;
+ if (!pl011_dma_rx_running(uap))
+ uap->im |= UART011_RXIM;
+@@ -1932,6 +1935,10 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
+ uap->port.line = i;
+ pl011_dma_probe(uap);
+
++ /* Ensure interrupts from this UART are masked and cleared */
++ writew(0, uap->port.membase + UART011_IMSC);
++ writew(0xffff, uap->port.membase + UART011_ICR);
++
+ snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
+
+ amba_ports[i] = uap;
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index d6aba8c..da776a0 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -1438,6 +1438,7 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
+ }
+
+ pci_enable_msi(pdev);
++ pci_set_master(pdev);
+
+ iobase = pci_resource_start(pdev, 0);
+ mapbase = pci_resource_start(pdev, 1);
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index 45887a0..c77f0d6 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -1198,8 +1198,13 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
+ if (status == 0) {
+ status = usb_suspend_device(udev, msg);
+
+- /* Again, ignore errors during system sleep transitions */
+- if (!PMSG_IS_AUTO(msg))
++ /*
++ * Ignore errors from non-root-hub devices during
++ * system sleep transitions. For the most part,
++ * these devices should go to low power anyway when
++ * the entire bus is suspended.
++ */
++ if (udev->parent && !PMSG_IS_AUTO(msg))
+ status = 0;
+ }
+
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index bc06a8f..e238b3b 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3072,6 +3072,22 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ if (retval)
+ goto fail;
+
++ /*
++ * Some superspeed devices have finished the link training process
++ * and attached to a superspeed hub port, but the device descriptor
++ * got from those devices show they aren't superspeed devices. Warm
++ * reset the port attached by the devices can fix them.
++ */
++ if ((udev->speed == USB_SPEED_SUPER) &&
++ (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
++ dev_err(&udev->dev, "got a wrong device descriptor, "
++ "warm reset device\n");
++ hub_port_reset(hub, port1, udev,
++ HUB_BH_RESET_TIME, true);
++ retval = -EINVAL;
++ goto fail;
++ }
++
+ if (udev->descriptor.bMaxPacketSize0 == 0xff ||
+ udev->speed == USB_SPEED_SUPER)
+ i = 512;
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index b3bdfed..aed3e07 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -308,7 +308,8 @@ static void sg_complete(struct urb *urb)
+ retval = usb_unlink_urb(io->urbs [i]);
+ if (retval != -EINPROGRESS &&
+ retval != -ENODEV &&
+- retval != -EBUSY)
++ retval != -EBUSY &&
++ retval != -EIDRM)
+ dev_err(&io->dev->dev,
+ "%s, unlink --> %d\n",
+ __func__, retval);
+@@ -317,7 +318,6 @@ static void sg_complete(struct urb *urb)
+ }
+ spin_lock(&io->lock);
+ }
+- urb->dev = NULL;
+
+ /* on the last completion, signal usb_sg_wait() */
+ io->bytes += urb->actual_length;
+@@ -524,7 +524,6 @@ void usb_sg_wait(struct usb_sg_request *io)
+ case -ENXIO: /* hc didn't queue this one */
+ case -EAGAIN:
+ case -ENOMEM:
+- io->urbs[i]->dev = NULL;
+ retval = 0;
+ yield();
+ break;
+@@ -542,7 +541,6 @@ void usb_sg_wait(struct usb_sg_request *io)
+
+ /* fail any uncompleted urbs */
+ default:
+- io->urbs[i]->dev = NULL;
+ io->urbs[i]->status = retval;
+ dev_dbg(&io->dev->dev, "%s, submit --> %d\n",
+ __func__, retval);
+@@ -593,7 +591,10 @@ void usb_sg_cancel(struct usb_sg_request *io)
+ if (!io->urbs [i]->dev)
+ continue;
+ retval = usb_unlink_urb(io->urbs [i]);
+- if (retval != -EINPROGRESS && retval != -EBUSY)
++ if (retval != -EINPROGRESS
++ && retval != -ENODEV
++ && retval != -EBUSY
++ && retval != -EIDRM)
+ dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
+ __func__, retval);
+ }
+diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
+index 5048a0c..e7fb1a3 100644
+--- a/drivers/usb/gadget/pch_udc.c
++++ b/drivers/usb/gadget/pch_udc.c
+@@ -311,6 +311,7 @@ struct pch_udc_ep {
+ * @registered: driver regsitered with system
+ * @suspended: driver in suspended state
+ * @connected: gadget driver associated
++ * @vbus_session: required vbus_session state
+ * @set_cfg_not_acked: pending acknowledgement 4 setup
+ * @waiting_zlp_ack: pending acknowledgement 4 ZLP
+ * @data_requests: DMA pool for data requests
+@@ -337,6 +338,7 @@ struct pch_udc_dev {
+ registered:1,
+ suspended:1,
+ connected:1,
++ vbus_session:1,
+ set_cfg_not_acked:1,
+ waiting_zlp_ack:1;
+ struct pci_pool *data_requests;
+@@ -554,6 +556,29 @@ static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
+ }
+
+ /**
++ * pch_udc_reconnect() - This API initializes usb device controller,
++ * and clear the disconnect status.
++ * @dev: Reference to pch_udc_regs structure
++ */
++static void pch_udc_init(struct pch_udc_dev *dev);
++static void pch_udc_reconnect(struct pch_udc_dev *dev)
++{
++ pch_udc_init(dev);
++
++ /* enable device interrupts */
++ /* pch_udc_enable_interrupts() */
++ pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
++ UDC_DEVINT_UR | UDC_DEVINT_ENUM);
++
++ /* Clear the disconnect */
++ pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
++ pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
++ mdelay(1);
++ /* Resume USB signalling */
++ pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
++}
++
++/**
+ * pch_udc_vbus_session() - set or clearr the disconnect status.
+ * @dev: Reference to pch_udc_regs structure
+ * @is_active: Parameter specifying the action
+@@ -563,10 +588,18 @@ static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
+ static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
+ int is_active)
+ {
+- if (is_active)
+- pch_udc_clear_disconnect(dev);
+- else
++ if (is_active) {
++ pch_udc_reconnect(dev);
++ dev->vbus_session = 1;
++ } else {
++ if (dev->driver && dev->driver->disconnect) {
++ spin_unlock(&dev->lock);
++ dev->driver->disconnect(&dev->gadget);
++ spin_lock(&dev->lock);
++ }
+ pch_udc_set_disconnect(dev);
++ dev->vbus_session = 0;
++ }
+ }
+
+ /**
+@@ -1126,7 +1159,17 @@ static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
+ if (!gadget)
+ return -EINVAL;
+ dev = container_of(gadget, struct pch_udc_dev, gadget);
+- pch_udc_vbus_session(dev, is_on);
++ if (is_on) {
++ pch_udc_reconnect(dev);
++ } else {
++ if (dev->driver && dev->driver->disconnect) {
++ spin_unlock(&dev->lock);
++ dev->driver->disconnect(&dev->gadget);
++ spin_lock(&dev->lock);
++ }
++ pch_udc_set_disconnect(dev);
++ }
++
+ return 0;
+ }
+
+@@ -2335,8 +2378,11 @@ static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
+ /* Complete request queue */
+ empty_req_queue(ep);
+ }
+- if (dev->driver && dev->driver->disconnect)
++ if (dev->driver && dev->driver->disconnect) {
++ spin_unlock(&dev->lock);
+ dev->driver->disconnect(&dev->gadget);
++ spin_lock(&dev->lock);
++ }
+ }
+
+ /**
+@@ -2371,6 +2417,11 @@ static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
+ pch_udc_set_dma(dev, DMA_DIR_TX);
+ pch_udc_set_dma(dev, DMA_DIR_RX);
+ pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
++
++ /* enable device interrupts */
++ pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
++ UDC_DEVINT_ES | UDC_DEVINT_ENUM |
++ UDC_DEVINT_SI | UDC_DEVINT_SC);
+ }
+
+ /**
+@@ -2472,8 +2523,24 @@ static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
+ if (dev_intr & UDC_DEVINT_SC)
+ pch_udc_svc_cfg_interrupt(dev);
+ /* USB Suspend interrupt */
+- if (dev_intr & UDC_DEVINT_US)
++ if (dev_intr & UDC_DEVINT_US) {
++ if (dev->driver
++ && dev->driver->suspend) {
++ spin_unlock(&dev->lock);
++ dev->driver->suspend(&dev->gadget);
++ spin_lock(&dev->lock);
++ }
++
++ if (dev->vbus_session == 0) {
++ if (dev->driver && dev->driver->disconnect) {
++ spin_unlock(&dev->lock);
++ dev->driver->disconnect(&dev->gadget);
++ spin_lock(&dev->lock);
++ }
++ pch_udc_reconnect(dev);
++ }
+ dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
++ }
+ /* Clear the SOF interrupt, if enabled */
+ if (dev_intr & UDC_DEVINT_SOF)
+ dev_dbg(&dev->pdev->dev, "SOF\n");
+@@ -2499,6 +2566,14 @@ static irqreturn_t pch_udc_isr(int irq, void *pdev)
+ dev_intr = pch_udc_read_device_interrupts(dev);
+ ep_intr = pch_udc_read_ep_interrupts(dev);
+
++ /* For a hot plug, this find that the controller is hung up. */
++ if (dev_intr == ep_intr)
++ if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
++ dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
++ /* The controller is reset */
++ pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
++ return IRQ_HANDLED;
++ }
+ if (dev_intr)
+ /* Clear device interrupts */
+ pch_udc_write_device_interrupts(dev, dev_intr);
+@@ -2912,8 +2987,10 @@ static int pch_udc_probe(struct pci_dev *pdev,
+ }
+ pch_udc = dev;
+ /* initialize the hardware */
+- if (pch_udc_pcd_init(dev))
++ if (pch_udc_pcd_init(dev)) {
++ retval = -ENODEV;
+ goto finished;
++ }
+ if (request_irq(pdev->irq, pch_udc_isr, IRQF_SHARED, KBUILD_MODNAME,
+ dev)) {
+ dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 7732d69..2afff88 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -825,9 +825,13 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
+ }
+ }
+
+- /* Disable any BIOS SMIs */
+- writel(XHCI_LEGACY_DISABLE_SMI,
+- base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
++ val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
++ /* Mask off (turn off) any enabled SMIs */
++ val &= XHCI_LEGACY_DISABLE_SMI;
++ /* Mask all SMI events bits, RW1C */
++ val |= XHCI_LEGACY_SMI_EVENTS;
++ /* Disable any BIOS SMIs and clear all SMI events*/
++ writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+
+ if (usb_is_intel_switchable_xhci(pdev))
+ usb_enable_xhci_ports(pdev);
+diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
+index c7f3312..377f424 100644
+--- a/drivers/usb/host/xhci-ext-caps.h
++++ b/drivers/usb/host/xhci-ext-caps.h
+@@ -62,8 +62,9 @@
+ /* USB Legacy Support Control and Status Register - section 7.1.2 */
+ /* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
+ #define XHCI_LEGACY_CONTROL_OFFSET (0x04)
+-/* bits 1:2, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
+-#define XHCI_LEGACY_DISABLE_SMI ((0x3 << 1) + (0xff << 5) + (0x7 << 17))
++/* bits 1:3, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
++#define XHCI_LEGACY_DISABLE_SMI ((0x7 << 1) + (0xff << 5) + (0x7 << 17))
++#define XHCI_LEGACY_SMI_EVENTS (0x7 << 29)
+
+ /* USB 2.0 xHCI 0.96 L1C capability - section 7.2.2.1.3.2 */
+ #define XHCI_L1C (1 << 16)
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index c69cf54..01c3800 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1704,11 +1704,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ int i;
+
+ /* Free the Event Ring Segment Table and the actual Event Ring */
+- if (xhci->ir_set) {
+- xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
+- xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
+- xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
+- }
+ size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
+ if (xhci->erst.entries)
+ dma_free_coherent(&pdev->dev, size,
+@@ -1720,7 +1715,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ xhci->event_ring = NULL;
+ xhci_dbg(xhci, "Freed event ring\n");
+
+- xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
+ if (xhci->cmd_ring)
+ xhci_ring_free(xhci, xhci->cmd_ring);
+ xhci->cmd_ring = NULL;
+@@ -1749,7 +1743,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ xhci->medium_streams_pool = NULL;
+ xhci_dbg(xhci, "Freed medium stream array pool\n");
+
+- xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
+ if (xhci->dcbaa)
+ dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
+ xhci->dcbaa, xhci->dcbaa->dma);
+@@ -2358,6 +2351,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+
+ fail:
+ xhci_warn(xhci, "Couldn't initialize memory\n");
++ xhci_halt(xhci);
++ xhci_reset(xhci);
+ xhci_mem_cleanup(xhci);
+ return -ENOMEM;
+ }
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index ef98b38..211296a 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -95,6 +95,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
+ }
++ if (pdev->vendor == PCI_VENDOR_ID_VIA)
++ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ }
+
+ /* called during probe() after chip reset completes */
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index ae92dc4..43b3447 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2343,7 +2343,7 @@ hw_died:
+ u32 irq_pending;
+ /* Acknowledge the PCI interrupt */
+ irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+- irq_pending |= 0x3;
++ irq_pending |= IMAN_IP;
+ xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
+ }
+
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 034f554..4c00606 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -664,11 +664,11 @@ static void xhci_save_registers(struct xhci_hcd *xhci)
+ xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
+ xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
+ xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
+- xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+- xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
+ xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
+ xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+ xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
++ xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
++ xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
+ }
+
+ static void xhci_restore_registers(struct xhci_hcd *xhci)
+@@ -677,10 +677,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci)
+ xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
+ xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
+ xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
+- xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
+- xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
+ xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
+ xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
++ xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
++ xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
++ xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
+ }
+
+ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 09eda3a..4850c4d 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -205,6 +205,10 @@ struct xhci_op_regs {
+ #define CMD_PM_INDEX (1 << 11)
+ /* bits 12:31 are reserved (and should be preserved on writes). */
+
++/* IMAN - Interrupt Management Register */
++#define IMAN_IP (1 << 1)
++#define IMAN_IE (1 << 0)
++
+ /* USBSTS - USB status - status bitmasks */
+ /* HC not running - set to 1 when run/stop bit is cleared. */
+ #define STS_HALT XHCI_STS_HALT
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index f2c9ef7..c4cf3f3 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -75,7 +75,8 @@ struct ftdi_private {
+ unsigned long last_dtr_rts; /* saved modem control outputs */
+ struct async_icount icount;
+ wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
+- char prev_status, diff_status; /* Used for TIOCMIWAIT */
++ char prev_status; /* Used for TIOCMIWAIT */
++ bool dev_gone; /* Used to abort TIOCMIWAIT */
+ char transmit_empty; /* If transmitter is empty or not */
+ struct usb_serial_port *port;
+ __u16 interface; /* FT2232C, FT2232H or FT4232H port interface
+@@ -1679,6 +1680,7 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
+ init_waitqueue_head(&priv->delta_msr_wait);
+
+ priv->flags = ASYNC_LOW_LATENCY;
++ priv->dev_gone = false;
+
+ if (quirk && quirk->port_probe)
+ quirk->port_probe(priv);
+@@ -1836,6 +1838,9 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
+
+ dbg("%s", __func__);
+
++ priv->dev_gone = true;
++ wake_up_interruptible_all(&priv->delta_msr_wait);
++
+ remove_sysfs_attrs(port);
+
+ kref_put(&priv->kref, ftdi_sio_priv_release);
+@@ -1979,17 +1984,19 @@ static int ftdi_process_packet(struct tty_struct *tty,
+ N.B. packet may be processed more than once, but differences
+ are only processed once. */
+ status = packet[0] & FTDI_STATUS_B0_MASK;
+- if (status & FTDI_RS0_CTS)
+- priv->icount.cts++;
+- if (status & FTDI_RS0_DSR)
+- priv->icount.dsr++;
+- if (status & FTDI_RS0_RI)
+- priv->icount.rng++;
+- if (status & FTDI_RS0_RLSD)
+- priv->icount.dcd++;
+ if (status != priv->prev_status) {
+- priv->diff_status |= status ^ priv->prev_status;
+- wake_up_interruptible(&priv->delta_msr_wait);
++ char diff_status = status ^ priv->prev_status;
++
++ if (diff_status & FTDI_RS0_CTS)
++ priv->icount.cts++;
++ if (diff_status & FTDI_RS0_DSR)
++ priv->icount.dsr++;
++ if (diff_status & FTDI_RS0_RI)
++ priv->icount.rng++;
++ if (diff_status & FTDI_RS0_RLSD)
++ priv->icount.dcd++;
++
++ wake_up_interruptible_all(&priv->delta_msr_wait);
+ priv->prev_status = status;
+ }
+
+@@ -2388,15 +2395,12 @@ static int ftdi_ioctl(struct tty_struct *tty,
+ */
+ case TIOCMIWAIT:
+ cprev = priv->icount;
+- while (1) {
++ while (!priv->dev_gone) {
+ interruptible_sleep_on(&priv->delta_msr_wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ cnow = priv->icount;
+- if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+- cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
+- return -EIO; /* no change => error */
+ if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
+ ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
+ ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
+@@ -2405,7 +2409,7 @@ static int ftdi_ioctl(struct tty_struct *tty,
+ }
+ cprev = cnow;
+ }
+- /* not reached */
++ return -EIO;
+ break;
+ case TIOCSERGETLSR:
+ return get_lsr_info(port, (struct serial_struct __user *)arg);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 54898c9..cbe3451 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -708,6 +708,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) },
++ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) },
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index fc2d66f..5532ea5 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -421,7 +421,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
+ control = priv->line_control;
+ if ((cflag & CBAUD) == B0)
+ priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
+- else
++ else if ((old_termios->c_cflag & CBAUD) == B0)
+ priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
+ if (control != priv->line_control) {
+ control = priv->line_control;
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index b18179b..7c3ec9e 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */
+ { USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */
+ { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */
++ { USB_DEVICE(0x1199, 0x68A2) }, /* Sierra Wireless MC7710 */
+ /* Sierra Wireless C885 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)},
+ /* Sierra Wireless C888, Air Card 501, USB 303, USB 304 */
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index cc274fd..38d7ebd 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -1059,6 +1059,12 @@ int usb_serial_probe(struct usb_interface *interface,
+ serial->attached = 1;
+ }
+
++ /* Avoid race with tty_open and serial_install by setting the
++ * disconnected flag and not clearing it until all ports have been
++ * registered.
++ */
++ serial->disconnected = 1;
++
+ if (get_free_serial(serial, num_ports, &minor) == NULL) {
+ dev_err(&interface->dev, "No more free serial devices\n");
+ goto probe_error;
+@@ -1083,6 +1089,8 @@ int usb_serial_probe(struct usb_interface *interface,
+ }
+ }
+
++ serial->disconnected = 0;
++
+ usb_serial_console_init(debug, minor);
+
+ exit:
+diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
+index 7f8472c..8813588 100644
+--- a/drivers/video/uvesafb.c
++++ b/drivers/video/uvesafb.c
+@@ -815,8 +815,15 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
+ par->pmi_setpal = pmi_setpal;
+ par->ypan = ypan;
+
+- if (par->pmi_setpal || par->ypan)
+- uvesafb_vbe_getpmi(task, par);
++ if (par->pmi_setpal || par->ypan) {
++ if (__supported_pte_mask & _PAGE_NX) {
++ par->pmi_setpal = par->ypan = 0;
++ printk(KERN_WARNING "uvesafb: NX protection is actively."
++ "We have better not to use the PMI.\n");
++ } else {
++ uvesafb_vbe_getpmi(task, par);
++ }
++ }
+ #else
+ /* The protected mode interface is not available on non-x86. */
+ par->pmi_setpal = par->ypan = 0;
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index dbae4d9..7b1cd5c 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1185,9 +1185,6 @@ struct ext4_sb_info {
+ unsigned long s_ext_blocks;
+ unsigned long s_ext_extents;
+ #endif
+- /* ext4 extent cache stats */
+- unsigned long extent_cache_hits;
+- unsigned long extent_cache_misses;
+
+ /* for buddy allocator */
+ struct ext4_group_info ***s_group_info;
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 7507036..c2a2012 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2052,10 +2052,6 @@ static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block,
+ ret = 1;
+ }
+ errout:
+- if (!ret)
+- sbi->extent_cache_misses++;
+- else
+- sbi->extent_cache_hits++;
+ trace_ext4_ext_in_cache(inode, block, ret);
+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+ return ret;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 9281dbe..961059b 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2504,18 +2504,6 @@ static ssize_t lifetime_write_kbytes_show(struct ext4_attr *a,
+ EXT4_SB(sb)->s_sectors_written_start) >> 1)));
+ }
+
+-static ssize_t extent_cache_hits_show(struct ext4_attr *a,
+- struct ext4_sb_info *sbi, char *buf)
+-{
+- return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_hits);
+-}
+-
+-static ssize_t extent_cache_misses_show(struct ext4_attr *a,
+- struct ext4_sb_info *sbi, char *buf)
+-{
+- return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_misses);
+-}
+-
+ static ssize_t inode_readahead_blks_store(struct ext4_attr *a,
+ struct ext4_sb_info *sbi,
+ const char *buf, size_t count)
+@@ -2573,8 +2561,6 @@ static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store)
+ EXT4_RO_ATTR(delayed_allocation_blocks);
+ EXT4_RO_ATTR(session_write_kbytes);
+ EXT4_RO_ATTR(lifetime_write_kbytes);
+-EXT4_RO_ATTR(extent_cache_hits);
+-EXT4_RO_ATTR(extent_cache_misses);
+ EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show,
+ inode_readahead_blks_store, s_inode_readahead_blks);
+ EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
+@@ -2590,8 +2576,6 @@ static struct attribute *ext4_attrs[] = {
+ ATTR_LIST(delayed_allocation_blocks),
+ ATTR_LIST(session_write_kbytes),
+ ATTR_LIST(lifetime_write_kbytes),
+- ATTR_LIST(extent_cache_hits),
+- ATTR_LIST(extent_cache_misses),
+ ATTR_LIST(inode_readahead_blks),
+ ATTR_LIST(inode_goal),
+ ATTR_LIST(mb_stats),
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index aaf79af..a7e13bf 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -84,6 +84,7 @@ enum {
+ HCI_SERVICE_CACHE,
+ HCI_LINK_KEYS,
+ HCI_DEBUG_KEYS,
++ HCI_UNREGISTER,
+
+ HCI_RESET,
+ };
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 0677023..866c9d5 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -59,6 +59,7 @@
+ #include <linux/magic.h>
+ #include <linux/pid.h>
+ #include <linux/nsproxy.h>
++#include <linux/ptrace.h>
+
+ #include <asm/futex.h>
+
+@@ -2443,40 +2444,29 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
+ {
+ struct robust_list_head __user *head;
+ unsigned long ret;
+- const struct cred *cred = current_cred(), *pcred;
++ struct task_struct *p;
+
+ if (!futex_cmpxchg_enabled)
+ return -ENOSYS;
+
++ rcu_read_lock();
++
++ ret = -ESRCH;
+ if (!pid)
+- head = current->robust_list;
++ p = current;
+ else {
+- struct task_struct *p;
+-
+- ret = -ESRCH;
+- rcu_read_lock();
+ p = find_task_by_vpid(pid);
+ if (!p)
+ goto err_unlock;
+- ret = -EPERM;
+- pcred = __task_cred(p);
+- /* If victim is in different user_ns, then uids are not
+- comparable, so we must have CAP_SYS_PTRACE */
+- if (cred->user->user_ns != pcred->user->user_ns) {
+- if (!ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
+- goto err_unlock;
+- goto ok;
+- }
+- /* If victim is in same user_ns, then uids are comparable */
+- if (cred->euid != pcred->euid &&
+- cred->euid != pcred->uid &&
+- !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
+- goto err_unlock;
+-ok:
+- head = p->robust_list;
+- rcu_read_unlock();
+ }
+
++ ret = -EPERM;
++ if (!ptrace_may_access(p, PTRACE_MODE_READ))
++ goto err_unlock;
++
++ head = p->robust_list;
++ rcu_read_unlock();
++
+ if (put_user(sizeof(*head), len_ptr))
+ return -EFAULT;
+ return put_user(head, head_ptr);
+diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
+index 5f9e689..a9642d5 100644
+--- a/kernel/futex_compat.c
++++ b/kernel/futex_compat.c
+@@ -10,6 +10,7 @@
+ #include <linux/compat.h>
+ #include <linux/nsproxy.h>
+ #include <linux/futex.h>
++#include <linux/ptrace.h>
+
+ #include <asm/uaccess.h>
+
+@@ -136,40 +137,29 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
+ {
+ struct compat_robust_list_head __user *head;
+ unsigned long ret;
+- const struct cred *cred = current_cred(), *pcred;
++ struct task_struct *p;
+
+ if (!futex_cmpxchg_enabled)
+ return -ENOSYS;
+
++ rcu_read_lock();
++
++ ret = -ESRCH;
+ if (!pid)
+- head = current->compat_robust_list;
++ p = current;
+ else {
+- struct task_struct *p;
+-
+- ret = -ESRCH;
+- rcu_read_lock();
+ p = find_task_by_vpid(pid);
+ if (!p)
+ goto err_unlock;
+- ret = -EPERM;
+- pcred = __task_cred(p);
+- /* If victim is in different user_ns, then uids are not
+- comparable, so we must have CAP_SYS_PTRACE */
+- if (cred->user->user_ns != pcred->user->user_ns) {
+- if (!ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
+- goto err_unlock;
+- goto ok;
+- }
+- /* If victim is in same user_ns, then uids are comparable */
+- if (cred->euid != pcred->euid &&
+- cred->euid != pcred->uid &&
+- !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
+- goto err_unlock;
+-ok:
+- head = p->compat_robust_list;
+- rcu_read_unlock();
+ }
+
++ ret = -EPERM;
++ if (!ptrace_may_access(p, PTRACE_MODE_READ))
++ goto err_unlock;
++
++ head = p->compat_robust_list;
++ rcu_read_unlock();
++
+ if (put_user(sizeof(*head), len_ptr))
+ return -EFAULT;
+ return put_user(ptr_to_compat(head), head_ptr);
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 4042064..c923640 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -508,9 +508,9 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
+ hrtimer_get_expires(&ts->sched_timer), 0))
+ break;
+ }
+- /* Update jiffies and reread time */
+- tick_do_update_jiffies64(now);
++ /* Reread time and update jiffies */
+ now = ktime_get();
++ tick_do_update_jiffies64(now);
+ }
+ }
+
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 2316840..bd936ed 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2686,6 +2686,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ * so no worry about deadlock.
+ */
+ page = pte_page(entry);
++ get_page(page);
+ if (page != pagecache_page)
+ lock_page(page);
+
+@@ -2717,6 +2718,7 @@ out_page_table_lock:
+ }
+ if (page != pagecache_page)
+ unlock_page(page);
++ put_page(page);
+
+ out_mutex:
+ mutex_unlock(&hugetlb_instantiation_mutex);
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index b84458d..857dc88 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -510,6 +510,11 @@ int hci_dev_open(__u16 dev)
+
+ hci_req_lock(hdev);
+
++ if (test_bit(HCI_UNREGISTER, &hdev->flags)) {
++ ret = -ENODEV;
++ goto done;
++ }
++
+ if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
+ ret = -ERFKILL;
+ goto done;
+@@ -1540,6 +1545,8 @@ int hci_unregister_dev(struct hci_dev *hdev)
+
+ BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
+
++ set_bit(HCI_UNREGISTER, &hdev->flags);
++
+ write_lock_bh(&hci_dev_list_lock);
+ list_del(&hdev->list);
+ write_unlock_bh(&hci_dev_list_lock);
+diff --git a/security/commoncap.c b/security/commoncap.c
+index ee4f848..12440ee 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -28,6 +28,7 @@
+ #include <linux/prctl.h>
+ #include <linux/securebits.h>
+ #include <linux/user_namespace.h>
++#include <linux/personality.h>
+
+ /*
+ * If a non-root user executes a setuid-root binary in
+@@ -514,6 +515,11 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
+ }
+ skip:
+
++ /* if we have fs caps, clear dangerous personality flags */
++ if (!cap_issubset(new->cap_permitted, old->cap_permitted))
++ bprm->per_clear |= PER_CLEAR_ON_SETID;
++
++
+ /* Don't let someone trace a set[ug]id/setpcap binary with the revised
+ * credentials unless they have the appropriate permit
+ */
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index 216e33a..adb372d 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -230,6 +230,18 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
+ if (!cmp) {
+ he->period += period;
+ ++he->nr_events;
++
++ /* If the map of an existing hist_entry has
++ * become out-of-date due to an exec() or
++ * similar, update it. Otherwise we will
++ * mis-adjust symbol addresses when computing
++ * the history counter to increment.
++ */
++ if (he->ms.map != entry->ms.map) {
++ he->ms.map = entry->ms.map;
++ if (he->ms.map)
++ he->ms.map->referenced = true;
++ }
+ goto out;
+ }
+
diff --git a/1016_linux-3.2.17.patch b/1016_linux-3.2.17.patch
new file mode 100644
index 00000000..5aeed10a
--- /dev/null
+++ b/1016_linux-3.2.17.patch
@@ -0,0 +1,5695 @@
+diff --git a/Makefile b/Makefile
+index 3da29cb..4c4efa3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index ab3740e..ef642a0 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1155,6 +1155,15 @@ if !MMU
+ source "arch/arm/Kconfig-nommu"
+ endif
+
++config ARM_ERRATA_326103
++ bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
++ depends on CPU_V6
++ help
++ Executing a SWP instruction to read-only memory does not set bit 11
++ of the FSR on the ARM 1136 prior to r1p0. This causes the kernel to
++ treat the access as a read, preventing a COW from occurring and
++ causing the faulting task to livelock.
++
+ config ARM_ERRATA_411920
+ bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
+ depends on CPU_V6 || CPU_V6K
+diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
+index 60843eb..73409e6 100644
+--- a/arch/arm/include/asm/tls.h
++++ b/arch/arm/include/asm/tls.h
+@@ -7,6 +7,8 @@
+
+ .macro set_tls_v6k, tp, tmp1, tmp2
+ mcr p15, 0, \tp, c13, c0, 3 @ set TLS register
++ mov \tmp1, #0
++ mcr p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
+ .endm
+
+ .macro set_tls_v6, tp, tmp1, tmp2
+@@ -15,6 +17,8 @@
+ mov \tmp2, #0xffff0fff
+ tst \tmp1, #HWCAP_TLS @ hardware TLS available?
+ mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
++ movne \tmp1, #0
++ mcrne p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
+ streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0
+ .endm
+
+diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
+index 3efd82c..87c8be5 100644
+--- a/arch/arm/kernel/irq.c
++++ b/arch/arm/kernel/irq.c
+@@ -156,10 +156,10 @@ static bool migrate_one_irq(struct irq_desc *desc)
+ }
+
+ c = irq_data_get_irq_chip(d);
+- if (c->irq_set_affinity)
+- c->irq_set_affinity(d, affinity, true);
+- else
++ if (!c->irq_set_affinity)
+ pr_debug("IRQ%u: unable to set affinity\n", d->irq);
++ else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
++ cpumask_copy(d->affinity, affinity);
+
+ return ret;
+ }
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index ef5640b..e10e59a 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -297,8 +297,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu = smp_processor_id();
+
+- printk("CPU%u: Booted secondary processor\n", cpu);
+-
+ /*
+ * All kernel threads share the same mm context; grab a
+ * reference and switch to it.
+@@ -310,6 +308,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
+ enter_lazy_tlb(mm, current);
+ local_flush_tlb_all();
+
++ printk("CPU%u: Booted secondary processor\n", cpu);
++
+ cpu_init();
+ preempt_disable();
+ trace_hardirqs_off();
+diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
+index d2b1779..76cbb05 100644
+--- a/arch/arm/kernel/sys_arm.c
++++ b/arch/arm/kernel/sys_arm.c
+@@ -115,7 +115,7 @@ int kernel_execve(const char *filename,
+ "Ir" (THREAD_START_SP - sizeof(regs)),
+ "r" (&regs),
+ "Ir" (sizeof(regs))
+- : "r0", "r1", "r2", "r3", "ip", "lr", "memory");
++ : "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory");
+
+ out:
+ return ret;
+diff --git a/arch/arm/mach-omap1/timer.c b/arch/arm/mach-omap1/timer.c
+index 6e90665..fb202af 100644
+--- a/arch/arm/mach-omap1/timer.c
++++ b/arch/arm/mach-omap1/timer.c
+@@ -47,9 +47,9 @@ static int omap1_dm_timer_set_src(struct platform_device *pdev,
+ int n = (pdev->id - 1) << 1;
+ u32 l;
+
+- l = __raw_readl(MOD_CONF_CTRL_1) & ~(0x03 << n);
++ l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n);
+ l |= source << n;
+- __raw_writel(l, MOD_CONF_CTRL_1);
++ omap_writel(l, MOD_CONF_CTRL_1);
+
+ return 0;
+ }
+diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
+index ff1f7cc..8074199 100644
+--- a/arch/arm/mm/abort-ev6.S
++++ b/arch/arm/mm/abort-ev6.S
+@@ -26,18 +26,23 @@ ENTRY(v6_early_abort)
+ mrc p15, 0, r1, c5, c0, 0 @ get FSR
+ mrc p15, 0, r0, c6, c0, 0 @ get FAR
+ /*
+- * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR (erratum 326103).
+- * The test below covers all the write situations, including Java bytecodes
++ * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR.
+ */
+- bic r1, r1, #1 << 11 @ clear bit 11 of FSR
++#ifdef CONFIG_ARM_ERRATA_326103
++ ldr ip, =0x4107b36
++ mrc p15, 0, r3, c0, c0, 0 @ get processor id
++ teq ip, r3, lsr #4 @ r0 ARM1136?
++ bne do_DataAbort
+ tst r5, #PSR_J_BIT @ Java?
++ tsteq r5, #PSR_T_BIT @ Thumb?
+ bne do_DataAbort
+- do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
+- ldreq r3, [r4] @ read aborted ARM instruction
++ bic r1, r1, #1 << 11 @ clear bit 11 of FSR
++ ldr r3, [r4] @ read aborted ARM instruction
+ #ifdef CONFIG_CPU_ENDIAN_BE8
+- reveq r3, r3
++ rev r3, r3
+ #endif
+ do_ldrd_abort tmp=ip, insn=r3
+ tst r3, #1 << 20 @ L = 0 -> write
+ orreq r1, r1, #1 << 11 @ yes.
++#endif
+ b do_DataAbort
+diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
+index b1e192b..db7bcc0 100644
+--- a/arch/arm/mm/cache-l2x0.c
++++ b/arch/arm/mm/cache-l2x0.c
+@@ -32,6 +32,7 @@ static void __iomem *l2x0_base;
+ static DEFINE_RAW_SPINLOCK(l2x0_lock);
+ static uint32_t l2x0_way_mask; /* Bitmask of active ways */
+ static uint32_t l2x0_size;
++static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
+
+ struct l2x0_regs l2x0_saved_regs;
+
+@@ -61,12 +62,7 @@ static inline void cache_sync(void)
+ {
+ void __iomem *base = l2x0_base;
+
+-#ifdef CONFIG_PL310_ERRATA_753970
+- /* write to an unmmapped register */
+- writel_relaxed(0, base + L2X0_DUMMY_REG);
+-#else
+- writel_relaxed(0, base + L2X0_CACHE_SYNC);
+-#endif
++ writel_relaxed(0, base + sync_reg_offset);
+ cache_wait(base + L2X0_CACHE_SYNC, 1);
+ }
+
+@@ -85,10 +81,13 @@ static inline void l2x0_inv_line(unsigned long addr)
+ }
+
+ #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
++static inline void debug_writel(unsigned long val)
++{
++ if (outer_cache.set_debug)
++ outer_cache.set_debug(val);
++}
+
+-#define debug_writel(val) outer_cache.set_debug(val)
+-
+-static void l2x0_set_debug(unsigned long val)
++static void pl310_set_debug(unsigned long val)
+ {
+ writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
+ }
+@@ -98,7 +97,7 @@ static inline void debug_writel(unsigned long val)
+ {
+ }
+
+-#define l2x0_set_debug NULL
++#define pl310_set_debug NULL
+ #endif
+
+ #ifdef CONFIG_PL310_ERRATA_588369
+@@ -331,6 +330,11 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
+ else
+ ways = 8;
+ type = "L310";
++#ifdef CONFIG_PL310_ERRATA_753970
++ /* Unmapped register. */
++ sync_reg_offset = L2X0_DUMMY_REG;
++#endif
++ outer_cache.set_debug = pl310_set_debug;
+ break;
+ case L2X0_CACHE_ID_PART_L210:
+ ways = (aux >> 13) & 0xf;
+@@ -379,7 +383,6 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
+ outer_cache.flush_all = l2x0_flush_all;
+ outer_cache.inv_all = l2x0_inv_all;
+ outer_cache.disable = l2x0_disable;
+- outer_cache.set_debug = l2x0_set_debug;
+
+ printk(KERN_INFO "%s cache controller enabled\n", type);
+ printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
+diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
+index 89bbf4e..e77f4e4 100644
+--- a/arch/x86/boot/compressed/relocs.c
++++ b/arch/x86/boot/compressed/relocs.c
+@@ -402,13 +402,11 @@ static void print_absolute_symbols(void)
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ char *sym_strtab;
+- Elf32_Sym *sh_symtab;
+ int j;
+
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+ continue;
+ }
+- sh_symtab = sec->symtab;
+ sym_strtab = sec->link->strtab;
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) {
+ Elf32_Sym *sym;
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index f98d84c..c4e3581 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1577,9 +1577,11 @@ static int __init apic_verify(void)
+ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
+
+ /* The BIOS may have set up the APIC at some other address */
+- rdmsr(MSR_IA32_APICBASE, l, h);
+- if (l & MSR_IA32_APICBASE_ENABLE)
+- mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
++ if (boot_cpu_data.x86 >= 6) {
++ rdmsr(MSR_IA32_APICBASE, l, h);
++ if (l & MSR_IA32_APICBASE_ENABLE)
++ mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
++ }
+
+ pr_info("Found and enabled local APIC!\n");
+ return 0;
+@@ -1597,13 +1599,15 @@ int __init apic_force_enable(unsigned long addr)
+ * MSR. This can only be done in software for Intel P6 or later
+ * and AMD K7 (Model > 1) or later.
+ */
+- rdmsr(MSR_IA32_APICBASE, l, h);
+- if (!(l & MSR_IA32_APICBASE_ENABLE)) {
+- pr_info("Local APIC disabled by BIOS -- reenabling.\n");
+- l &= ~MSR_IA32_APICBASE_BASE;
+- l |= MSR_IA32_APICBASE_ENABLE | addr;
+- wrmsr(MSR_IA32_APICBASE, l, h);
+- enabled_via_apicbase = 1;
++ if (boot_cpu_data.x86 >= 6) {
++ rdmsr(MSR_IA32_APICBASE, l, h);
++ if (!(l & MSR_IA32_APICBASE_ENABLE)) {
++ pr_info("Local APIC disabled by BIOS -- reenabling.\n");
++ l &= ~MSR_IA32_APICBASE_BASE;
++ l |= MSR_IA32_APICBASE_ENABLE | addr;
++ wrmsr(MSR_IA32_APICBASE, l, h);
++ enabled_via_apicbase = 1;
++ }
+ }
+ return apic_verify();
+ }
+@@ -2149,10 +2153,12 @@ static void lapic_resume(void)
+ * FIXME! This will be wrong if we ever support suspend on
+ * SMP! We'll need to do this as part of the CPU restore!
+ */
+- rdmsr(MSR_IA32_APICBASE, l, h);
+- l &= ~MSR_IA32_APICBASE_BASE;
+- l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
+- wrmsr(MSR_IA32_APICBASE, l, h);
++ if (boot_cpu_data.x86 >= 6) {
++ rdmsr(MSR_IA32_APICBASE, l, h);
++ l &= ~MSR_IA32_APICBASE_BASE;
++ l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
++ wrmsr(MSR_IA32_APICBASE, l, h);
++ }
+ }
+
+ maxlvt = lapic_get_maxlvt();
+diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
+index 9d46f5e..563a09d 100644
+--- a/arch/x86/kernel/microcode_core.c
++++ b/arch/x86/kernel/microcode_core.c
+@@ -418,10 +418,8 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
+ if (err)
+ return err;
+
+- if (microcode_init_cpu(cpu) == UCODE_ERROR) {
+- sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
++ if (microcode_init_cpu(cpu) == UCODE_ERROR)
+ return -EINVAL;
+- }
+
+ return err;
+ }
+diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
+index 71f4727..5a98aa2 100644
+--- a/arch/x86/kernel/setup_percpu.c
++++ b/arch/x86/kernel/setup_percpu.c
+@@ -185,10 +185,22 @@ void __init setup_per_cpu_areas(void)
+ #endif
+ rc = -EINVAL;
+ if (pcpu_chosen_fc != PCPU_FC_PAGE) {
+- const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
+ const size_t dyn_size = PERCPU_MODULE_RESERVE +
+ PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
++ size_t atom_size;
+
++ /*
++ * On 64bit, use PMD_SIZE for atom_size so that embedded
++ * percpu areas are aligned to PMD. This, in the future,
++ * can also allow using PMD mappings in vmalloc area. Use
++ * PAGE_SIZE on 32bit as vmalloc space is highly contended
++ * and large vmalloc area allocs can easily fail.
++ */
++#ifdef CONFIG_X86_64
++ atom_size = PMD_SIZE;
++#else
++ atom_size = PAGE_SIZE;
++#endif
+ rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
+ dyn_size, atom_size,
+ pcpu_cpu_distance,
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 1f92865..e7c920b 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -62,6 +62,7 @@
+ #include <asm/reboot.h>
+ #include <asm/stackprotector.h>
+ #include <asm/hypervisor.h>
++#include <asm/pci_x86.h>
+
+ #include "xen-ops.h"
+ #include "mmu.h"
+@@ -1278,8 +1279,10 @@ asmlinkage void __init xen_start_kernel(void)
+ /* Make sure ACS will be enabled */
+ pci_request_acs();
+ }
+-
+-
++#ifdef CONFIG_PCI
++ /* PCI BIOS service won't work from a PV guest. */
++ pci_probe &= ~PCI_PROBE_BIOS;
++#endif
+ xen_raw_console_write("about to get started...\n");
+
+ xen_setup_runstate_info(0);
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index 87f6673..ec3d603 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -353,8 +353,13 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
+ {
+ if (val & _PAGE_PRESENT) {
+ unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
++ unsigned long pfn = mfn_to_pfn(mfn);
++
+ pteval_t flags = val & PTE_FLAGS_MASK;
+- val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
++ if (unlikely(pfn == ~0))
++ val = flags & ~_PAGE_PRESENT;
++ else
++ val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
+ }
+
+ return val;
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
+index 041d4fe..9a23fff 100644
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -172,6 +172,7 @@ static void __init xen_fill_possible_map(void)
+ static void __init xen_filter_cpu_maps(void)
+ {
+ int i, rc;
++ unsigned int subtract = 0;
+
+ if (!xen_initial_domain())
+ return;
+@@ -186,8 +187,22 @@ static void __init xen_filter_cpu_maps(void)
+ } else {
+ set_cpu_possible(i, false);
+ set_cpu_present(i, false);
++ subtract++;
+ }
+ }
++#ifdef CONFIG_HOTPLUG_CPU
++ /* This is akin to using 'nr_cpus' on the Linux command line.
++ * Which is OK as when we use 'dom0_max_vcpus=X' we can only
++ * have up to X, while nr_cpu_ids is greater than X. This
++ * normally is not a problem, except when CPU hotplugging
++ * is involved and then there might be more than X CPUs
++ * in the guest - which will not work as there is no
++ * hypercall to expand the max number of VCPUs an already
++ * running guest has. So cap it up to X. */
++ if (subtract)
++ nr_cpu_ids = nr_cpu_ids - subtract;
++#endif
++
+ }
+
+ static void __init xen_smp_prepare_boot_cpu(void)
+diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
+index 79d7362..3e45aa0 100644
+--- a/arch/x86/xen/xen-asm.S
++++ b/arch/x86/xen/xen-asm.S
+@@ -96,7 +96,7 @@ ENTRY(xen_restore_fl_direct)
+
+ /* check for unmasked and pending */
+ cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
+- jz 1f
++ jnz 1f
+ 2: call check_events
+ 1:
+ ENDPATCH(xen_restore_fl_direct)
+diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
+index 107f6f7..dd30f40 100644
+--- a/crypto/sha512_generic.c
++++ b/crypto/sha512_generic.c
+@@ -174,7 +174,7 @@ sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
+ index = sctx->count[0] & 0x7f;
+
+ /* Update number of bytes */
+- if (!(sctx->count[0] += len))
++ if ((sctx->count[0] += len) < len)
+ sctx->count[1]++;
+
+ part_len = 128 - index;
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index a9b2820..58db834 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -3500,7 +3500,8 @@ static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg
+ u64 now = get_jiffies_64();
+ int *trials = void_arg;
+
+- if (ent->timestamp < now - min(now, interval))
++ if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
++ (ent->timestamp < now - min(now, interval)))
+ return -1;
+
+ (*trials)++;
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 003cd8d..99fefbd 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -73,6 +73,7 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x0CF3, 0x3004) },
+ { USB_DEVICE(0x0CF3, 0x311D) },
+ { USB_DEVICE(0x13d3, 0x3375) },
++ { USB_DEVICE(0x04CA, 0x3005) },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE02C) },
+@@ -91,6 +92,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index db44ad5..e56da6a 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -129,6 +129,7 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
+index a60adbf..79dcf6e 100644
+--- a/drivers/dma/at_hdmac.c
++++ b/drivers/dma/at_hdmac.c
+@@ -239,10 +239,6 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
+
+ vdbg_dump_regs(atchan);
+
+- /* clear any pending interrupt */
+- while (dma_readl(atdma, EBCISR))
+- cpu_relax();
+-
+ channel_writel(atchan, SADDR, 0);
+ channel_writel(atchan, DADDR, 0);
+ channel_writel(atchan, CTRLA, 0);
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index b0a8117..0535c21 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -191,6 +191,190 @@ utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
+ }
+ }
+
++static bool
++validate_device_path(struct efi_variable *var, int match, u8 *buffer,
++ unsigned long len)
++{
++ struct efi_generic_dev_path *node;
++ int offset = 0;
++
++ node = (struct efi_generic_dev_path *)buffer;
++
++ if (len < sizeof(*node))
++ return false;
++
++ while (offset <= len - sizeof(*node) &&
++ node->length >= sizeof(*node) &&
++ node->length <= len - offset) {
++ offset += node->length;
++
++ if ((node->type == EFI_DEV_END_PATH ||
++ node->type == EFI_DEV_END_PATH2) &&
++ node->sub_type == EFI_DEV_END_ENTIRE)
++ return true;
++
++ node = (struct efi_generic_dev_path *)(buffer + offset);
++ }
++
++ /*
++ * If we're here then either node->length pointed past the end
++ * of the buffer or we reached the end of the buffer without
++ * finding a device path end node.
++ */
++ return false;
++}
++
++static bool
++validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
++ unsigned long len)
++{
++ /* An array of 16-bit integers */
++ if ((len % 2) != 0)
++ return false;
++
++ return true;
++}
++
++static bool
++validate_load_option(struct efi_variable *var, int match, u8 *buffer,
++ unsigned long len)
++{
++ u16 filepathlength;
++ int i, desclength = 0, namelen;
++
++ namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));
++
++ /* Either "Boot" or "Driver" followed by four digits of hex */
++ for (i = match; i < match+4; i++) {
++ if (var->VariableName[i] > 127 ||
++ hex_to_bin(var->VariableName[i] & 0xff) < 0)
++ return true;
++ }
++
++ /* Reject it if there's 4 digits of hex and then further content */
++ if (namelen > match + 4)
++ return false;
++
++ /* A valid entry must be at least 8 bytes */
++ if (len < 8)
++ return false;
++
++ filepathlength = buffer[4] | buffer[5] << 8;
++
++ /*
++ * There's no stored length for the description, so it has to be
++ * found by hand
++ */
++ desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
++
++ /* Each boot entry must have a descriptor */
++ if (!desclength)
++ return false;
++
++ /*
++ * If the sum of the length of the description, the claimed filepath
++ * length and the original header are greater than the length of the
++ * variable, it's malformed
++ */
++ if ((desclength + filepathlength + 6) > len)
++ return false;
++
++ /*
++ * And, finally, check the filepath
++ */
++ return validate_device_path(var, match, buffer + desclength + 6,
++ filepathlength);
++}
++
++static bool
++validate_uint16(struct efi_variable *var, int match, u8 *buffer,
++ unsigned long len)
++{
++ /* A single 16-bit integer */
++ if (len != 2)
++ return false;
++
++ return true;
++}
++
++static bool
++validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
++ unsigned long len)
++{
++ int i;
++
++ for (i = 0; i < len; i++) {
++ if (buffer[i] > 127)
++ return false;
++
++ if (buffer[i] == 0)
++ return true;
++ }
++
++ return false;
++}
++
++struct variable_validate {
++ char *name;
++ bool (*validate)(struct efi_variable *var, int match, u8 *data,
++ unsigned long len);
++};
++
++static const struct variable_validate variable_validate[] = {
++ { "BootNext", validate_uint16 },
++ { "BootOrder", validate_boot_order },
++ { "DriverOrder", validate_boot_order },
++ { "Boot*", validate_load_option },
++ { "Driver*", validate_load_option },
++ { "ConIn", validate_device_path },
++ { "ConInDev", validate_device_path },
++ { "ConOut", validate_device_path },
++ { "ConOutDev", validate_device_path },
++ { "ErrOut", validate_device_path },
++ { "ErrOutDev", validate_device_path },
++ { "Timeout", validate_uint16 },
++ { "Lang", validate_ascii_string },
++ { "PlatformLang", validate_ascii_string },
++ { "", NULL },
++};
++
++static bool
++validate_var(struct efi_variable *var, u8 *data, unsigned long len)
++{
++ int i;
++ u16 *unicode_name = var->VariableName;
++
++ for (i = 0; variable_validate[i].validate != NULL; i++) {
++ const char *name = variable_validate[i].name;
++ int match;
++
++ for (match = 0; ; match++) {
++ char c = name[match];
++ u16 u = unicode_name[match];
++
++ /* All special variables are plain ascii */
++ if (u > 127)
++ return true;
++
++ /* Wildcard in the matching name means we've matched */
++ if (c == '*')
++ return variable_validate[i].validate(var,
++ match, data, len);
++
++ /* Case sensitive match */
++ if (c != u)
++ break;
++
++ /* Reached the end of the string while matching */
++ if (!c)
++ return variable_validate[i].validate(var,
++ match, data, len);
++ }
++ }
++
++ return true;
++}
++
+ static efi_status_t
+ get_var_data_locked(struct efivars *efivars, struct efi_variable *var)
+ {
+@@ -324,6 +508,12 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
+ return -EINVAL;
+ }
+
++ if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
++ validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
++ printk(KERN_ERR "efivars: Malformed variable content\n");
++ return -EINVAL;
++ }
++
+ spin_lock(&efivars->lock);
+ status = efivars->ops->set_variable(new_var->VariableName,
+ &new_var->VendorGuid,
+@@ -624,6 +814,12 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
++ if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
++ validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
++ printk(KERN_ERR "efivars: Malformed variable content\n");
++ return -EINVAL;
++ }
++
+ spin_lock(&efivars->lock);
+
+ /*
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index b9da890..a6c2f7a 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -984,6 +984,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ struct intel_ring_buffer *ring;
+ u32 exec_start, exec_len;
+ u32 seqno;
++ u32 mask;
+ int ret, mode, i;
+
+ if (!i915_gem_check_execbuffer(args)) {
+@@ -1021,6 +1022,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ }
+
+ mode = args->flags & I915_EXEC_CONSTANTS_MASK;
++ mask = I915_EXEC_CONSTANTS_MASK;
+ switch (mode) {
+ case I915_EXEC_CONSTANTS_REL_GENERAL:
+ case I915_EXEC_CONSTANTS_ABSOLUTE:
+@@ -1034,18 +1036,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ mode == I915_EXEC_CONSTANTS_REL_SURFACE)
+ return -EINVAL;
+
+- ret = intel_ring_begin(ring, 4);
+- if (ret)
+- return ret;
+-
+- intel_ring_emit(ring, MI_NOOP);
+- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+- intel_ring_emit(ring, INSTPM);
+- intel_ring_emit(ring,
+- I915_EXEC_CONSTANTS_MASK << 16 | mode);
+- intel_ring_advance(ring);
+-
+- dev_priv->relative_constants_mode = mode;
++ /* The HW changed the meaning on this bit on gen6 */
++ if (INTEL_INFO(dev)->gen >= 6)
++ mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+ }
+ break;
+ default:
+@@ -1064,6 +1057,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ return -EINVAL;
+ }
+
++ if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
++ DRM_DEBUG("execbuf with %u cliprects\n",
++ args->num_cliprects);
++ return -EINVAL;
++ }
+ cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
+ GFP_KERNEL);
+ if (cliprects == NULL) {
+@@ -1176,6 +1174,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ }
+ }
+
++ if (ring == &dev_priv->ring[RCS] &&
++ mode != dev_priv->relative_constants_mode) {
++ ret = intel_ring_begin(ring, 4);
++ if (ret)
++ goto err;
++
++ intel_ring_emit(ring, MI_NOOP);
++ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
++ intel_ring_emit(ring, INSTPM);
++ intel_ring_emit(ring, mask << 16 | mode);
++ intel_ring_advance(ring);
++
++ dev_priv->relative_constants_mode = mode;
++ }
++
+ trace_i915_gem_ring_dispatch(ring, seqno);
+
+ exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+@@ -1314,7 +1327,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret;
+
+- if (args->buffer_count < 1) {
++ if (args->buffer_count < 1 ||
++ args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
+ DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 2f99fd4..cbe5a88 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -442,6 +442,7 @@
+ #define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
+ will not assert AGPBUSY# and will only
+ be delivered when out of C3. */
++#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
+ #define ACTHD 0x020c8
+ #define FW_BLC 0x020d8
+ #define FW_BLC2 0x020dc
+@@ -522,6 +523,7 @@
+ #define CM0_MASK_SHIFT 16
+ #define CM0_IZ_OPT_DISABLE (1<<6)
+ #define CM0_ZR_OPT_DISABLE (1<<5)
++#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
+ #define CM0_DEPTH_EVICT_DISABLE (1<<4)
+ #define CM0_COLOR_EVICT_DISABLE (1<<3)
+ #define CM0_DEPTH_WRITE_DISABLE (1<<1)
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 64541f7..9cd81ba 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -136,7 +136,7 @@ static void i9xx_write_infoframe(struct drm_encoder *encoder,
+
+ val &= ~VIDEO_DIP_SELECT_MASK;
+
+- I915_WRITE(VIDEO_DIP_CTL, val | port | flags);
++ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(VIDEO_DIP_DATA, *data);
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 8673581..62f9ac5 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -414,6 +414,22 @@ static int init_render_ring(struct intel_ring_buffer *ring)
+ return ret;
+ }
+
++
++ if (IS_GEN6(dev)) {
++ /* From the Sandybridge PRM, volume 1 part 3, page 24:
++ * "If this bit is set, STCunit will have LRA as replacement
++ * policy. [...] This bit must be reset. LRA replacement
++ * policy is not supported."
++ */
++ I915_WRITE(CACHE_MODE_0,
++ CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
++ }
++
++ if (INTEL_INFO(dev)->gen >= 6) {
++ I915_WRITE(INSTPM,
++ INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
++ }
++
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index e334ec3..8eddcca 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -731,6 +731,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
+ uint16_t width, height;
+ uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+ uint16_t h_sync_offset, v_sync_offset;
++ int mode_clock;
+
+ width = mode->crtc_hdisplay;
+ height = mode->crtc_vdisplay;
+@@ -745,7 +746,11 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+
+- dtd->part1.clock = mode->clock / 10;
++ mode_clock = mode->clock;
++ mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
++ mode_clock /= 10;
++ dtd->part1.clock = mode_clock;
++
+ dtd->part1.h_active = width & 0xff;
+ dtd->part1.h_blank = h_blank_len & 0xff;
+ dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
+@@ -997,7 +1002,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ u32 sdvox;
+ struct intel_sdvo_in_out_map in_out;
+- struct intel_sdvo_dtd input_dtd;
++ struct intel_sdvo_dtd input_dtd, output_dtd;
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+ int rate;
+
+@@ -1022,20 +1027,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ intel_sdvo->attached_output))
+ return;
+
+- /* We have tried to get input timing in mode_fixup, and filled into
+- * adjusted_mode.
+- */
+- if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
+- input_dtd = intel_sdvo->input_dtd;
+- } else {
+- /* Set the output timing to the screen */
+- if (!intel_sdvo_set_target_output(intel_sdvo,
+- intel_sdvo->attached_output))
+- return;
+-
+- intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+- (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
+- }
++ /* lvds has a special fixed output timing. */
++ if (intel_sdvo->is_lvds)
++ intel_sdvo_get_dtd_from_mode(&output_dtd,
++ intel_sdvo->sdvo_lvds_fixed_mode);
++ else
++ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
++ (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd);
+
+ /* Set the input timing to the screen. Assume always input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+@@ -1053,6 +1051,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
+ !intel_sdvo_set_tv_format(intel_sdvo))
+ return;
+
++ /* We have tried to get input timing in mode_fixup, and filled into
++ * adjusted_mode.
++ */
++ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
+
+ switch (pixel_multiplier) {
+@@ -1219,8 +1221,14 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
+
+ static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
+ {
++ struct drm_device *dev = intel_sdvo->base.base.dev;
+ u8 response[2];
+
++ /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
++ * on the line. */
++ if (IS_I945G(dev) || IS_I945GM(dev))
++ return false;
++
+ return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+ &response, 2) && response[0];
+ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+index 525744d..3df56c7 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+@@ -245,7 +245,7 @@ static bool nouveau_dsm_detect(void)
+ struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+ struct pci_dev *pdev = NULL;
+ int has_dsm = 0;
+- int has_optimus;
++ int has_optimus = 0;
+ int vga_count = 0;
+ bool guid_valid;
+ int retval;
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index b30081f..757c549 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -917,8 +917,8 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
+ break;
+ }
+
+- if (radeon_encoder->active_device &
+- (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
++ if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
++ (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector =
+ radeon_get_connector_for_encoder(encoder);
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 104b376..427468f 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -51,7 +51,7 @@ module_param_named(tjmax, force_tjmax, int, 0444);
+ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+
+ #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
+-#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */
++#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
+ #define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
+ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
+ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
+@@ -705,6 +705,10 @@ static void __cpuinit put_core_offline(unsigned int cpu)
+
+ indx = TO_ATTR_NO(cpu);
+
++ /* The core id is too big, just return */
++ if (indx > MAX_CORE_DATA - 1)
++ return;
++
+ if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
+ coretemp_remove_core(pdata, &pdev->dev, indx);
+
+diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
+index 930370d..9a4c3ab 100644
+--- a/drivers/hwmon/fam15h_power.c
++++ b/drivers/hwmon/fam15h_power.c
+@@ -122,6 +122,41 @@ static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4)
+ return true;
+ }
+
++/*
++ * Newer BKDG versions have an updated recommendation on how to properly
++ * initialize the running average range (was: 0xE, now: 0x9). This avoids
++ * counter saturations resulting in bogus power readings.
++ * We correct this value ourselves to cope with older BIOSes.
++ */
++static DEFINE_PCI_DEVICE_TABLE(affected_device) = {
++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
++ { 0 }
++};
++
++static void __devinit tweak_runavg_range(struct pci_dev *pdev)
++{
++ u32 val;
++
++ /*
++ * let this quirk apply only to the current version of the
++ * northbridge, since future versions may change the behavior
++ */
++ if (!pci_match_id(affected_device, pdev))
++ return;
++
++ pci_bus_read_config_dword(pdev->bus,
++ PCI_DEVFN(PCI_SLOT(pdev->devfn), 5),
++ REG_TDP_RUNNING_AVERAGE, &val);
++ if ((val & 0xf) != 0xe)
++ return;
++
++ val &= ~0xf;
++ val |= 0x9;
++ pci_bus_write_config_dword(pdev->bus,
++ PCI_DEVFN(PCI_SLOT(pdev->devfn), 5),
++ REG_TDP_RUNNING_AVERAGE, val);
++}
++
+ static void __devinit fam15h_power_init_data(struct pci_dev *f4,
+ struct fam15h_power_data *data)
+ {
+@@ -155,6 +190,13 @@ static int __devinit fam15h_power_probe(struct pci_dev *pdev,
+ struct device *dev;
+ int err;
+
++ /*
++ * though we ignore every other northbridge, we still have to
++ * do the tweaking on _each_ node in MCM processors as the counters
++ * are working hand-in-hand
++ */
++ tweak_runavg_range(pdev);
++
+ if (!fam15h_power_is_internal_node0(pdev)) {
+ err = -ENODEV;
+ goto exit;
+diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
+index 04be9f8..eb8ad53 100644
+--- a/drivers/i2c/busses/i2c-pnx.c
++++ b/drivers/i2c/busses/i2c-pnx.c
+@@ -546,8 +546,7 @@ static int i2c_pnx_controller_suspend(struct platform_device *pdev,
+ {
+ struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
+
+- /* FIXME: shouldn't this be clk_disable? */
+- clk_enable(alg_data->clk);
++ clk_disable(alg_data->clk);
+
+ return 0;
+ }
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 6f37aa4..065ab4f 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8100,7 +8100,8 @@ static int md_notify_reboot(struct notifier_block *this,
+
+ for_each_mddev(mddev, tmp) {
+ if (mddev_trylock(mddev)) {
+- __md_stop_writes(mddev);
++ if (mddev->pers)
++ __md_stop_writes(mddev);
+ mddev->safemode = 2;
+ mddev_unlock(mddev);
+ }
+diff --git a/drivers/media/dvb/frontends/drxk_hard.c b/drivers/media/dvb/frontends/drxk_hard.c
+index f6431ef..a1f5e3d 100644
+--- a/drivers/media/dvb/frontends/drxk_hard.c
++++ b/drivers/media/dvb/frontends/drxk_hard.c
+@@ -1523,8 +1523,10 @@ static int scu_command(struct drxk_state *state,
+ dprintk(1, "\n");
+
+ if ((cmd == 0) || ((parameterLen > 0) && (parameter == NULL)) ||
+- ((resultLen > 0) && (result == NULL)))
+- goto error;
++ ((resultLen > 0) && (result == NULL))) {
++ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
++ return status;
++ }
+
+ mutex_lock(&state->mutex);
+
+diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
+index 13f54b5..a7e7d6f 100644
+--- a/drivers/media/rc/winbond-cir.c
++++ b/drivers/media/rc/winbond-cir.c
+@@ -1046,6 +1046,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
+ goto exit_unregister_led;
+ }
+
++ data->dev->driver_type = RC_DRIVER_IR_RAW;
+ data->dev->driver_name = WBCIR_NAME;
+ data->dev->input_name = WBCIR_NAME;
+ data->dev->input_phys = "wbcir/cir0";
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index e15e47d..34416d4 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -799,7 +799,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
+ {
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+- unsigned int from, nr, arg;
++ unsigned int from, nr, arg, trim_arg, erase_arg;
+ int err = 0, type = MMC_BLK_SECDISCARD;
+
+ if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
+@@ -807,20 +807,26 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
+ goto out;
+ }
+
++ from = blk_rq_pos(req);
++ nr = blk_rq_sectors(req);
++
+ /* The sanitize operation is supported at v4.5 only */
+ if (mmc_can_sanitize(card)) {
+- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+- EXT_CSD_SANITIZE_START, 1, 0);
+- goto out;
++ erase_arg = MMC_ERASE_ARG;
++ trim_arg = MMC_TRIM_ARG;
++ } else {
++ erase_arg = MMC_SECURE_ERASE_ARG;
++ trim_arg = MMC_SECURE_TRIM1_ARG;
+ }
+
+- from = blk_rq_pos(req);
+- nr = blk_rq_sectors(req);
+-
+- if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+- arg = MMC_SECURE_TRIM1_ARG;
+- else
+- arg = MMC_SECURE_ERASE_ARG;
++ if (mmc_erase_group_aligned(card, from, nr))
++ arg = erase_arg;
++ else if (mmc_can_trim(card))
++ arg = trim_arg;
++ else {
++ err = -EINVAL;
++ goto out;
++ }
+ retry:
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+@@ -830,25 +836,41 @@ retry:
+ INAND_CMD38_ARG_SECERASE,
+ 0);
+ if (err)
+- goto out;
++ goto out_retry;
+ }
++
+ err = mmc_erase(card, from, nr, arg);
+- if (!err && arg == MMC_SECURE_TRIM1_ARG) {
++ if (err == -EIO)
++ goto out_retry;
++ if (err)
++ goto out;
++
++ if (arg == MMC_SECURE_TRIM1_ARG) {
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ INAND_CMD38_ARG_SECTRIM2,
+ 0);
+ if (err)
+- goto out;
++ goto out_retry;
+ }
++
+ err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
++ if (err == -EIO)
++ goto out_retry;
++ if (err)
++ goto out;
+ }
+-out:
+- if (err == -EIO && !mmc_blk_reset(md, card->host, type))
++
++ if (mmc_can_sanitize(card))
++ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
++ EXT_CSD_SANITIZE_START, 1, 0);
++out_retry:
++ if (err && !mmc_blk_reset(md, card->host, type))
+ goto retry;
+ if (!err)
+ mmc_blk_reset_success(md, type);
++out:
+ spin_lock_irq(&md->lock);
+ __blk_end_request(req, err, blk_rq_bytes(req));
+ spin_unlock_irq(&md->lock);
+diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
+index dcad59c..78690f2 100644
+--- a/drivers/mmc/card/queue.c
++++ b/drivers/mmc/card/queue.c
+@@ -134,7 +134,7 @@ static void mmc_queue_setup_discard(struct request_queue *q,
+
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ q->limits.max_discard_sectors = max_discard;
+- if (card->erased_byte == 0)
++ if (card->erased_byte == 0 && !mmc_can_discard(card))
+ q->limits.discard_zeroes_data = 1;
+ q->limits.discard_granularity = card->pref_erase << 9;
+ /* granularity must not be greater than max. discard */
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index 950b97d..411a994 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -1516,7 +1516,10 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
+ {
+ unsigned int erase_timeout;
+
+- if (card->ext_csd.erase_group_def & 1) {
++ if (arg == MMC_DISCARD_ARG ||
++ (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
++ erase_timeout = card->ext_csd.trim_timeout;
++ } else if (card->ext_csd.erase_group_def & 1) {
+ /* High Capacity Erase Group Size uses HC timeouts */
+ if (arg == MMC_TRIM_ARG)
+ erase_timeout = card->ext_csd.trim_timeout;
+@@ -1788,8 +1791,6 @@ int mmc_can_trim(struct mmc_card *card)
+ {
+ if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
+ return 1;
+- if (mmc_can_discard(card))
+- return 1;
+ return 0;
+ }
+ EXPORT_SYMBOL(mmc_can_trim);
+@@ -1808,6 +1809,8 @@ EXPORT_SYMBOL(mmc_can_discard);
+
+ int mmc_can_sanitize(struct mmc_card *card)
+ {
++ if (!mmc_can_trim(card) && !mmc_can_erase(card))
++ return 0;
+ if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
+ return 1;
+ return 0;
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index 4540e37..1b47937 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -467,8 +467,7 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
+ clk_enable(clk);
+ pltfm_host->clk = clk;
+
+- if (!is_imx25_esdhc(imx_data))
+- host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
++ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+
+ if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data))
+ /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index e58aa2b..f65e0b9 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2982,7 +2982,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
+ trans_start + delta_in_ticks)) ||
+ bond->curr_active_slave != slave) {
+ slave->link = BOND_LINK_UP;
+- bond->current_arp_slave = NULL;
++ if (bond->current_arp_slave) {
++ bond_set_slave_inactive_flags(
++ bond->current_arp_slave);
++ bond->current_arp_slave = NULL;
++ }
+
+ pr_info("%s: link status definitely up for interface %s.\n",
+ bond->dev->name, slave->dev->name);
+diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
+index a7c5e88..eeac9ca 100644
+--- a/drivers/net/dummy.c
++++ b/drivers/net/dummy.c
+@@ -106,14 +106,14 @@ static int dummy_dev_init(struct net_device *dev)
+ return 0;
+ }
+
+-static void dummy_dev_free(struct net_device *dev)
++static void dummy_dev_uninit(struct net_device *dev)
+ {
+ free_percpu(dev->dstats);
+- free_netdev(dev);
+ }
+
+ static const struct net_device_ops dummy_netdev_ops = {
+ .ndo_init = dummy_dev_init,
++ .ndo_uninit = dummy_dev_uninit,
+ .ndo_start_xmit = dummy_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = set_multicast_list,
+@@ -127,7 +127,7 @@ static void dummy_setup(struct net_device *dev)
+
+ /* Initialize the device structure. */
+ dev->netdev_ops = &dummy_netdev_ops;
+- dev->destructor = dummy_dev_free;
++ dev->destructor = free_netdev;
+
+ /* Fill in device structure with ethernet-generic values. */
+ dev->tx_queue_len = 0;
+diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
+index 33a4e35..ee532e1 100644
+--- a/drivers/net/ethernet/atheros/atlx/atl1.c
++++ b/drivers/net/ethernet/atheros/atlx/atl1.c
+@@ -2473,7 +2473,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
+ "pcie phy link down %x\n", status);
+ if (netif_running(adapter->netdev)) { /* reset MAC */
+ iowrite32(0, adapter->hw.hw_addr + REG_IMR);
+- schedule_work(&adapter->pcie_dma_to_rst_task);
++ schedule_work(&adapter->reset_dev_task);
+ return IRQ_HANDLED;
+ }
+ }
+@@ -2485,7 +2485,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
+ "pcie DMA r/w error (status = 0x%x)\n",
+ status);
+ iowrite32(0, adapter->hw.hw_addr + REG_IMR);
+- schedule_work(&adapter->pcie_dma_to_rst_task);
++ schedule_work(&adapter->reset_dev_task);
+ return IRQ_HANDLED;
+ }
+
+@@ -2630,10 +2630,10 @@ static void atl1_down(struct atl1_adapter *adapter)
+ atl1_clean_rx_ring(adapter);
+ }
+
+-static void atl1_tx_timeout_task(struct work_struct *work)
++static void atl1_reset_dev_task(struct work_struct *work)
+ {
+ struct atl1_adapter *adapter =
+- container_of(work, struct atl1_adapter, tx_timeout_task);
++ container_of(work, struct atl1_adapter, reset_dev_task);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+@@ -3032,12 +3032,10 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
+ (unsigned long)adapter);
+ adapter->phy_timer_pending = false;
+
+- INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
++ INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
+
+ INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
+
+- INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
+-
+ err = register_netdev(netdev);
+ if (err)
+ goto err_common;
+diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
+index 109d6da..e04bf4d 100644
+--- a/drivers/net/ethernet/atheros/atlx/atl1.h
++++ b/drivers/net/ethernet/atheros/atlx/atl1.h
+@@ -758,9 +758,8 @@ struct atl1_adapter {
+ u16 link_speed;
+ u16 link_duplex;
+ spinlock_t lock;
+- struct work_struct tx_timeout_task;
++ struct work_struct reset_dev_task;
+ struct work_struct link_chg_task;
+- struct work_struct pcie_dma_to_rst_task;
+
+ struct timer_list phy_config_timer;
+ bool phy_timer_pending;
+diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
+index aabcf4b..41c6d83 100644
+--- a/drivers/net/ethernet/atheros/atlx/atlx.c
++++ b/drivers/net/ethernet/atheros/atlx/atlx.c
+@@ -193,7 +193,7 @@ static void atlx_tx_timeout(struct net_device *netdev)
+ {
+ struct atlx_adapter *adapter = netdev_priv(netdev);
+ /* Do the reset outside of interrupt context */
+- schedule_work(&adapter->tx_timeout_task);
++ schedule_work(&adapter->reset_dev_task);
+ }
+
+ /*
+diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
+index d19c849..77241b6 100644
+--- a/drivers/net/ethernet/micrel/ks8851_mll.c
++++ b/drivers/net/ethernet/micrel/ks8851_mll.c
+@@ -40,7 +40,7 @@
+ #define DRV_NAME "ks8851_mll"
+
+ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
+-#define MAX_RECV_FRAMES 32
++#define MAX_RECV_FRAMES 255
+ #define MAX_BUF_SIZE 2048
+ #define TX_BUF_SIZE 2000
+ #define RX_BUF_SIZE 2000
+diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
+index 7ece990..4b9f4bd 100644
+--- a/drivers/net/ethernet/micrel/ksz884x.c
++++ b/drivers/net/ethernet/micrel/ksz884x.c
+@@ -5679,7 +5679,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
+ memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
+ }
+
+- memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN);
++ memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN);
+
+ interrupt = hw_block_intr(hw);
+
+diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
+index aba4f67..8f47907 100644
+--- a/drivers/net/ethernet/realtek/8139cp.c
++++ b/drivers/net/ethernet/realtek/8139cp.c
+@@ -961,6 +961,11 @@ static inline void cp_start_hw (struct cp_private *cp)
+ cpw8(Cmd, RxOn | TxOn);
+ }
+
++static void cp_enable_irq(struct cp_private *cp)
++{
++ cpw16_f(IntrMask, cp_intr_mask);
++}
++
+ static void cp_init_hw (struct cp_private *cp)
+ {
+ struct net_device *dev = cp->dev;
+@@ -1000,8 +1005,6 @@ static void cp_init_hw (struct cp_private *cp)
+
+ cpw16(MultiIntr, 0);
+
+- cpw16_f(IntrMask, cp_intr_mask);
+-
+ cpw8_f(Cfg9346, Cfg9346_Lock);
+ }
+
+@@ -1133,6 +1136,8 @@ static int cp_open (struct net_device *dev)
+ if (rc)
+ goto err_out_hw;
+
++ cp_enable_irq(cp);
++
+ netif_carrier_off(dev);
+ mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
+ netif_start_queue(dev);
+@@ -2034,6 +2039,7 @@ static int cp_resume (struct pci_dev *pdev)
+ /* FIXME: sh*t may happen if the Rx ring buffer is depleted */
+ cp_init_rings_index (cp);
+ cp_init_hw (cp);
++ cp_enable_irq(cp);
+ netif_start_queue (dev);
+
+ spin_lock_irqsave (&cp->lock, flags);
+diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
+index 8843071..8c7dd21 100644
+--- a/drivers/net/ethernet/smsc/smsc911x.c
++++ b/drivers/net/ethernet/smsc/smsc911x.c
+@@ -1089,10 +1089,8 @@ smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat)
+
+ /* Quickly dumps bad packets */
+ static void
+-smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes)
++smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords)
+ {
+- unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2;
+-
+ if (likely(pktwords >= 4)) {
+ unsigned int timeout = 500;
+ unsigned int val;
+@@ -1156,7 +1154,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
+ continue;
+ }
+
+- skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN);
++ skb = netdev_alloc_skb(dev, pktwords << 2);
+ if (unlikely(!skb)) {
+ SMSC_WARN(pdata, rx_err,
+ "Unable to allocate skb for rx packet");
+@@ -1166,14 +1164,12 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
+ break;
+ }
+
+- skb->data = skb->head;
+- skb_reset_tail_pointer(skb);
++ pdata->ops->rx_readfifo(pdata,
++ (unsigned int *)skb->data, pktwords);
+
+ /* Align IP on 16B boundary */
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb_put(skb, pktlength - 4);
+- pdata->ops->rx_readfifo(pdata,
+- (unsigned int *)skb->head, pktwords);
+ skb->protocol = eth_type_trans(skb, dev);
+ skb_checksum_none_assert(skb);
+ netif_receive_skb(skb);
+@@ -1396,7 +1392,7 @@ static int smsc911x_open(struct net_device *dev)
+ smsc911x_reg_write(pdata, FIFO_INT, temp);
+
+ /* set RX Data offset to 2 bytes for alignment */
+- smsc911x_reg_write(pdata, RX_CFG, (2 << 8));
++ smsc911x_reg_write(pdata, RX_CFG, (NET_IP_ALIGN << 8));
+
+ /* enable NAPI polling before enabling RX interrupts */
+ napi_enable(&pdata->napi);
+diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
+index 7615040..f470ab6 100644
+--- a/drivers/net/ethernet/ti/davinci_mdio.c
++++ b/drivers/net/ethernet/ti/davinci_mdio.c
+@@ -181,6 +181,11 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
+ __davinci_mdio_reset(data);
+ return -EAGAIN;
+ }
++
++ reg = __raw_readl(&regs->user[0].access);
++ if ((reg & USERACCESS_GO) == 0)
++ return 0;
++
+ dev_err(data->dev, "timed out waiting for user access\n");
+ return -ETIMEDOUT;
+ }
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 486b404..3ed983c 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -968,7 +968,6 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ proto = npindex_to_proto[npi];
+ put_unaligned_be16(proto, pp);
+
+- netif_stop_queue(dev);
+ skb_queue_tail(&ppp->file.xq, skb);
+ ppp_xmit_process(ppp);
+ return NETDEV_TX_OK;
+@@ -1063,6 +1062,8 @@ ppp_xmit_process(struct ppp *ppp)
+ code that we can accept some more. */
+ if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
+ netif_wake_queue(ppp->dev);
++ else
++ netif_stop_queue(ppp->dev);
+ }
+ ppp_xmit_unlock(ppp);
+ }
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index a5b9b12..7bd219b 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -1050,6 +1050,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
+ dev->net->ethtool_ops = &smsc75xx_ethtool_ops;
+ dev->net->flags |= IFF_MULTICAST;
+ dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD;
++ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+ return 0;
+ }
+
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index eff6767..55b3218 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -1190,7 +1190,7 @@ static const struct driver_info smsc95xx_info = {
+ .rx_fixup = smsc95xx_rx_fixup,
+ .tx_fixup = smsc95xx_tx_fixup,
+ .status = smsc95xx_status,
+- .flags = FLAG_ETHER | FLAG_SEND_ZLP,
++ .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
+ };
+
+ static const struct usb_device_id products[] = {
+diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
+index 64a1106..4697cf3 100644
+--- a/drivers/net/wimax/i2400m/netdev.c
++++ b/drivers/net/wimax/i2400m/netdev.c
+@@ -607,7 +607,8 @@ static void i2400m_get_drvinfo(struct net_device *net_dev,
+ struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
+
+ strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1);
+- strncpy(info->fw_version, i2400m->fw_name, sizeof(info->fw_version) - 1);
++ strncpy(info->fw_version,
++ i2400m->fw_name ? : "", sizeof(info->fw_version) - 1);
+ if (net_dev->dev.parent)
+ strncpy(info->bus_info, dev_name(net_dev->dev.parent),
+ sizeof(info->bus_info) - 1);
+diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
+index 5634d9a..680709c 100644
+--- a/drivers/net/wireless/b43/main.c
++++ b/drivers/net/wireless/b43/main.c
+@@ -4820,8 +4820,14 @@ static int b43_op_start(struct ieee80211_hw *hw)
+ out_mutex_unlock:
+ mutex_unlock(&wl->mutex);
+
+- /* reload configuration */
+- b43_op_config(hw, ~0);
++ /*
++ * Configuration may have been overwritten during initialization.
++ * Reload the configuration, but only if initialization was
++ * successful. Reloading the configuration after a failed init
++ * may hang the system.
++ */
++ if (!err)
++ b43_op_config(hw, ~0);
+
+ return err;
+ }
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
+index 453f58e..f98becc 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
+@@ -7865,6 +7865,7 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
+ {
+ int len_mpdu;
+ struct ieee80211_rx_status rx_status;
++ struct ieee80211_hdr *hdr;
+
+ memset(&rx_status, 0, sizeof(rx_status));
+ prep_mac80211_status(wlc, rxh, p, &rx_status);
+@@ -7874,6 +7875,13 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
+ skb_pull(p, D11_PHY_HDR_LEN);
+ __skb_trim(p, len_mpdu);
+
++ /* unmute transmit */
++ if (wlc->hw->suspended_fifos) {
++ hdr = (struct ieee80211_hdr *)p->data;
++ if (ieee80211_is_beacon(hdr->frame_control))
++ brcms_b_mute(wlc->hw, false);
++ }
++
+ memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status));
+ ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p);
+ }
+diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
+index 99a710d..827889b 100644
+--- a/drivers/net/wireless/ipw2x00/ipw2200.c
++++ b/drivers/net/wireless/ipw2x00/ipw2200.c
+@@ -2183,6 +2183,7 @@ static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
+ {
+ int rc = 0;
+ unsigned long flags;
++ unsigned long now, end;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->status & STATUS_HCMD_ACTIVE) {
+@@ -2224,10 +2225,20 @@ static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
++ now = jiffies;
++ end = now + HOST_COMPLETE_TIMEOUT;
++again:
+ rc = wait_event_interruptible_timeout(priv->wait_command_queue,
+ !(priv->
+ status & STATUS_HCMD_ACTIVE),
+- HOST_COMPLETE_TIMEOUT);
++ end - now);
++ if (rc < 0) {
++ now = jiffies;
++ if (time_before(now, end))
++ goto again;
++ rc = 0;
++ }
++
+ if (rc == 0) {
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->status & STATUS_HCMD_ACTIVE) {
+diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
+index dd008b0..1e6c8cc 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
+@@ -45,8 +45,8 @@
+ #include "iwl-cfg.h"
+
+ /* Highest firmware API version supported */
+-#define IWL1000_UCODE_API_MAX 6
+-#define IWL100_UCODE_API_MAX 6
++#define IWL1000_UCODE_API_MAX 5
++#define IWL100_UCODE_API_MAX 5
+
+ /* Oldest version we won't warn about */
+ #define IWL1000_UCODE_API_OK 5
+@@ -244,5 +244,5 @@ struct iwl_cfg iwl100_bg_cfg = {
+ IWL_DEVICE_100,
+ };
+
+-MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
+-MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX));
++MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
++MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
+diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
+index 7943197..9823e41 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
+@@ -51,10 +51,10 @@
+ #define IWL135_UCODE_API_MAX 6
+
+ /* Oldest version we won't warn about */
+-#define IWL2030_UCODE_API_OK 5
+-#define IWL2000_UCODE_API_OK 5
+-#define IWL105_UCODE_API_OK 5
+-#define IWL135_UCODE_API_OK 5
++#define IWL2030_UCODE_API_OK 6
++#define IWL2000_UCODE_API_OK 6
++#define IWL105_UCODE_API_OK 6
++#define IWL135_UCODE_API_OK 6
+
+ /* Lowest firmware API version supported */
+ #define IWL2030_UCODE_API_MIN 5
+@@ -372,7 +372,7 @@ struct iwl_cfg iwl135_bgn_cfg = {
+ .ht_params = &iwl2000_ht_params,
+ };
+
+-MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
+-MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
+-MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX));
+-MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX));
++MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK));
++MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK));
++MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK));
++MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK));
+diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
+index f55fb2d..606213f 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
+@@ -50,6 +50,10 @@
+ #define IWL5000_UCODE_API_MAX 5
+ #define IWL5150_UCODE_API_MAX 2
+
++/* Oldest version we won't warn about */
++#define IWL5000_UCODE_API_OK 5
++#define IWL5150_UCODE_API_OK 2
++
+ /* Lowest firmware API version supported */
+ #define IWL5000_UCODE_API_MIN 1
+ #define IWL5150_UCODE_API_MIN 1
+@@ -373,6 +377,7 @@ static struct iwl_ht_params iwl5000_ht_params = {
+ #define IWL_DEVICE_5000 \
+ .fw_name_pre = IWL5000_FW_PRE, \
+ .ucode_api_max = IWL5000_UCODE_API_MAX, \
++ .ucode_api_ok = IWL5000_UCODE_API_OK, \
+ .ucode_api_min = IWL5000_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
+@@ -416,6 +421,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
+ .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
+ .fw_name_pre = IWL5000_FW_PRE,
+ .ucode_api_max = IWL5000_UCODE_API_MAX,
++ .ucode_api_ok = IWL5000_UCODE_API_OK,
+ .ucode_api_min = IWL5000_UCODE_API_MIN,
+ .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
+@@ -429,6 +435,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
+ #define IWL_DEVICE_5150 \
+ .fw_name_pre = IWL5150_FW_PRE, \
+ .ucode_api_max = IWL5150_UCODE_API_MAX, \
++ .ucode_api_ok = IWL5150_UCODE_API_OK, \
+ .ucode_api_min = IWL5150_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
+@@ -450,5 +457,5 @@ struct iwl_cfg iwl5150_abg_cfg = {
+ IWL_DEVICE_5150,
+ };
+
+-MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
+-MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
++MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK));
++MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
+diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
+index c840c78..b4f809c 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
+@@ -46,12 +46,15 @@
+ #include "iwl-cfg.h"
+
+ /* Highest firmware API version supported */
+-#define IWL6000_UCODE_API_MAX 4
++#define IWL6000_UCODE_API_MAX 6
+ #define IWL6050_UCODE_API_MAX 5
+ #define IWL6000G2_UCODE_API_MAX 6
+
+ /* Oldest version we won't warn about */
++#define IWL6000_UCODE_API_OK 4
+ #define IWL6000G2_UCODE_API_OK 5
++#define IWL6050_UCODE_API_OK 5
++#define IWL6000G2B_UCODE_API_OK 6
+
+ /* Lowest firmware API version supported */
+ #define IWL6000_UCODE_API_MIN 4
+@@ -399,7 +402,7 @@ struct iwl_cfg iwl6005_2agn_d_cfg = {
+ #define IWL_DEVICE_6030 \
+ .fw_name_pre = IWL6030_FW_PRE, \
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
+- .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
++ .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \
+ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
+@@ -479,6 +482,7 @@ struct iwl_cfg iwl130_bg_cfg = {
+ #define IWL_DEVICE_6000i \
+ .fw_name_pre = IWL6000_FW_PRE, \
+ .ucode_api_max = IWL6000_UCODE_API_MAX, \
++ .ucode_api_ok = IWL6000_UCODE_API_OK, \
+ .ucode_api_min = IWL6000_UCODE_API_MIN, \
+ .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
+ .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
+@@ -559,6 +563,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
+ .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
+ .fw_name_pre = IWL6000_FW_PRE,
+ .ucode_api_max = IWL6000_UCODE_API_MAX,
++ .ucode_api_ok = IWL6000_UCODE_API_OK,
+ .ucode_api_min = IWL6000_UCODE_API_MIN,
+ .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
+@@ -569,7 +574,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
+ .led_mode = IWL_LED_BLINK,
+ };
+
+-MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
+-MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
+-MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+-MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
++MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
++MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
++MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
++MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
+index e0e9a3d..d7d2512 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
+@@ -1504,7 +1504,6 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
+
+ static void iwlagn_prepare_restart(struct iwl_priv *priv)
+ {
+- struct iwl_rxon_context *ctx;
+ bool bt_full_concurrent;
+ u8 bt_ci_compliance;
+ u8 bt_load;
+@@ -1513,8 +1512,6 @@ static void iwlagn_prepare_restart(struct iwl_priv *priv)
+
+ lockdep_assert_held(&priv->shrd->mutex);
+
+- for_each_context(priv, ctx)
+- ctx->vif = NULL;
+ priv->is_open = 0;
+
+ /*
+diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
+index 3d75d4c..832ec4d 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-core.c
++++ b/drivers/net/wireless/iwlwifi/iwl-core.c
+@@ -1228,6 +1228,7 @@ int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
+ struct iwl_rxon_context *tmp, *ctx = NULL;
+ int err;
+ enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
++ bool reset = false;
+
+ IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
+ viftype, vif->addr);
+@@ -1249,6 +1250,13 @@ int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
+ tmp->interface_modes | tmp->exclusive_interface_modes;
+
+ if (tmp->vif) {
++ /* On reset we need to add the same interface again */
++ if (tmp->vif == vif) {
++ reset = true;
++ ctx = tmp;
++ break;
++ }
++
+ /* check if this busy context is exclusive */
+ if (tmp->exclusive_interface_modes &
+ BIT(tmp->vif->type)) {
+@@ -1275,7 +1283,7 @@ int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
+ ctx->vif = vif;
+
+ err = iwl_setup_interface(priv, ctx);
+- if (!err)
++ if (!err || reset)
+ goto out;
+
+ ctx->vif = NULL;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
+index 5bede9d..aae992a 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
++++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
+@@ -104,15 +104,29 @@
+ * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
+ * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
+ * aligned (address bits 0-7 must be 0).
++ * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers
++ * for them are in different places.
+ *
+ * Bit fields in each pointer register:
+ * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
+ */
+-#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
+-#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
+-
+-/* Find TFD CB base pointer for given queue (range 0-15). */
+-#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
++#define FH_MEM_CBBC_0_15_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
++#define FH_MEM_CBBC_0_15_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
++#define FH_MEM_CBBC_16_19_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBF0)
++#define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
++#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20)
++#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80)
++
++/* Find TFD CB base pointer for given queue */
++static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
++{
++ if (chnl < 16)
++ return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl;
++ if (chnl < 20)
++ return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16);
++ WARN_ON_ONCE(chnl >= 32);
++ return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
++}
+
+
+ /**
+diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
+index bebdd82..d9b089e 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
++++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
+@@ -227,12 +227,33 @@
+ #define SCD_AIT (SCD_BASE + 0x0c)
+ #define SCD_TXFACT (SCD_BASE + 0x10)
+ #define SCD_ACTIVE (SCD_BASE + 0x14)
+-#define SCD_QUEUE_WRPTR(x) (SCD_BASE + 0x18 + (x) * 4)
+-#define SCD_QUEUE_RDPTR(x) (SCD_BASE + 0x68 + (x) * 4)
+ #define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8)
+ #define SCD_AGGR_SEL (SCD_BASE + 0x248)
+ #define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
+-#define SCD_QUEUE_STATUS_BITS(x) (SCD_BASE + 0x10c + (x) * 4)
++
++static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl)
++{
++ if (chnl < 20)
++ return SCD_BASE + 0x18 + chnl * 4;
++ WARN_ON_ONCE(chnl >= 32);
++ return SCD_BASE + 0x284 + (chnl - 20) * 4;
++}
++
++static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl)
++{
++ if (chnl < 20)
++ return SCD_BASE + 0x68 + chnl * 4;
++ WARN_ON_ONCE(chnl >= 32);
++ return SCD_BASE + 0x2B4 + (chnl - 20) * 4;
++}
++
++static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
++{
++ if (chnl < 20)
++ return SCD_BASE + 0x10c + chnl * 4;
++ WARN_ON_ONCE(chnl >= 32);
++ return SCD_BASE + 0x384 + (chnl - 20) * 4;
++}
+
+ /*********************** END TX SCHEDULER *************************************/
+
+diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
+index 445ff21..2f218f9 100644
+--- a/drivers/net/wireless/mwifiex/pcie.h
++++ b/drivers/net/wireless/mwifiex/pcie.h
+@@ -48,15 +48,15 @@
+ #define PCIE_HOST_INT_STATUS_MASK 0xC3C
+ #define PCIE_SCRATCH_2_REG 0xC40
+ #define PCIE_SCRATCH_3_REG 0xC44
+-#define PCIE_SCRATCH_4_REG 0xCC0
+-#define PCIE_SCRATCH_5_REG 0xCC4
+-#define PCIE_SCRATCH_6_REG 0xCC8
+-#define PCIE_SCRATCH_7_REG 0xCCC
+-#define PCIE_SCRATCH_8_REG 0xCD0
+-#define PCIE_SCRATCH_9_REG 0xCD4
+-#define PCIE_SCRATCH_10_REG 0xCD8
+-#define PCIE_SCRATCH_11_REG 0xCDC
+-#define PCIE_SCRATCH_12_REG 0xCE0
++#define PCIE_SCRATCH_4_REG 0xCD0
++#define PCIE_SCRATCH_5_REG 0xCD4
++#define PCIE_SCRATCH_6_REG 0xCD8
++#define PCIE_SCRATCH_7_REG 0xCDC
++#define PCIE_SCRATCH_8_REG 0xCE0
++#define PCIE_SCRATCH_9_REG 0xCE4
++#define PCIE_SCRATCH_10_REG 0xCE8
++#define PCIE_SCRATCH_11_REG 0xCEC
++#define PCIE_SCRATCH_12_REG 0xCF0
+
+ #define CPU_INTR_DNLD_RDY BIT(0)
+ #define CPU_INTR_DOOR_BELL BIT(1)
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index cb71e88..0ffa111 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -914,12 +914,14 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x050d, 0x8053) },
+ { USB_DEVICE(0x050d, 0x805c) },
+ { USB_DEVICE(0x050d, 0x815c) },
++ { USB_DEVICE(0x050d, 0x825a) },
+ { USB_DEVICE(0x050d, 0x825b) },
+ { USB_DEVICE(0x050d, 0x935a) },
+ { USB_DEVICE(0x050d, 0x935b) },
+ /* Buffalo */
+ { USB_DEVICE(0x0411, 0x00e8) },
+ { USB_DEVICE(0x0411, 0x0158) },
++ { USB_DEVICE(0x0411, 0x015d) },
+ { USB_DEVICE(0x0411, 0x016f) },
+ { USB_DEVICE(0x0411, 0x01a2) },
+ /* Corega */
+@@ -934,6 +936,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x07d1, 0x3c0e) },
+ { USB_DEVICE(0x07d1, 0x3c0f) },
+ { USB_DEVICE(0x07d1, 0x3c11) },
++ { USB_DEVICE(0x07d1, 0x3c13) },
++ { USB_DEVICE(0x07d1, 0x3c15) },
+ { USB_DEVICE(0x07d1, 0x3c16) },
+ { USB_DEVICE(0x2001, 0x3c1b) },
+ /* Draytek */
+@@ -944,6 +948,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x7392, 0x7711) },
+ { USB_DEVICE(0x7392, 0x7717) },
+ { USB_DEVICE(0x7392, 0x7718) },
++ { USB_DEVICE(0x7392, 0x7722) },
+ /* Encore */
+ { USB_DEVICE(0x203d, 0x1480) },
+ { USB_DEVICE(0x203d, 0x14a9) },
+@@ -978,6 +983,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x1737, 0x0070) },
+ { USB_DEVICE(0x1737, 0x0071) },
+ { USB_DEVICE(0x1737, 0x0077) },
++ { USB_DEVICE(0x1737, 0x0078) },
+ /* Logitec */
+ { USB_DEVICE(0x0789, 0x0162) },
+ { USB_DEVICE(0x0789, 0x0163) },
+@@ -1001,9 +1007,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x0db0, 0x871b) },
+ { USB_DEVICE(0x0db0, 0x871c) },
+ { USB_DEVICE(0x0db0, 0x899a) },
++ /* Ovislink */
++ { USB_DEVICE(0x1b75, 0x3071) },
++ { USB_DEVICE(0x1b75, 0x3072) },
+ /* Para */
+ { USB_DEVICE(0x20b8, 0x8888) },
+ /* Pegatron */
++ { USB_DEVICE(0x1d4d, 0x0002) },
+ { USB_DEVICE(0x1d4d, 0x000c) },
+ { USB_DEVICE(0x1d4d, 0x000e) },
+ { USB_DEVICE(0x1d4d, 0x0011) },
+@@ -1056,7 +1066,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ /* Sparklan */
+ { USB_DEVICE(0x15a9, 0x0006) },
+ /* Sweex */
++ { USB_DEVICE(0x177f, 0x0153) },
+ { USB_DEVICE(0x177f, 0x0302) },
++ { USB_DEVICE(0x177f, 0x0313) },
+ /* U-Media */
+ { USB_DEVICE(0x157e, 0x300e) },
+ { USB_DEVICE(0x157e, 0x3013) },
+@@ -1140,27 +1152,24 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x13d3, 0x3322) },
+ /* Belkin */
+ { USB_DEVICE(0x050d, 0x1003) },
+- { USB_DEVICE(0x050d, 0x825a) },
+ /* Buffalo */
+ { USB_DEVICE(0x0411, 0x012e) },
+ { USB_DEVICE(0x0411, 0x0148) },
+ { USB_DEVICE(0x0411, 0x0150) },
+- { USB_DEVICE(0x0411, 0x015d) },
+ /* Corega */
+ { USB_DEVICE(0x07aa, 0x0041) },
+ { USB_DEVICE(0x07aa, 0x0042) },
+ { USB_DEVICE(0x18c5, 0x0008) },
+ /* D-Link */
+ { USB_DEVICE(0x07d1, 0x3c0b) },
+- { USB_DEVICE(0x07d1, 0x3c13) },
+- { USB_DEVICE(0x07d1, 0x3c15) },
+ { USB_DEVICE(0x07d1, 0x3c17) },
+ { USB_DEVICE(0x2001, 0x3c17) },
+ /* Edimax */
+ { USB_DEVICE(0x7392, 0x4085) },
+- { USB_DEVICE(0x7392, 0x7722) },
+ /* Encore */
+ { USB_DEVICE(0x203d, 0x14a1) },
++ /* Fujitsu Stylistic 550 */
++ { USB_DEVICE(0x1690, 0x0761) },
+ /* Gemtek */
+ { USB_DEVICE(0x15a9, 0x0010) },
+ /* Gigabyte */
+@@ -1172,19 +1181,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ /* LevelOne */
+ { USB_DEVICE(0x1740, 0x0605) },
+ { USB_DEVICE(0x1740, 0x0615) },
+- /* Linksys */
+- { USB_DEVICE(0x1737, 0x0078) },
+ /* Logitec */
+ { USB_DEVICE(0x0789, 0x0168) },
+ { USB_DEVICE(0x0789, 0x0169) },
+ /* Motorola */
+ { USB_DEVICE(0x100d, 0x9032) },
+- /* Ovislink */
+- { USB_DEVICE(0x1b75, 0x3071) },
+- { USB_DEVICE(0x1b75, 0x3072) },
+ /* Pegatron */
+ { USB_DEVICE(0x05a6, 0x0101) },
+- { USB_DEVICE(0x1d4d, 0x0002) },
+ { USB_DEVICE(0x1d4d, 0x0010) },
+ /* Planex */
+ { USB_DEVICE(0x2019, 0x5201) },
+@@ -1203,9 +1206,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x083a, 0xc522) },
+ { USB_DEVICE(0x083a, 0xd522) },
+ { USB_DEVICE(0x083a, 0xf511) },
+- /* Sweex */
+- { USB_DEVICE(0x177f, 0x0153) },
+- { USB_DEVICE(0x177f, 0x0313) },
+ /* Zyxel */
+ { USB_DEVICE(0x0586, 0x341a) },
+ #endif
+diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
+index d44d398..47ba0f7 100644
+--- a/drivers/net/wireless/rtlwifi/pci.c
++++ b/drivers/net/wireless/rtlwifi/pci.c
+@@ -1961,6 +1961,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
+ rtl_deinit_deferred_work(hw);
+ rtlpriv->intf_ops->adapter_stop(hw);
+ }
++ rtlpriv->cfg->ops->disable_interrupt(hw);
+
+ /*deinit rfkill */
+ rtl_deinit_rfkill(hw);
+diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
+index ba3268e..40c1574 100644
+--- a/drivers/net/wireless/wl1251/main.c
++++ b/drivers/net/wireless/wl1251/main.c
+@@ -479,6 +479,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
+ cancel_work_sync(&wl->irq_work);
+ cancel_work_sync(&wl->tx_work);
+ cancel_work_sync(&wl->filter_work);
++ cancel_delayed_work_sync(&wl->elp_work);
+
+ mutex_lock(&wl->mutex);
+
+diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c
+index f786942..1b851f6 100644
+--- a/drivers/net/wireless/wl1251/sdio.c
++++ b/drivers/net/wireless/wl1251/sdio.c
+@@ -315,8 +315,8 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
+
+ if (wl->irq)
+ free_irq(wl->irq, wl);
+- kfree(wl_sdio);
+ wl1251_free_hw(wl);
++ kfree(wl_sdio);
+
+ sdio_claim_host(func);
+ sdio_release_irq(func);
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 6476547..78fda9c 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2906,6 +2906,40 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
+
++/*
++ * Some BIOS implementations leave the Intel GPU interrupts enabled,
++ * even though no one is handling them (f.e. i915 driver is never loaded).
++ * Additionally the interrupt destination is not set up properly
++ * and the interrupt ends up -somewhere-.
++ *
++ * These spurious interrupts are "sticky" and the kernel disables
++ * the (shared) interrupt line after 100.000+ generated interrupts.
++ *
++ * Fix it by disabling the still enabled interrupts.
++ * This resolves crashes often seen on monitor unplug.
++ */
++#define I915_DEIER_REG 0x4400c
++static void __devinit disable_igfx_irq(struct pci_dev *dev)
++{
++ void __iomem *regs = pci_iomap(dev, 0, 0);
++ if (regs == NULL) {
++ dev_warn(&dev->dev, "igfx quirk: Can't iomap PCI device\n");
++ return;
++ }
++
++ /* Check if any interrupt line is still enabled */
++ if (readl(regs + I915_DEIER_REG) != 0) {
++ dev_warn(&dev->dev, "BIOS left Intel GPU interrupts enabled; "
++ "disabling\n");
++
++ writel(0, regs + I915_DEIER_REG);
++ }
++
++ pci_iounmap(dev, regs);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
++
+ static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
+ struct pci_fixup *end)
+ {
+diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
+index d93e962..1d3bcce 100644
+--- a/drivers/platform/x86/dell-laptop.c
++++ b/drivers/platform/x86/dell-laptop.c
+@@ -184,6 +184,34 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
++ {
++ .callback = dmi_matched,
++ .ident = "Dell Vostro 3555",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"),
++ },
++ .driver_data = &quirk_dell_vostro_v130,
++ },
++ {
++ .callback = dmi_matched,
++ .ident = "Dell Inspiron N311z",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"),
++ },
++ .driver_data = &quirk_dell_vostro_v130,
++ },
++ {
++ .callback = dmi_matched,
++ .ident = "Dell Inspiron M5110",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"),
++ },
++ .driver_data = &quirk_dell_vostro_v130,
++ },
++ { }
+ };
+
+ static struct calling_interface_buffer *buffer;
+@@ -615,6 +643,7 @@ static void touchpad_led_set(struct led_classdev *led_cdev,
+ static struct led_classdev touchpad_led = {
+ .name = "dell-laptop::touchpad",
+ .brightness_set = touchpad_led_set,
++ .flags = LED_CORE_SUSPENDRESUME,
+ };
+
+ static int __devinit touchpad_led_init(struct device *dev)
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index 1b831c5..e48ba4b 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -192,7 +192,14 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
+ phy->attached_sata_ps = dr->attached_sata_ps;
+ phy->attached_iproto = dr->iproto << 1;
+ phy->attached_tproto = dr->tproto << 1;
+- memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
++ /* help some expanders that fail to zero sas_address in the 'no
++ * device' case
++ */
++ if (phy->attached_dev_type == NO_DEVICE ||
++ phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
++ memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
++ else
++ memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
+ phy->attached_phy_id = dr->attached_phy_id;
+ phy->phy_change_count = dr->change_count;
+ phy->routing_attr = dr->routing_attr;
+@@ -1643,9 +1650,17 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
+ int phy_change_count = 0;
+
+ res = sas_get_phy_change_count(dev, i, &phy_change_count);
+- if (res)
+- goto out;
+- else if (phy_change_count != ex->ex_phy[i].phy_change_count) {
++ switch (res) {
++ case SMP_RESP_PHY_VACANT:
++ case SMP_RESP_NO_PHY:
++ continue;
++ case SMP_RESP_FUNC_ACC:
++ break;
++ default:
++ return res;
++ }
++
++ if (phy_change_count != ex->ex_phy[i].phy_change_count) {
+ if (update)
+ ex->ex_phy[i].phy_change_count =
+ phy_change_count;
+@@ -1653,8 +1668,7 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
+ return 0;
+ }
+ }
+-out:
+- return res;
++ return 0;
+ }
+
+ static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
+diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
+index 24cacff..5f748c0 100644
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -139,10 +139,12 @@ static void fsl_spi_change_mode(struct spi_device *spi)
+ static void fsl_spi_chipselect(struct spi_device *spi, int value)
+ {
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+- struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data;
++ struct fsl_spi_platform_data *pdata;
+ bool pol = spi->mode & SPI_CS_HIGH;
+ struct spi_mpc8xxx_cs *cs = spi->controller_state;
+
++ pdata = spi->dev.parent->parent->platform_data;
++
+ if (value == BITBANG_CS_INACTIVE) {
+ if (pdata->cs_control)
+ pdata->cs_control(spi, !pol);
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 77eae99..b2ccdea 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -319,7 +319,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
+ }
+
+ spi->master = master;
+- spi->dev.parent = dev;
++ spi->dev.parent = &master->dev;
+ spi->dev.bus = &spi_bus_type;
+ spi->dev.release = spidev_release;
+ device_initialize(&spi->dev);
+diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
+index fb11743..4bb2797 100644
+--- a/drivers/staging/rtl8712/os_intfs.c
++++ b/drivers/staging/rtl8712/os_intfs.c
+@@ -476,9 +476,6 @@ static int netdev_close(struct net_device *pnetdev)
+ r8712_free_assoc_resources(padapter);
+ /*s2-4.*/
+ r8712_free_network_queue(padapter);
+- release_firmware(padapter->fw);
+- /* never exit with a firmware callback pending */
+- wait_for_completion(&padapter->rtl8712_fw_ready);
+ return 0;
+ }
+
+diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
+index 9bade18..ec41d38 100644
+--- a/drivers/staging/rtl8712/usb_intf.c
++++ b/drivers/staging/rtl8712/usb_intf.c
+@@ -30,6 +30,7 @@
+
+ #include <linux/usb.h>
+ #include <linux/module.h>
++#include <linux/firmware.h>
+
+ #include "osdep_service.h"
+ #include "drv_types.h"
+@@ -621,6 +622,10 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
+ struct _adapter *padapter = netdev_priv(pnetdev);
+ struct usb_device *udev = interface_to_usbdev(pusb_intf);
+
++ if (padapter->fw_found)
++ release_firmware(padapter->fw);
++ /* never exit with a firmware callback pending */
++ wait_for_completion(&padapter->rtl8712_fw_ready);
+ usb_set_intfdata(pusb_intf, NULL);
+ if (padapter) {
+ if (drvpriv.drv_registered == true)
+diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
+index b84c834..8daf073 100644
+--- a/drivers/tty/amiserial.c
++++ b/drivers/tty/amiserial.c
+@@ -1113,8 +1113,10 @@ static int set_serial_info(struct async_struct * info,
+ (new_serial.close_delay != state->close_delay) ||
+ (new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
+ ((new_serial.flags & ~ASYNC_USR_MASK) !=
+- (state->flags & ~ASYNC_USR_MASK)))
++ (state->flags & ~ASYNC_USR_MASK))) {
++ tty_unlock();
+ return -EPERM;
++ }
+ state->flags = ((state->flags & ~ASYNC_USR_MASK) |
+ (new_serial.flags & ASYNC_USR_MASK));
+ info->flags = ((info->flags & ~ASYNC_USR_MASK) |
+diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
+index e6c3dbd..836fe273 100644
+--- a/drivers/tty/serial/clps711x.c
++++ b/drivers/tty/serial/clps711x.c
+@@ -154,10 +154,9 @@ static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id)
+ port->x_char = 0;
+ return IRQ_HANDLED;
+ }
+- if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+- clps711xuart_stop_tx(port);
+- return IRQ_HANDLED;
+- }
++
++ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
++ goto disable_tx_irq;
+
+ count = port->fifosize >> 1;
+ do {
+@@ -171,8 +170,11 @@ static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id)
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+- if (uart_circ_empty(xmit))
+- clps711xuart_stop_tx(port);
++ if (uart_circ_empty(xmit)) {
++ disable_tx_irq:
++ disable_irq_nosync(TX_IRQ(port));
++ tx_enabled(port) = 0;
++ }
+
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index da776a0..a4b192d 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -1356,9 +1356,11 @@ static int pch_uart_verify_port(struct uart_port *port,
+ __func__);
+ return -EOPNOTSUPP;
+ #endif
+- priv->use_dma = 1;
+ priv->use_dma_flag = 1;
+ dev_info(priv->port.dev, "PCH UART : Use DMA Mode\n");
++ if (!priv->use_dma)
++ pch_request_dma(port);
++ priv->use_dma = 1;
+ }
+
+ return 0;
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 9eb71d8..2db0327 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -108,8 +108,9 @@ static void wdm_out_callback(struct urb *urb)
+ spin_lock(&desc->iuspin);
+ desc->werr = urb->status;
+ spin_unlock(&desc->iuspin);
+- clear_bit(WDM_IN_USE, &desc->flags);
+ kfree(desc->outbuf);
++ desc->outbuf = NULL;
++ clear_bit(WDM_IN_USE, &desc->flags);
+ wake_up(&desc->wait);
+ }
+
+@@ -312,7 +313,7 @@ static ssize_t wdm_write
+ if (we < 0)
+ return -EIO;
+
+- desc->outbuf = buf = kmalloc(count, GFP_KERNEL);
++ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf) {
+ rv = -ENOMEM;
+ goto outnl;
+@@ -376,10 +377,12 @@ static ssize_t wdm_write
+ req->wIndex = desc->inum;
+ req->wLength = cpu_to_le16(count);
+ set_bit(WDM_IN_USE, &desc->flags);
++ desc->outbuf = buf;
+
+ rv = usb_submit_urb(desc->command, GFP_KERNEL);
+ if (rv < 0) {
+ kfree(buf);
++ desc->outbuf = NULL;
+ clear_bit(WDM_IN_USE, &desc->flags);
+ dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
+ } else {
+diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
+index 61d08dd..5f1404a 100644
+--- a/drivers/usb/core/hcd-pci.c
++++ b/drivers/usb/core/hcd-pci.c
+@@ -495,6 +495,15 @@ static int hcd_pci_suspend_noirq(struct device *dev)
+
+ pci_save_state(pci_dev);
+
++ /*
++ * Some systems crash if an EHCI controller is in D3 during
++ * a sleep transition. We have to leave such controllers in D0.
++ */
++ if (hcd->broken_pci_sleep) {
++ dev_dbg(dev, "Staying in PCI D0\n");
++ return retval;
++ }
++
+ /* If the root hub is dead rather than suspended, disallow remote
+ * wakeup. usb_hc_died() should ensure that both hosts are marked as
+ * dying, so we only need to check the primary roothub.
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index e238b3b..2b0a341 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1644,7 +1644,6 @@ void usb_disconnect(struct usb_device **pdev)
+ {
+ struct usb_device *udev = *pdev;
+ int i;
+- struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+
+ /* mark the device as inactive, so any further urb submissions for
+ * this device (and any of its children) will fail immediately.
+@@ -1667,9 +1666,7 @@ void usb_disconnect(struct usb_device **pdev)
+ * so that the hardware is now fully quiesced.
+ */
+ dev_dbg (&udev->dev, "unregistering device\n");
+- mutex_lock(hcd->bandwidth_mutex);
+ usb_disable_device(udev, 0);
+- mutex_unlock(hcd->bandwidth_mutex);
+ usb_hcd_synchronize_unlinks(udev);
+
+ usb_remove_ep_devs(&udev->ep0);
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index aed3e07..ca717da 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -1136,8 +1136,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
+ * Deallocates hcd/hardware state for the endpoints (nuking all or most
+ * pending urbs) and usbcore state for the interfaces, so that usbcore
+ * must usb_set_configuration() before any interfaces could be used.
+- *
+- * Must be called with hcd->bandwidth_mutex held.
+ */
+ void usb_disable_device(struct usb_device *dev, int skip_ep0)
+ {
+@@ -1190,7 +1188,9 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
+ usb_disable_endpoint(dev, i + USB_DIR_IN, false);
+ }
+ /* Remove endpoints from the host controller internal state */
++ mutex_lock(hcd->bandwidth_mutex);
+ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
++ mutex_unlock(hcd->bandwidth_mutex);
+ /* Second pass: remove endpoint pointers */
+ }
+ for (i = skip_ep0; i < 16; ++i) {
+@@ -1750,7 +1750,6 @@ free_interfaces:
+ /* if it's already configured, clear out old state first.
+ * getting rid of old interfaces means unbinding their drivers.
+ */
+- mutex_lock(hcd->bandwidth_mutex);
+ if (dev->state != USB_STATE_ADDRESS)
+ usb_disable_device(dev, 1); /* Skip ep0 */
+
+@@ -1763,6 +1762,7 @@ free_interfaces:
+ * host controller will not allow submissions to dropped endpoints. If
+ * this call fails, the device state is unchanged.
+ */
++ mutex_lock(hcd->bandwidth_mutex);
+ ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
+ if (ret < 0) {
+ mutex_unlock(hcd->bandwidth_mutex);
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 27bd50a..c0dcf69 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -572,9 +572,10 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
+ dwc->ep0_bounced = false;
+ } else {
+ transferred = ur->length - trb.length;
+- ur->actual += transferred;
+ }
+
++ ur->actual += transferred;
++
+ if ((epnum & 1) && ur->actual < ur->length) {
+ /* for some reason we did not get everything out */
+
+diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
+index ab8f1b4..527736e 100644
+--- a/drivers/usb/gadget/dummy_hcd.c
++++ b/drivers/usb/gadget/dummy_hcd.c
+@@ -925,7 +925,6 @@ static int dummy_udc_stop(struct usb_gadget *g,
+
+ dum->driver = NULL;
+
+- dummy_pullup(&dum->gadget, 0);
+ return 0;
+ }
+
+diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
+index acb3800..0e641a1 100644
+--- a/drivers/usb/gadget/f_fs.c
++++ b/drivers/usb/gadget/f_fs.c
+@@ -712,7 +712,7 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
+ if (code == FUNCTIONFS_INTERFACE_REVMAP) {
+ struct ffs_function *func = ffs->func;
+ ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
+- } else if (gadget->ops->ioctl) {
++ } else if (gadget && gadget->ops->ioctl) {
+ ret = gadget->ops->ioctl(gadget, code, value);
+ } else {
+ ret = -ENOTTY;
+diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
+index 1a6f415..a5570b6 100644
+--- a/drivers/usb/gadget/f_mass_storage.c
++++ b/drivers/usb/gadget/f_mass_storage.c
+@@ -2182,7 +2182,7 @@ unknown_cmnd:
+ common->data_size_from_cmnd = 0;
+ sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
+ reply = check_command(common, common->cmnd_size,
+- DATA_DIR_UNKNOWN, 0xff, 0, unknown);
++ DATA_DIR_UNKNOWN, ~0, 0, unknown);
+ if (reply == 0) {
+ common->curlun->sense_data = SS_INVALID_COMMAND;
+ reply = -EINVAL;
+diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
+index 11b5196..db2d607 100644
+--- a/drivers/usb/gadget/file_storage.c
++++ b/drivers/usb/gadget/file_storage.c
+@@ -2569,7 +2569,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
+ fsg->data_size_from_cmnd = 0;
+ sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
+ if ((reply = check_command(fsg, fsg->cmnd_size,
+- DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
++ DATA_DIR_UNKNOWN, ~0, 0, unknown)) == 0) {
+ fsg->curlun->sense_data = SS_INVALID_COMMAND;
+ reply = -EINVAL;
+ }
+diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
+index 6939e17..901924a 100644
+--- a/drivers/usb/gadget/udc-core.c
++++ b/drivers/usb/gadget/udc-core.c
+@@ -211,9 +211,9 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
+
+ if (udc_is_newstyle(udc)) {
+ udc->driver->disconnect(udc->gadget);
++ usb_gadget_disconnect(udc->gadget);
+ udc->driver->unbind(udc->gadget);
+ usb_gadget_udc_stop(udc->gadget, udc->driver);
+- usb_gadget_disconnect(udc->gadget);
+ } else {
+ usb_gadget_stop(udc->gadget, udc->driver);
+ }
+@@ -359,9 +359,13 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+
+ if (sysfs_streq(buf, "connect")) {
++ if (udc_is_newstyle(udc))
++ usb_gadget_udc_start(udc->gadget, udc->driver);
+ usb_gadget_connect(udc->gadget);
+ } else if (sysfs_streq(buf, "disconnect")) {
+ usb_gadget_disconnect(udc->gadget);
++ if (udc_is_newstyle(udc))
++ usb_gadget_udc_stop(udc->gadget, udc->driver);
+ } else {
+ dev_err(dev, "unsupported command '%s'\n", buf);
+ return -EINVAL;
+diff --git a/drivers/usb/gadget/uvc.h b/drivers/usb/gadget/uvc.h
+index bc78c60..ca4e03a 100644
+--- a/drivers/usb/gadget/uvc.h
++++ b/drivers/usb/gadget/uvc.h
+@@ -28,7 +28,7 @@
+
+ struct uvc_request_data
+ {
+- unsigned int length;
++ __s32 length;
+ __u8 data[60];
+ };
+
+diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
+index f6e083b..54d7ca5 100644
+--- a/drivers/usb/gadget/uvc_v4l2.c
++++ b/drivers/usb/gadget/uvc_v4l2.c
+@@ -39,7 +39,7 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
+ if (data->length < 0)
+ return usb_ep_set_halt(cdev->gadget->ep0);
+
+- req->length = min(uvc->event_length, data->length);
++ req->length = min_t(unsigned int, uvc->event_length, data->length);
+ req->zero = data->length < uvc->event_length;
+ req->dma = DMA_ADDR_INVALID;
+
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index 3ff9f82..da2f711 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -815,8 +815,13 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
+ goto dead;
+ }
+
++ /*
++ * We don't use STS_FLR, but some controllers don't like it to
++ * remain on, so mask it out along with the other status bits.
++ */
++ masked_status = status & (INTR_MASK | STS_FLR);
++
+ /* Shared IRQ? */
+- masked_status = status & INTR_MASK;
+ if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
+ spin_unlock(&ehci->lock);
+ return IRQ_NONE;
+@@ -867,7 +872,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
+ pcd_status = status;
+
+ /* resume root hub? */
+- if (!(cmd & CMD_RUN))
++ if (ehci->rh_state == EHCI_RH_SUSPENDED)
+ usb_hcd_resume_root_hub(hcd);
+
+ /* get per-port change detect bits */
+diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
+index f4b627d..971d312 100644
+--- a/drivers/usb/host/ehci-pci.c
++++ b/drivers/usb/host/ehci-pci.c
+@@ -144,6 +144,14 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
+ hcd->has_tt = 1;
+ tdi_reset(ehci);
+ }
++ if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
++ /* EHCI #1 or #2 on 6 Series/C200 Series chipset */
++ if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
++ ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
++ hcd->broken_pci_sleep = 1;
++ device_set_wakeup_capable(&pdev->dev, false);
++ }
++ }
+ break;
+ case PCI_VENDOR_ID_TDI:
+ if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
+diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
+index ac5bfd6..2504694 100644
+--- a/drivers/usb/misc/yurex.c
++++ b/drivers/usb/misc/yurex.c
+@@ -99,9 +99,7 @@ static void yurex_delete(struct kref *kref)
+ usb_put_dev(dev->udev);
+ if (dev->cntl_urb) {
+ usb_kill_urb(dev->cntl_urb);
+- if (dev->cntl_req)
+- usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
+- dev->cntl_req, dev->cntl_urb->setup_dma);
++ kfree(dev->cntl_req);
+ if (dev->cntl_buffer)
+ usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
+ dev->cntl_buffer, dev->cntl_urb->transfer_dma);
+@@ -234,9 +232,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
+ }
+
+ /* allocate buffer for control req */
+- dev->cntl_req = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE,
+- GFP_KERNEL,
+- &dev->cntl_urb->setup_dma);
++ dev->cntl_req = kmalloc(YUREX_BUF_SIZE, GFP_KERNEL);
+ if (!dev->cntl_req) {
+ err("Could not allocate cntl_req");
+ goto error;
+@@ -286,7 +282,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
+ usb_rcvintpipe(dev->udev, dev->int_in_endpointAddr),
+ dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt,
+ dev, 1);
+- dev->cntl_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
++ dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ if (usb_submit_urb(dev->urb, GFP_KERNEL)) {
+ retval = -EIO;
+ err("Could not submitting URB");
+diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
+index ba85f27..a8f0c09 100644
+--- a/drivers/usb/musb/omap2430.c
++++ b/drivers/usb/musb/omap2430.c
+@@ -282,7 +282,8 @@ static int musb_otg_notifications(struct notifier_block *nb,
+
+ static int omap2430_musb_init(struct musb *musb)
+ {
+- u32 l, status = 0;
++ u32 l;
++ int status = 0;
+ struct device *dev = musb->controller;
+ struct musb_hdrc_platform_data *plat = dev->platform_data;
+ struct omap_musb_board_data *data = plat->board_data;
+@@ -299,7 +300,7 @@ static int omap2430_musb_init(struct musb *musb)
+
+ status = pm_runtime_get_sync(dev);
+ if (status < 0) {
+- dev_err(dev, "pm_runtime_get_sync FAILED");
++ dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
+ goto err1;
+ }
+
+@@ -451,14 +452,14 @@ static int __init omap2430_probe(struct platform_device *pdev)
+ goto err2;
+ }
+
++ pm_runtime_enable(&pdev->dev);
++
+ ret = platform_device_add(musb);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register musb device\n");
+ goto err2;
+ }
+
+- pm_runtime_enable(&pdev->dev);
+-
+ return 0;
+
+ err2:
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 4c12404..f2c57e0 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -285,7 +285,8 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
+ /* Issue the request, attempting to read 'size' bytes */
+ result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ request, REQTYPE_DEVICE_TO_HOST, 0x0000,
+- port_priv->bInterfaceNumber, buf, size, 300);
++ port_priv->bInterfaceNumber, buf, size,
++ USB_CTRL_GET_TIMEOUT);
+
+ /* Convert data into an array of integers */
+ for (i = 0; i < length; i++)
+@@ -335,12 +336,14 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
+ result = usb_control_msg(serial->dev,
+ usb_sndctrlpipe(serial->dev, 0),
+ request, REQTYPE_HOST_TO_DEVICE, 0x0000,
+- port_priv->bInterfaceNumber, buf, size, 300);
++ port_priv->bInterfaceNumber, buf, size,
++ USB_CTRL_SET_TIMEOUT);
+ } else {
+ result = usb_control_msg(serial->dev,
+ usb_sndctrlpipe(serial->dev, 0),
+ request, REQTYPE_HOST_TO_DEVICE, data[0],
+- port_priv->bInterfaceNumber, NULL, 0, 300);
++ port_priv->bInterfaceNumber, NULL, 0,
++ USB_CTRL_SET_TIMEOUT);
+ }
+
+ kfree(buf);
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index 7c3ec9e..e093585 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -221,7 +221,7 @@ static const struct sierra_iface_info typeB_interface_list = {
+ };
+
+ /* 'blacklist' of interfaces not served by this driver */
+-static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 };
++static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11, 19, 20 };
+ static const struct sierra_iface_info direct_ip_interface_blacklist = {
+ .infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces),
+ .ifaceinfo = direct_ip_non_serial_ifaces,
+@@ -289,7 +289,6 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */
+ { USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */
+ { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */
+- { USB_DEVICE(0x1199, 0x68A2) }, /* Sierra Wireless MC7710 */
+ /* Sierra Wireless C885 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)},
+ /* Sierra Wireless C888, Air Card 501, USB 303, USB 304 */
+@@ -299,6 +298,9 @@ static const struct usb_device_id id_table[] = {
+ /* Sierra Wireless HSPA Non-Composite Device */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
+ { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */
++ { USB_DEVICE(0x1199, 0x68A2), /* Sierra Wireless MC77xx in QMI mode */
++ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
++ },
+ { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ },
+diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
+index 2babcd4..86685e9 100644
+--- a/drivers/uwb/hwa-rc.c
++++ b/drivers/uwb/hwa-rc.c
+@@ -645,7 +645,8 @@ void hwarc_neep_cb(struct urb *urb)
+ dev_err(dev, "NEEP: URB error %d\n", urb->status);
+ }
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+- if (result < 0) {
++ if (result < 0 && result != -ENODEV && result != -EPERM) {
++ /* ignoring unrecoverable errors */
+ dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n",
+ result);
+ goto error;
+diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c
+index a269937..8cb71bb 100644
+--- a/drivers/uwb/neh.c
++++ b/drivers/uwb/neh.c
+@@ -107,6 +107,7 @@ struct uwb_rc_neh {
+ u8 evt_type;
+ __le16 evt;
+ u8 context;
++ u8 completed;
+ uwb_rc_cmd_cb_f cb;
+ void *arg;
+
+@@ -409,6 +410,7 @@ static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size
+ struct device *dev = &rc->uwb_dev.dev;
+ struct uwb_rc_neh *neh;
+ struct uwb_rceb *notif;
++ unsigned long flags;
+
+ if (rceb->bEventContext == 0) {
+ notif = kmalloc(size, GFP_ATOMIC);
+@@ -422,7 +424,11 @@ static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size
+ } else {
+ neh = uwb_rc_neh_lookup(rc, rceb);
+ if (neh) {
+- del_timer_sync(&neh->timer);
++ spin_lock_irqsave(&rc->neh_lock, flags);
++ /* to guard against a timeout */
++ neh->completed = 1;
++ del_timer(&neh->timer);
++ spin_unlock_irqrestore(&rc->neh_lock, flags);
+ uwb_rc_neh_cb(neh, rceb, size);
+ } else
+ dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n",
+@@ -568,6 +574,10 @@ static void uwb_rc_neh_timer(unsigned long arg)
+ unsigned long flags;
+
+ spin_lock_irqsave(&rc->neh_lock, flags);
++ if (neh->completed) {
++ spin_unlock_irqrestore(&rc->neh_lock, flags);
++ return;
++ }
+ if (neh->context)
+ __uwb_rc_neh_rm(rc, neh);
+ else
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index afca14d..625890c 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -692,7 +692,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+ vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND;
+
+ if (use_ptemod)
+- vma->vm_flags |= VM_DONTCOPY|VM_PFNMAP;
++ vma->vm_flags |= VM_DONTCOPY;
+
+ vma->vm_private_data = map;
+
+diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
+index 2f73195..2ce95c0 100644
+--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
++++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
+@@ -129,7 +129,7 @@ static int read_backend_details(struct xenbus_device *xendev)
+ return xenbus_read_otherend_details(xendev, "backend-id", "backend");
+ }
+
+-static int is_device_connecting(struct device *dev, void *data)
++static int is_device_connecting(struct device *dev, void *data, bool ignore_nonessential)
+ {
+ struct xenbus_device *xendev = to_xenbus_device(dev);
+ struct device_driver *drv = data;
+@@ -146,16 +146,41 @@ static int is_device_connecting(struct device *dev, void *data)
+ if (drv && (dev->driver != drv))
+ return 0;
+
++ if (ignore_nonessential) {
++ /* With older QEMU, for PVonHVM guests the guest config files
++ * could contain: vfb = [ 'vnc=1, vnclisten=0.0.0.0']
++ * which is nonsensical as there is no PV FB (there can be
++ * a PVKB) running as HVM guest. */
++
++ if ((strncmp(xendev->nodename, "device/vkbd", 11) == 0))
++ return 0;
++
++ if ((strncmp(xendev->nodename, "device/vfb", 10) == 0))
++ return 0;
++ }
+ xendrv = to_xenbus_driver(dev->driver);
+ return (xendev->state < XenbusStateConnected ||
+ (xendev->state == XenbusStateConnected &&
+ xendrv->is_ready && !xendrv->is_ready(xendev)));
+ }
++static int essential_device_connecting(struct device *dev, void *data)
++{
++ return is_device_connecting(dev, data, true /* ignore PV[KBB+FB] */);
++}
++static int non_essential_device_connecting(struct device *dev, void *data)
++{
++ return is_device_connecting(dev, data, false);
++}
+
+-static int exists_connecting_device(struct device_driver *drv)
++static int exists_essential_connecting_device(struct device_driver *drv)
+ {
+ return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+- is_device_connecting);
++ essential_device_connecting);
++}
++static int exists_non_essential_connecting_device(struct device_driver *drv)
++{
++ return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
++ non_essential_device_connecting);
+ }
+
+ static int print_device_status(struct device *dev, void *data)
+@@ -186,6 +211,23 @@ static int print_device_status(struct device *dev, void *data)
+ /* We only wait for device setup after most initcalls have run. */
+ static int ready_to_wait_for_devices;
+
++static bool wait_loop(unsigned long start, unsigned int max_delay,
++ unsigned int *seconds_waited)
++{
++ if (time_after(jiffies, start + (*seconds_waited+5)*HZ)) {
++ if (!*seconds_waited)
++ printk(KERN_WARNING "XENBUS: Waiting for "
++ "devices to initialise: ");
++ *seconds_waited += 5;
++ printk("%us...", max_delay - *seconds_waited);
++ if (*seconds_waited == max_delay)
++ return true;
++ }
++
++ schedule_timeout_interruptible(HZ/10);
++
++ return false;
++}
+ /*
+ * On a 5-minute timeout, wait for all devices currently configured. We need
+ * to do this to guarantee that the filesystems and / or network devices
+@@ -209,19 +251,14 @@ static void wait_for_devices(struct xenbus_driver *xendrv)
+ if (!ready_to_wait_for_devices || !xen_domain())
+ return;
+
+- while (exists_connecting_device(drv)) {
+- if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
+- if (!seconds_waited)
+- printk(KERN_WARNING "XENBUS: Waiting for "
+- "devices to initialise: ");
+- seconds_waited += 5;
+- printk("%us...", 300 - seconds_waited);
+- if (seconds_waited == 300)
+- break;
+- }
+-
+- schedule_timeout_interruptible(HZ/10);
+- }
++ while (exists_non_essential_connecting_device(drv))
++ if (wait_loop(start, 30, &seconds_waited))
++ break;
++
++ /* Skips PVKB and PVFB check.*/
++ while (exists_essential_connecting_device(drv))
++ if (wait_loop(start, 270, &seconds_waited))
++ break;
+
+ if (seconds_waited)
+ printk("\n");
+diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
+index 308a98b..650d520 100644
+--- a/fs/autofs4/autofs_i.h
++++ b/fs/autofs4/autofs_i.h
+@@ -110,7 +110,6 @@ struct autofs_sb_info {
+ int sub_version;
+ int min_proto;
+ int max_proto;
+- int compat_daemon;
+ unsigned long exp_timeout;
+ unsigned int type;
+ int reghost_enabled;
+@@ -269,6 +268,17 @@ int autofs4_fill_super(struct super_block *, void *, int);
+ struct autofs_info *autofs4_new_ino(struct autofs_sb_info *);
+ void autofs4_clean_ino(struct autofs_info *);
+
++static inline int autofs_prepare_pipe(struct file *pipe)
++{
++ if (!pipe->f_op || !pipe->f_op->write)
++ return -EINVAL;
++ if (!S_ISFIFO(pipe->f_dentry->d_inode->i_mode))
++ return -EINVAL;
++ /* We want a packet pipe */
++ pipe->f_flags |= O_DIRECT;
++ return 0;
++}
++
+ /* Queue management functions */
+
+ int autofs4_wait(struct autofs_sb_info *,struct dentry *, enum autofs_notify);
+diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
+index 56bac70..de54271 100644
+--- a/fs/autofs4/dev-ioctl.c
++++ b/fs/autofs4/dev-ioctl.c
+@@ -376,7 +376,7 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp,
+ err = -EBADF;
+ goto out;
+ }
+- if (!pipe->f_op || !pipe->f_op->write) {
++ if (autofs_prepare_pipe(pipe) < 0) {
+ err = -EPIPE;
+ fput(pipe);
+ goto out;
+@@ -385,7 +385,6 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp,
+ sbi->pipefd = pipefd;
+ sbi->pipe = pipe;
+ sbi->catatonic = 0;
+- sbi->compat_daemon = is_compat_task();
+ }
+ out:
+ mutex_unlock(&sbi->wq_mutex);
+diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
+index 98a5695..7b5293e 100644
+--- a/fs/autofs4/inode.c
++++ b/fs/autofs4/inode.c
+@@ -19,7 +19,6 @@
+ #include <linux/parser.h>
+ #include <linux/bitops.h>
+ #include <linux/magic.h>
+-#include <linux/compat.h>
+ #include "autofs_i.h"
+ #include <linux/module.h>
+
+@@ -225,7 +224,6 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
+ set_autofs_type_indirect(&sbi->type);
+ sbi->min_proto = 0;
+ sbi->max_proto = 0;
+- sbi->compat_daemon = is_compat_task();
+ mutex_init(&sbi->wq_mutex);
+ spin_lock_init(&sbi->fs_lock);
+ sbi->queues = NULL;
+@@ -294,7 +292,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
+ printk("autofs: could not open pipe file descriptor\n");
+ goto fail_dput;
+ }
+- if (!pipe->f_op || !pipe->f_op->write)
++ if (autofs_prepare_pipe(pipe) < 0)
+ goto fail_fput;
+ sbi->pipe = pipe;
+ sbi->pipefd = pipefd;
+diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
+index 6861f61..e1fbdee 100644
+--- a/fs/autofs4/waitq.c
++++ b/fs/autofs4/waitq.c
+@@ -90,24 +90,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
+
+ return (bytes > 0);
+ }
+-
+-/*
+- * The autofs_v5 packet was misdesigned.
+- *
+- * The packets are identical on x86-32 and x86-64, but have different
+- * alignment. Which means that 'sizeof()' will give different results.
+- * Fix it up for the case of running 32-bit user mode on a 64-bit kernel.
+- */
+-static noinline size_t autofs_v5_packet_size(struct autofs_sb_info *sbi)
+-{
+- size_t pktsz = sizeof(struct autofs_v5_packet);
+-#if defined(CONFIG_X86_64) && defined(CONFIG_COMPAT)
+- if (sbi->compat_daemon > 0)
+- pktsz -= 4;
+-#endif
+- return pktsz;
+-}
+-
++
+ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
+ struct autofs_wait_queue *wq,
+ int type)
+@@ -164,7 +147,8 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
+ {
+ struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet;
+
+- pktsz = autofs_v5_packet_size(sbi);
++ pktsz = sizeof(*packet);
++
+ packet->wait_queue_token = wq->wait_queue_token;
+ packet->len = wq->name.len;
+ memcpy(packet->name, wq->name.name, wq->name.len);
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 6738503..83a871f 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -2025,7 +2025,7 @@ BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item,
+
+ static inline bool btrfs_root_readonly(struct btrfs_root *root)
+ {
+- return root->root_item.flags & BTRFS_ROOT_SUBVOL_RDONLY;
++ return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
+ }
+
+ /* struct btrfs_root_backup */
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 0e6adac..e89803b 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -4826,8 +4826,12 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
+ max_len = data_end - temp;
+ node->node_name = cifs_strndup_from_ucs(temp, max_len,
+ is_unicode, nls_codepage);
+- if (!node->node_name)
++ if (!node->node_name) {
+ rc = -ENOMEM;
++ goto parse_DFS_referrals_exit;
++ }
++
++ ref++;
+ }
+
+ parse_DFS_referrals_exit:
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index ea54cde..4d9d3a4 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -988,6 +988,10 @@ static int path_count[PATH_ARR_SIZE];
+
+ static int path_count_inc(int nests)
+ {
++ /* Allow an arbitrary number of depth 1 paths */
++ if (nests == 0)
++ return 0;
++
+ if (++path_count[nests] > path_limits[nests])
+ return -1;
+ return 0;
+diff --git a/fs/exec.c b/fs/exec.c
+index 3625464..160cd2f 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -973,6 +973,9 @@ static int de_thread(struct task_struct *tsk)
+ sig->notify_count = 0;
+
+ no_thread_group:
++ /* we have changed execution domain */
++ tsk->exit_signal = SIGCHLD;
++
+ if (current->mm)
+ setmax_mm_hiwater_rss(&sig->maxrss, current->mm);
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index c2a2012..54f2bdc 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2812,7 +2812,7 @@ static int ext4_split_extent_at(handle_t *handle,
+ if (err)
+ goto fix_extent_len;
+ /* update the extent length and mark as initialized */
+- ex->ee_len = cpu_to_le32(ee_len);
++ ex->ee_len = cpu_to_le16(ee_len);
+ ext4_ext_try_to_merge(inode, path, ex);
+ err = ext4_ext_dirty(handle, inode, path + depth);
+ goto out;
+diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
+index 4dfbfec..ec2a9c2 100644
+--- a/fs/hfsplus/catalog.c
++++ b/fs/hfsplus/catalog.c
+@@ -366,6 +366,10 @@ int hfsplus_rename_cat(u32 cnid,
+ err = hfs_brec_find(&src_fd);
+ if (err)
+ goto out;
++ if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) {
++ err = -EIO;
++ goto out;
++ }
+
+ hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
+ src_fd.entrylength);
+diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
+index 4536cd3..5adb740 100644
+--- a/fs/hfsplus/dir.c
++++ b/fs/hfsplus/dir.c
+@@ -150,6 +150,11 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ filp->f_pos++;
+ /* fall through */
+ case 1:
++ if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
++ err = -EIO;
++ goto out;
++ }
++
+ hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
+ fd.entrylength);
+ if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) {
+@@ -181,6 +186,12 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ err = -EIO;
+ goto out;
+ }
++
++ if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) {
++ err = -EIO;
++ goto out;
++ }
++
+ hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
+ fd.entrylength);
+ type = be16_to_cpu(entry.type);
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index 68d704d..d751f04 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -683,7 +683,7 @@ start_journal_io:
+ if (commit_transaction->t_need_data_flush &&
+ (journal->j_fs_dev != journal->j_dev) &&
+ (journal->j_flags & JBD2_BARRIER))
+- blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
++ blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
+
+ /* Done it all: now write the commit record asynchronously. */
+ if (JBD2_HAS_INCOMPAT_FEATURE(journal,
+@@ -819,7 +819,7 @@ wait_for_iobuf:
+ if (JBD2_HAS_INCOMPAT_FEATURE(journal,
+ JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
+ journal->j_flags & JBD2_BARRIER) {
+- blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL);
++ blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
+ }
+
+ if (err)
+diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c
+index f848b52..046bb77 100644
+--- a/fs/lockd/clnt4xdr.c
++++ b/fs/lockd/clnt4xdr.c
+@@ -241,7 +241,7 @@ static int decode_nlm4_stat(struct xdr_stream *xdr, __be32 *stat)
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+- if (unlikely(*p > nlm4_failed))
++ if (unlikely(ntohl(*p) > ntohl(nlm4_failed)))
+ goto out_bad_xdr;
+ *stat = *p;
+ return 0;
+diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
+index 180ac34..36057ce 100644
+--- a/fs/lockd/clntxdr.c
++++ b/fs/lockd/clntxdr.c
+@@ -236,7 +236,7 @@ static int decode_nlm_stat(struct xdr_stream *xdr,
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+- if (unlikely(*p > nlm_lck_denied_grace_period))
++ if (unlikely(ntohl(*p) > ntohl(nlm_lck_denied_grace_period)))
+ goto out_enum;
+ *stat = *p;
+ return 0;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 757293b..51f6a40 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -4453,7 +4453,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
+ static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
+ {
+ struct nfs_server *server = NFS_SERVER(state->inode);
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {
++ .inode = state->inode,
++ };
+ int err;
+
+ do {
+@@ -4471,7 +4473,9 @@ static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request
+ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
+ {
+ struct nfs_server *server = NFS_SERVER(state->inode);
+- struct nfs4_exception exception = { };
++ struct nfs4_exception exception = {
++ .inode = state->inode,
++ };
+ int err;
+
+ err = nfs4_set_lock_state(state, request);
+@@ -4551,6 +4555,7 @@ static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *
+ {
+ struct nfs4_exception exception = {
+ .state = state,
++ .inode = state->inode,
+ };
+ int err;
+
+@@ -4596,6 +4601,20 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
+
+ if (state == NULL)
+ return -ENOLCK;
++ /*
++ * Don't rely on the VFS having checked the file open mode,
++ * since it won't do this for flock() locks.
++ */
++ switch (request->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) {
++ case F_RDLCK:
++ if (!(filp->f_mode & FMODE_READ))
++ return -EBADF;
++ break;
++ case F_WRLCK:
++ if (!(filp->f_mode & FMODE_WRITE))
++ return -EBADF;
++ }
++
+ do {
+ status = nfs4_proc_setlk(state, cmd, request);
+ if ((status != -EAGAIN) || IS_SETLK(cmd))
+diff --git a/fs/nfs/read.c b/fs/nfs/read.c
+index cfa175c..41bae32 100644
+--- a/fs/nfs/read.c
++++ b/fs/nfs/read.c
+@@ -324,7 +324,7 @@ out_bad:
+ while (!list_empty(res)) {
+ data = list_entry(res->next, struct nfs_read_data, list);
+ list_del(&data->list);
+- nfs_readdata_free(data);
++ nfs_readdata_release(data);
+ }
+ nfs_readpage_release(req);
+ return -ENOMEM;
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 3ada13c..376cd65 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -2708,11 +2708,15 @@ static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
+ char *root_devname;
+ size_t len;
+
+- len = strlen(hostname) + 3;
++ len = strlen(hostname) + 5;
+ root_devname = kmalloc(len, GFP_KERNEL);
+ if (root_devname == NULL)
+ return ERR_PTR(-ENOMEM);
+- snprintf(root_devname, len, "%s:/", hostname);
++ /* Does hostname needs to be enclosed in brackets? */
++ if (strchr(hostname, ':'))
++ snprintf(root_devname, len, "[%s]:/", hostname);
++ else
++ snprintf(root_devname, len, "%s:/", hostname);
+ root_mnt = vfs_kern_mount(fs_type, flags, root_devname, data);
+ kfree(root_devname);
+ return root_mnt;
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 1dda78d..4efd421 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -974,7 +974,7 @@ out_bad:
+ while (!list_empty(res)) {
+ data = list_entry(res->next, struct nfs_write_data, list);
+ list_del(&data->list);
+- nfs_writedata_free(data);
++ nfs_writedata_release(data);
+ }
+ nfs_redirty_request(req);
+ return -ENOMEM;
+diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
+index 08c6e36..43f46cd 100644
+--- a/fs/nfsd/nfs3xdr.c
++++ b/fs/nfsd/nfs3xdr.c
+@@ -803,13 +803,13 @@ encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name,
+ return p;
+ }
+
+-static int
++static __be32
+ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
+ const char *name, int namlen)
+ {
+ struct svc_export *exp;
+ struct dentry *dparent, *dchild;
+- int rv = 0;
++ __be32 rv = nfserr_noent;
+
+ dparent = cd->fh.fh_dentry;
+ exp = cd->fh.fh_export;
+@@ -817,26 +817,20 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
+ if (isdotent(name, namlen)) {
+ if (namlen == 2) {
+ dchild = dget_parent(dparent);
+- if (dchild == dparent) {
+- /* filesystem root - cannot return filehandle for ".." */
+- dput(dchild);
+- return -ENOENT;
+- }
++ /* filesystem root - cannot return filehandle for ".." */
++ if (dchild == dparent)
++ goto out;
+ } else
+ dchild = dget(dparent);
+ } else
+ dchild = lookup_one_len(name, dparent, namlen);
+ if (IS_ERR(dchild))
+- return -ENOENT;
+- rv = -ENOENT;
++ return rv;
+ if (d_mountpoint(dchild))
+ goto out;
+- rv = fh_compose(fhp, exp, dchild, &cd->fh);
+- if (rv)
+- goto out;
+ if (!dchild->d_inode)
+ goto out;
+- rv = 0;
++ rv = fh_compose(fhp, exp, dchild, &cd->fh);
+ out:
+ dput(dchild);
+ return rv;
+@@ -845,7 +839,7 @@ out:
+ static __be32 *encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen)
+ {
+ struct svc_fh fh;
+- int err;
++ __be32 err;
+
+ fh_init(&fh, NFS3_FHSIZE);
+ err = compose_entry_fh(cd, &fh, name, namlen);
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index fa38336..b8c5538 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -231,17 +231,17 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
+ */
+ if (open->op_createmode == NFS4_CREATE_EXCLUSIVE && status == 0)
+ open->op_bmval[1] = (FATTR4_WORD1_TIME_ACCESS |
+- FATTR4_WORD1_TIME_MODIFY);
++ FATTR4_WORD1_TIME_MODIFY);
+ } else {
+ status = nfsd_lookup(rqstp, current_fh,
+ open->op_fname.data, open->op_fname.len, &resfh);
+ fh_unlock(current_fh);
+- if (status)
+- goto out;
+- status = nfsd_check_obj_isreg(&resfh);
+ }
+ if (status)
+ goto out;
++ status = nfsd_check_obj_isreg(&resfh);
++ if (status)
++ goto out;
+
+ if (is_create_with_attrs(open) && open->op_acl != NULL)
+ do_set_nfs4_acl(rqstp, &resfh, open->op_acl, open->op_bmval);
+@@ -827,6 +827,7 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ struct nfsd4_setattr *setattr)
+ {
+ __be32 status = nfs_ok;
++ int err;
+
+ if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
+ nfs4_lock_state();
+@@ -838,9 +839,9 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ return status;
+ }
+ }
+- status = mnt_want_write(cstate->current_fh.fh_export->ex_path.mnt);
+- if (status)
+- return status;
++ err = mnt_want_write(cstate->current_fh.fh_export->ex_path.mnt);
++ if (err)
++ return nfserrno(err);
+ status = nfs_ok;
+
+ status = check_attr_support(rqstp, cstate, setattr->sa_bmval,
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 5abced7..4cfe260 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -4080,16 +4080,14 @@ out:
+ * vfs_test_lock. (Arguably perhaps test_lock should be done with an
+ * inode operation.)
+ */
+-static int nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
++static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
+ {
+ struct file *file;
+- int err;
+-
+- err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
+- if (err)
+- return err;
+- err = vfs_test_lock(file, lock);
+- nfsd_close(file);
++ __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
++ if (!err) {
++ err = nfserrno(vfs_test_lock(file, lock));
++ nfsd_close(file);
++ }
+ return err;
+ }
+
+@@ -4103,7 +4101,6 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ struct inode *inode;
+ struct file_lock file_lock;
+ struct nfs4_lockowner *lo;
+- int error;
+ __be32 status;
+
+ if (locks_in_grace())
+@@ -4149,12 +4146,10 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+
+ nfs4_transform_lock_offset(&file_lock);
+
+- status = nfs_ok;
+- error = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
+- if (error) {
+- status = nfserrno(error);
++ status = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
++ if (status)
+ goto out;
+- }
++
+ if (file_lock.fl_type != F_UNLCK) {
+ status = nfserr_denied;
+ nfs4_set_lock_denied(&file_lock, &lockt->lt_denied);
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index b6fa792..9cfa60a 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -3411,7 +3411,7 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, int nfserr,
+ nfsd4_decode_stateid(argp, &si);
+ valid = nfs4_validate_stateid(cl, &si);
+ RESERVE_SPACE(4);
+- *p++ = htonl(valid);
++ *p++ = valid;
+ resp->p = p;
+ }
+ nfs4_unlock_state();
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 7a2e442..5c3cd82 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1439,7 +1439,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ switch (createmode) {
+ case NFS3_CREATE_UNCHECKED:
+ if (! S_ISREG(dchild->d_inode->i_mode))
+- err = nfserr_exist;
++ goto out;
+ else if (truncp) {
+ /* in nfsv4, we need to treat this case a little
+ * differently. we don't want to truncate the
+diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
+index 3165aeb..31b9463 100644
+--- a/fs/ocfs2/alloc.c
++++ b/fs/ocfs2/alloc.c
+@@ -1134,7 +1134,7 @@ static int ocfs2_adjust_rightmost_branch(handle_t *handle,
+ }
+
+ el = path_leaf_el(path);
+- rec = &el->l_recs[le32_to_cpu(el->l_next_free_rec) - 1];
++ rec = &el->l_recs[le16_to_cpu(el->l_next_free_rec) - 1];
+
+ ocfs2_adjust_rightmost_records(handle, et, path, rec);
+
+diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
+index cf78233..9f32d7c 100644
+--- a/fs/ocfs2/refcounttree.c
++++ b/fs/ocfs2/refcounttree.c
+@@ -1036,14 +1036,14 @@ static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
+
+ tmp_el = left_path->p_node[subtree_root].el;
+ blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
+- for (i = 0; i < le32_to_cpu(tmp_el->l_next_free_rec); i++) {
++ for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
+ if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
+ *cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
+ break;
+ }
+ }
+
+- BUG_ON(i == le32_to_cpu(tmp_el->l_next_free_rec));
++ BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));
+
+ out:
+ ocfs2_free_path(left_path);
+@@ -1468,7 +1468,7 @@ static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
+
+ trace_ocfs2_divide_leaf_refcount_block(
+ (unsigned long long)ref_leaf_bh->b_blocknr,
+- le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used));
++ le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
+
+ /*
+ * XXX: Improvement later.
+@@ -2411,7 +2411,7 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
+ rb = (struct ocfs2_refcount_block *)
+ prev_bh->b_data;
+
+- if (le64_to_cpu(rb->rf_records.rl_used) +
++ if (le16_to_cpu(rb->rf_records.rl_used) +
+ recs_add >
+ le16_to_cpu(rb->rf_records.rl_count))
+ ref_blocks++;
+@@ -2476,7 +2476,7 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
+ if (prev_bh) {
+ rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
+
+- if (le64_to_cpu(rb->rf_records.rl_used) + recs_add >
++ if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
+ le16_to_cpu(rb->rf_records.rl_count))
+ ref_blocks++;
+
+@@ -3629,7 +3629,7 @@ int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
+ * one will split a refcount rec, so totally we need
+ * clusters * 2 new refcount rec.
+ */
+- if (le64_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
++ if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
+ le16_to_cpu(rb->rf_records.rl_count))
+ ref_blocks++;
+
+diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
+index ba5d97e..f169da4 100644
+--- a/fs/ocfs2/suballoc.c
++++ b/fs/ocfs2/suballoc.c
+@@ -600,7 +600,7 @@ static void ocfs2_bg_alloc_cleanup(handle_t *handle,
+ ret = ocfs2_free_clusters(handle, cluster_ac->ac_inode,
+ cluster_ac->ac_bh,
+ le64_to_cpu(rec->e_blkno),
+- le32_to_cpu(rec->e_leaf_clusters));
++ le16_to_cpu(rec->e_leaf_clusters));
+ if (ret)
+ mlog_errno(ret);
+ /* Try all the clusters to free */
+@@ -1628,7 +1628,7 @@ static int ocfs2_bg_discontig_fix_by_rec(struct ocfs2_suballoc_result *res,
+ {
+ unsigned int bpc = le16_to_cpu(cl->cl_bpc);
+ unsigned int bitoff = le32_to_cpu(rec->e_cpos) * bpc;
+- unsigned int bitcount = le32_to_cpu(rec->e_leaf_clusters) * bpc;
++ unsigned int bitcount = le16_to_cpu(rec->e_leaf_clusters) * bpc;
+
+ if (res->sr_bit_offset < bitoff)
+ return 0;
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 4065f07..05ed5ca 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -345,6 +345,16 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = {
+ .get = generic_pipe_buf_get,
+ };
+
++static const struct pipe_buf_operations packet_pipe_buf_ops = {
++ .can_merge = 0,
++ .map = generic_pipe_buf_map,
++ .unmap = generic_pipe_buf_unmap,
++ .confirm = generic_pipe_buf_confirm,
++ .release = anon_pipe_buf_release,
++ .steal = generic_pipe_buf_steal,
++ .get = generic_pipe_buf_get,
++};
++
+ static ssize_t
+ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
+ unsigned long nr_segs, loff_t pos)
+@@ -406,6 +416,13 @@ redo:
+ ret += chars;
+ buf->offset += chars;
+ buf->len -= chars;
++
++ /* Was it a packet buffer? Clean up and exit */
++ if (buf->flags & PIPE_BUF_FLAG_PACKET) {
++ total_len = chars;
++ buf->len = 0;
++ }
++
+ if (!buf->len) {
+ buf->ops = NULL;
+ ops->release(pipe, buf);
+@@ -458,6 +475,11 @@ redo:
+ return ret;
+ }
+
++static inline int is_packetized(struct file *file)
++{
++ return (file->f_flags & O_DIRECT) != 0;
++}
++
+ static ssize_t
+ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
+ unsigned long nr_segs, loff_t ppos)
+@@ -592,6 +614,11 @@ redo2:
+ buf->ops = &anon_pipe_buf_ops;
+ buf->offset = 0;
+ buf->len = chars;
++ buf->flags = 0;
++ if (is_packetized(filp)) {
++ buf->ops = &packet_pipe_buf_ops;
++ buf->flags = PIPE_BUF_FLAG_PACKET;
++ }
+ pipe->nrbufs = ++bufs;
+ pipe->tmp_page = NULL;
+
+@@ -1012,7 +1039,7 @@ struct file *create_write_pipe(int flags)
+ goto err_dentry;
+ f->f_mapping = inode->i_mapping;
+
+- f->f_flags = O_WRONLY | (flags & O_NONBLOCK);
++ f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
+ f->f_version = 0;
+
+ return f;
+@@ -1056,7 +1083,7 @@ int do_pipe_flags(int *fd, int flags)
+ int error;
+ int fdw, fdr;
+
+- if (flags & ~(O_CLOEXEC | O_NONBLOCK))
++ if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
+ return -EINVAL;
+
+ fw = create_write_pipe(flags);
+diff --git a/fs/splice.c b/fs/splice.c
+index fa2defa..6d0dfb8 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -31,6 +31,7 @@
+ #include <linux/uio.h>
+ #include <linux/security.h>
+ #include <linux/gfp.h>
++#include <linux/socket.h>
+
+ /*
+ * Attempt to steal a page from a pipe buffer. This should perhaps go into
+@@ -691,7 +692,9 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
+ if (!likely(file->f_op && file->f_op->sendpage))
+ return -EINVAL;
+
+- more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
++ more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
++ if (sd->len < sd->total_len)
++ more |= MSG_SENDPAGE_NOTLAST;
+ return file->f_op->sendpage(file, buf->page, buf->offset,
+ sd->len, &pos, more);
+ }
+diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h
+index 0fd28e0..c749af9 100644
+--- a/include/asm-generic/statfs.h
++++ b/include/asm-generic/statfs.h
+@@ -15,7 +15,7 @@ typedef __kernel_fsid_t fsid_t;
+ * with a 10' pole.
+ */
+ #ifndef __statfs_word
+-#if BITS_PER_LONG == 64
++#if __BITS_PER_LONG == 64
+ #define __statfs_word long
+ #else
+ #define __statfs_word __u32
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 2362a0b..1328d8c 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -383,7 +383,18 @@ extern int __init efi_setup_pcdp_console(char *);
+ #define EFI_VARIABLE_NON_VOLATILE 0x0000000000000001
+ #define EFI_VARIABLE_BOOTSERVICE_ACCESS 0x0000000000000002
+ #define EFI_VARIABLE_RUNTIME_ACCESS 0x0000000000000004
+-
++#define EFI_VARIABLE_HARDWARE_ERROR_RECORD 0x0000000000000008
++#define EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS 0x0000000000000010
++#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS 0x0000000000000020
++#define EFI_VARIABLE_APPEND_WRITE 0x0000000000000040
++
++#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \
++ EFI_VARIABLE_BOOTSERVICE_ACCESS | \
++ EFI_VARIABLE_RUNTIME_ACCESS | \
++ EFI_VARIABLE_HARDWARE_ERROR_RECORD | \
++ EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS | \
++ EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS | \
++ EFI_VARIABLE_APPEND_WRITE)
+ /*
+ * EFI Device Path information
+ */
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index d526231..35410ef 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -562,6 +562,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
+
+ #ifdef CONFIG_IOMMU_API
+ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
++void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
+ int kvm_iommu_map_guest(struct kvm *kvm);
+ int kvm_iommu_unmap_guest(struct kvm *kvm);
+ int kvm_assign_device(struct kvm *kvm,
+@@ -575,6 +576,11 @@ static inline int kvm_iommu_map_pages(struct kvm *kvm,
+ return 0;
+ }
+
++static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
++ struct kvm_memory_slot *slot)
++{
++}
++
+ static inline int kvm_iommu_map_guest(struct kvm *kvm)
+ {
+ return -ENODEV;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index a82ad4d..cbeb586 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -2536,8 +2536,6 @@ extern void net_disable_timestamp(void);
+ extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
+ extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
+ extern void dev_seq_stop(struct seq_file *seq, void *v);
+-extern int dev_seq_open_ops(struct inode *inode, struct file *file,
+- const struct seq_operations *ops);
+ #endif
+
+ extern int netdev_class_create_file(struct class_attribute *class_attr);
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+index 77257c9..0072a53 100644
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -8,6 +8,7 @@
+ #define PIPE_BUF_FLAG_LRU 0x01 /* page is on the LRU */
+ #define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */
+ #define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */
++#define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */
+
+ /**
+ * struct pipe_buffer - a linux kernel pipe buffer
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index c6db9fb..bb1fac5 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -141,7 +141,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
+ unsigned ret;
+
+ repeat:
+- ret = s->sequence;
++ ret = ACCESS_ONCE(s->sequence);
+ if (unlikely(ret & 1)) {
+ cpu_relax();
+ goto repeat;
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 6cf8b53..e689b47 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -458,6 +458,7 @@ struct sk_buff {
+ union {
+ __u32 mark;
+ __u32 dropcount;
++ __u32 avail_size;
+ };
+
+ __u16 vlan_tci;
+@@ -1326,6 +1327,18 @@ static inline int skb_tailroom(const struct sk_buff *skb)
+ }
+
+ /**
++ * skb_availroom - bytes at buffer end
++ * @skb: buffer to check
++ *
++ * Return the number of bytes of free space at the tail of an sk_buff
++ * allocated by sk_stream_alloc()
++ */
++static inline int skb_availroom(const struct sk_buff *skb)
++{
++ return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
++}
++
++/**
+ * skb_reserve - adjust headroom
+ * @skb: buffer to alter
+ * @len: bytes to move
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index d0e77f6..ad919e0 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -265,7 +265,7 @@ struct ucred {
+ #define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */
+ #define MSG_MORE 0x8000 /* Sender will send more */
+ #define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
+-
++#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
+ #define MSG_EOF MSG_FIN
+
+ #define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exit for file
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index 03354d5..64cec8d 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -128,6 +128,8 @@ struct usb_hcd {
+ unsigned wireless:1; /* Wireless USB HCD */
+ unsigned authorized_default:1;
+ unsigned has_tt:1; /* Integrated TT in root hub */
++ unsigned broken_pci_sleep:1; /* Don't put the
++ controller in PCI-D3 for system sleep */
+
+ int irq; /* irq allocated */
+ void __iomem *regs; /* device memory/io */
+diff --git a/kernel/exit.c b/kernel/exit.c
+index e6e01b9..5a8a66e 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -819,25 +819,6 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
+ if (group_dead)
+ kill_orphaned_pgrp(tsk->group_leader, NULL);
+
+- /* Let father know we died
+- *
+- * Thread signals are configurable, but you aren't going to use
+- * that to send signals to arbitrary processes.
+- * That stops right now.
+- *
+- * If the parent exec id doesn't match the exec id we saved
+- * when we started then we know the parent has changed security
+- * domain.
+- *
+- * If our self_exec id doesn't match our parent_exec_id then
+- * we have changed execution domain as these two values started
+- * the same after a fork.
+- */
+- if (thread_group_leader(tsk) && tsk->exit_signal != SIGCHLD &&
+- (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
+- tsk->self_exec_id != tsk->parent_exec_id))
+- tsk->exit_signal = SIGCHLD;
+-
+ if (unlikely(tsk->ptrace)) {
+ int sig = thread_group_leader(tsk) &&
+ thread_group_empty(tsk) &&
+diff --git a/kernel/power/swap.c b/kernel/power/swap.c
+index 11a594c..b313086 100644
+--- a/kernel/power/swap.c
++++ b/kernel/power/swap.c
+@@ -52,6 +52,23 @@
+
+ #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
+
++/*
++ * Number of free pages that are not high.
++ */
++static inline unsigned long low_free_pages(void)
++{
++ return nr_free_pages() - nr_free_highpages();
++}
++
++/*
++ * Number of pages required to be kept free while writing the image. Always
++ * half of all available low pages before the writing starts.
++ */
++static inline unsigned long reqd_free_pages(void)
++{
++ return low_free_pages() / 2;
++}
++
+ struct swap_map_page {
+ sector_t entries[MAP_PAGE_ENTRIES];
+ sector_t next_swap;
+@@ -73,7 +90,7 @@ struct swap_map_handle {
+ sector_t cur_swap;
+ sector_t first_sector;
+ unsigned int k;
+- unsigned long nr_free_pages, written;
++ unsigned long reqd_free_pages;
+ u32 crc32;
+ };
+
+@@ -317,8 +334,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
+ goto err_rel;
+ }
+ handle->k = 0;
+- handle->nr_free_pages = nr_free_pages() >> 1;
+- handle->written = 0;
++ handle->reqd_free_pages = reqd_free_pages();
+ handle->first_sector = handle->cur_swap;
+ return 0;
+ err_rel:
+@@ -353,11 +369,11 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
+ handle->cur_swap = offset;
+ handle->k = 0;
+ }
+- if (bio_chain && ++handle->written > handle->nr_free_pages) {
++ if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
+ error = hib_wait_on_bio_chain(bio_chain);
+ if (error)
+ goto out;
+- handle->written = 0;
++ handle->reqd_free_pages = reqd_free_pages();
+ }
+ out:
+ return error;
+@@ -619,7 +635,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
+ * Adjust number of free pages after all allocations have been done.
+ * We don't want to run out of pages when writing.
+ */
+- handle->nr_free_pages = nr_free_pages() >> 1;
++ handle->reqd_free_pages = reqd_free_pages();
+
+ /*
+ * Start the CRC32 thread.
+diff --git a/kernel/sched.c b/kernel/sched.c
+index d6b149c..299f55c 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -3538,13 +3538,10 @@ calc_load_n(unsigned long load, unsigned long exp,
+ * Once we've updated the global active value, we need to apply the exponential
+ * weights adjusted to the number of cycles missed.
+ */
+-static void calc_global_nohz(unsigned long ticks)
++static void calc_global_nohz(void)
+ {
+ long delta, active, n;
+
+- if (time_before(jiffies, calc_load_update))
+- return;
+-
+ /*
+ * If we crossed a calc_load_update boundary, make sure to fold
+ * any pending idle changes, the respective CPUs might have
+@@ -3556,31 +3553,25 @@ static void calc_global_nohz(unsigned long ticks)
+ atomic_long_add(delta, &calc_load_tasks);
+
+ /*
+- * If we were idle for multiple load cycles, apply them.
++ * It could be the one fold was all it took, we done!
+ */
+- if (ticks >= LOAD_FREQ) {
+- n = ticks / LOAD_FREQ;
++ if (time_before(jiffies, calc_load_update + 10))
++ return;
+
+- active = atomic_long_read(&calc_load_tasks);
+- active = active > 0 ? active * FIXED_1 : 0;
++ /*
++ * Catch-up, fold however many we are behind still
++ */
++ delta = jiffies - calc_load_update - 10;
++ n = 1 + (delta / LOAD_FREQ);
+
+- avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
+- avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
+- avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
++ active = atomic_long_read(&calc_load_tasks);
++ active = active > 0 ? active * FIXED_1 : 0;
+
+- calc_load_update += n * LOAD_FREQ;
+- }
++ avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
++ avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
++ avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
+
+- /*
+- * Its possible the remainder of the above division also crosses
+- * a LOAD_FREQ period, the regular check in calc_global_load()
+- * which comes after this will take care of that.
+- *
+- * Consider us being 11 ticks before a cycle completion, and us
+- * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
+- * age us 4 cycles, and the test in calc_global_load() will
+- * pick up the final one.
+- */
++ calc_load_update += n * LOAD_FREQ;
+ }
+ #else
+ static void calc_load_account_idle(struct rq *this_rq)
+@@ -3592,7 +3583,7 @@ static inline long calc_load_fold_idle(void)
+ return 0;
+ }
+
+-static void calc_global_nohz(unsigned long ticks)
++static void calc_global_nohz(void)
+ {
+ }
+ #endif
+@@ -3620,8 +3611,6 @@ void calc_global_load(unsigned long ticks)
+ {
+ long active;
+
+- calc_global_nohz(ticks);
+-
+ if (time_before(jiffies, calc_load_update + 10))
+ return;
+
+@@ -3633,6 +3622,16 @@ void calc_global_load(unsigned long ticks)
+ avenrun[2] = calc_load(avenrun[2], EXP_15, active);
+
+ calc_load_update += LOAD_FREQ;
++
++ /*
++ * Account one period with whatever state we found before
++ * folding in the nohz state and ageing the entire idle period.
++ *
++ * This avoids loosing a sample when we go idle between
++ * calc_load_account_active() (10 ticks ago) and now and thus
++ * under-accounting.
++ */
++ calc_global_nohz();
+ }
+
+ /*
+@@ -7605,16 +7604,26 @@ static void __sdt_free(const struct cpumask *cpu_map)
+ struct sd_data *sdd = &tl->data;
+
+ for_each_cpu(j, cpu_map) {
+- struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
+- if (sd && (sd->flags & SD_OVERLAP))
+- free_sched_groups(sd->groups, 0);
+- kfree(*per_cpu_ptr(sdd->sd, j));
+- kfree(*per_cpu_ptr(sdd->sg, j));
+- kfree(*per_cpu_ptr(sdd->sgp, j));
++ struct sched_domain *sd;
++
++ if (sdd->sd) {
++ sd = *per_cpu_ptr(sdd->sd, j);
++ if (sd && (sd->flags & SD_OVERLAP))
++ free_sched_groups(sd->groups, 0);
++ kfree(*per_cpu_ptr(sdd->sd, j));
++ }
++
++ if (sdd->sg)
++ kfree(*per_cpu_ptr(sdd->sg, j));
++ if (sdd->sgp)
++ kfree(*per_cpu_ptr(sdd->sgp, j));
+ }
+ free_percpu(sdd->sd);
++ sdd->sd = NULL;
+ free_percpu(sdd->sg);
++ sdd->sg = NULL;
+ free_percpu(sdd->sgp);
++ sdd->sgp = NULL;
+ }
+ }
+
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 2065515..08e0b97 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -1610,6 +1610,15 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
+ BUG_ON(!tsk->ptrace &&
+ (tsk->group_leader != tsk || !thread_group_empty(tsk)));
+
++ if (sig != SIGCHLD) {
++ /*
++ * This is only possible if parent == real_parent.
++ * Check if it has changed security domain.
++ */
++ if (tsk->parent_exec_id != tsk->parent->self_exec_id)
++ sig = SIGCHLD;
++ }
++
+ info.si_signo = sig;
+ info.si_errno = 0;
+ /*
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index 5199930..1dcf253 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -638,6 +638,8 @@ int trace_print_lat_context(struct trace_iterator *iter)
+ {
+ u64 next_ts;
+ int ret;
++ /* trace_find_next_entry will reset ent_size */
++ int ent_size = iter->ent_size;
+ struct trace_seq *s = &iter->seq;
+ struct trace_entry *entry = iter->ent,
+ *next_entry = trace_find_next_entry(iter, NULL,
+@@ -646,6 +648,9 @@ int trace_print_lat_context(struct trace_iterator *iter)
+ unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
+ unsigned long rel_usecs;
+
++ /* Restore the original ent_size */
++ iter->ent_size = ent_size;
++
+ if (!next_entry)
+ next_ts = iter->ts;
+ rel_usecs = ns2usecs(next_ts - iter->ts);
+diff --git a/mm/swap_state.c b/mm/swap_state.c
+index 78cc4d1..7704d9c 100644
+--- a/mm/swap_state.c
++++ b/mm/swap_state.c
+@@ -27,7 +27,7 @@
+ */
+ static const struct address_space_operations swap_aops = {
+ .writepage = swap_writepage,
+- .set_page_dirty = __set_page_dirty_nobuffers,
++ .set_page_dirty = __set_page_dirty_no_writeback,
+ .migratepage = migrate_page,
+ };
+
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index e7c69f4..b04a6ef 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -2006,16 +2006,17 @@ static void __exit ax25_exit(void)
+ proc_net_remove(&init_net, "ax25_route");
+ proc_net_remove(&init_net, "ax25");
+ proc_net_remove(&init_net, "ax25_calls");
+- ax25_rt_free();
+- ax25_uid_free();
+- ax25_dev_free();
+
+- ax25_unregister_sysctl();
+ unregister_netdevice_notifier(&ax25_dev_notifier);
++ ax25_unregister_sysctl();
+
+ dev_remove_pack(&ax25_packet_type);
+
+ sock_unregister(PF_AX25);
+ proto_unregister(&ax25_proto);
++
++ ax25_rt_free();
++ ax25_uid_free();
++ ax25_dev_free();
+ }
+ module_exit(ax25_exit);
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 8eb6b15..5ac1811 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -241,7 +241,6 @@ static void br_multicast_group_expired(unsigned long data)
+ hlist_del_rcu(&mp->hlist[mdb->ver]);
+ mdb->size--;
+
+- del_timer(&mp->query_timer);
+ call_rcu_bh(&mp->rcu, br_multicast_free_group);
+
+ out:
+@@ -271,7 +270,6 @@ static void br_multicast_del_pg(struct net_bridge *br,
+ rcu_assign_pointer(*pp, p->next);
+ hlist_del_init(&p->mglist);
+ del_timer(&p->timer);
+- del_timer(&p->query_timer);
+ call_rcu_bh(&p->rcu, br_multicast_free_pg);
+
+ if (!mp->ports && !mp->mglist &&
+@@ -507,74 +505,6 @@ static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
+ return NULL;
+ }
+
+-static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
+-{
+- struct net_bridge *br = mp->br;
+- struct sk_buff *skb;
+-
+- skb = br_multicast_alloc_query(br, &mp->addr);
+- if (!skb)
+- goto timer;
+-
+- netif_rx(skb);
+-
+-timer:
+- if (++mp->queries_sent < br->multicast_last_member_count)
+- mod_timer(&mp->query_timer,
+- jiffies + br->multicast_last_member_interval);
+-}
+-
+-static void br_multicast_group_query_expired(unsigned long data)
+-{
+- struct net_bridge_mdb_entry *mp = (void *)data;
+- struct net_bridge *br = mp->br;
+-
+- spin_lock(&br->multicast_lock);
+- if (!netif_running(br->dev) || !mp->mglist ||
+- mp->queries_sent >= br->multicast_last_member_count)
+- goto out;
+-
+- br_multicast_send_group_query(mp);
+-
+-out:
+- spin_unlock(&br->multicast_lock);
+-}
+-
+-static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
+-{
+- struct net_bridge_port *port = pg->port;
+- struct net_bridge *br = port->br;
+- struct sk_buff *skb;
+-
+- skb = br_multicast_alloc_query(br, &pg->addr);
+- if (!skb)
+- goto timer;
+-
+- br_deliver(port, skb);
+-
+-timer:
+- if (++pg->queries_sent < br->multicast_last_member_count)
+- mod_timer(&pg->query_timer,
+- jiffies + br->multicast_last_member_interval);
+-}
+-
+-static void br_multicast_port_group_query_expired(unsigned long data)
+-{
+- struct net_bridge_port_group *pg = (void *)data;
+- struct net_bridge_port *port = pg->port;
+- struct net_bridge *br = port->br;
+-
+- spin_lock(&br->multicast_lock);
+- if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
+- pg->queries_sent >= br->multicast_last_member_count)
+- goto out;
+-
+- br_multicast_send_port_group_query(pg);
+-
+-out:
+- spin_unlock(&br->multicast_lock);
+-}
+-
+ static struct net_bridge_mdb_entry *br_multicast_get_group(
+ struct net_bridge *br, struct net_bridge_port *port,
+ struct br_ip *group, int hash)
+@@ -690,8 +620,6 @@ rehash:
+ mp->addr = *group;
+ setup_timer(&mp->timer, br_multicast_group_expired,
+ (unsigned long)mp);
+- setup_timer(&mp->query_timer, br_multicast_group_query_expired,
+- (unsigned long)mp);
+
+ hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
+ mdb->size++;
+@@ -746,8 +674,6 @@ static int br_multicast_add_group(struct net_bridge *br,
+ hlist_add_head(&p->mglist, &port->mglist);
+ setup_timer(&p->timer, br_multicast_port_group_expired,
+ (unsigned long)p);
+- setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
+- (unsigned long)p);
+
+ rcu_assign_pointer(*pp, p);
+
+@@ -1291,9 +1217,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
+ time_after(mp->timer.expires, time) :
+ try_to_del_timer_sync(&mp->timer) >= 0)) {
+ mod_timer(&mp->timer, time);
+-
+- mp->queries_sent = 0;
+- mod_timer(&mp->query_timer, now);
+ }
+
+ goto out;
+@@ -1310,9 +1233,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
+ time_after(p->timer.expires, time) :
+ try_to_del_timer_sync(&p->timer) >= 0)) {
+ mod_timer(&p->timer, time);
+-
+- p->queries_sent = 0;
+- mod_timer(&p->query_timer, now);
+ }
+
+ break;
+@@ -1680,7 +1600,6 @@ void br_multicast_stop(struct net_bridge *br)
+ hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
+ hlist[ver]) {
+ del_timer(&mp->timer);
+- del_timer(&mp->query_timer);
+ call_rcu_bh(&mp->rcu, br_multicast_free_group);
+ }
+ }
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index d7d6fb0..93264df 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -82,9 +82,7 @@ struct net_bridge_port_group {
+ struct hlist_node mglist;
+ struct rcu_head rcu;
+ struct timer_list timer;
+- struct timer_list query_timer;
+ struct br_ip addr;
+- u32 queries_sent;
+ };
+
+ struct net_bridge_mdb_entry
+@@ -94,10 +92,8 @@ struct net_bridge_mdb_entry
+ struct net_bridge_port_group __rcu *ports;
+ struct rcu_head rcu;
+ struct timer_list timer;
+- struct timer_list query_timer;
+ struct br_ip addr;
+ bool mglist;
+- u32 queries_sent;
+ };
+
+ struct net_bridge_mdb_htable
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 55cd370..cd5050e 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4102,54 +4102,41 @@ static int dev_ifconf(struct net *net, char __user *arg)
+
+ #ifdef CONFIG_PROC_FS
+
+-#define BUCKET_SPACE (32 - NETDEV_HASHBITS)
+-
+-struct dev_iter_state {
+- struct seq_net_private p;
+- unsigned int pos; /* bucket << BUCKET_SPACE + offset */
+-};
++#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
+
+ #define get_bucket(x) ((x) >> BUCKET_SPACE)
+ #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
+ #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
+
+-static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
++static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
+ {
+- struct dev_iter_state *state = seq->private;
+ struct net *net = seq_file_net(seq);
+ struct net_device *dev;
+ struct hlist_node *p;
+ struct hlist_head *h;
+- unsigned int count, bucket, offset;
++ unsigned int count = 0, offset = get_offset(*pos);
+
+- bucket = get_bucket(state->pos);
+- offset = get_offset(state->pos);
+- h = &net->dev_name_head[bucket];
+- count = 0;
++ h = &net->dev_name_head[get_bucket(*pos)];
+ hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
+- if (count++ == offset) {
+- state->pos = set_bucket_offset(bucket, count);
++ if (++count == offset)
+ return dev;
+- }
+ }
+
+ return NULL;
+ }
+
+-static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
++static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
+ {
+- struct dev_iter_state *state = seq->private;
+ struct net_device *dev;
+ unsigned int bucket;
+
+- bucket = get_bucket(state->pos);
+ do {
+- dev = dev_from_same_bucket(seq);
++ dev = dev_from_same_bucket(seq, pos);
+ if (dev)
+ return dev;
+
+- bucket++;
+- state->pos = set_bucket_offset(bucket, 0);
++ bucket = get_bucket(*pos) + 1;
++ *pos = set_bucket_offset(bucket, 1);
+ } while (bucket < NETDEV_HASHENTRIES);
+
+ return NULL;
+@@ -4162,33 +4149,20 @@ static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
+ void *dev_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(RCU)
+ {
+- struct dev_iter_state *state = seq->private;
+-
+ rcu_read_lock();
+ if (!*pos)
+ return SEQ_START_TOKEN;
+
+- /* check for end of the hash */
+- if (state->pos == 0 && *pos > 1)
++ if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
+ return NULL;
+
+- return dev_from_new_bucket(seq);
++ return dev_from_bucket(seq, pos);
+ }
+
+ void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ {
+- struct net_device *dev;
+-
+ ++*pos;
+-
+- if (v == SEQ_START_TOKEN)
+- return dev_from_new_bucket(seq);
+-
+- dev = dev_from_same_bucket(seq);
+- if (dev)
+- return dev;
+-
+- return dev_from_new_bucket(seq);
++ return dev_from_bucket(seq, pos);
+ }
+
+ void dev_seq_stop(struct seq_file *seq, void *v)
+@@ -4287,13 +4261,7 @@ static const struct seq_operations dev_seq_ops = {
+ static int dev_seq_open(struct inode *inode, struct file *file)
+ {
+ return seq_open_net(inode, file, &dev_seq_ops,
+- sizeof(struct dev_iter_state));
+-}
+-
+-int dev_seq_open_ops(struct inode *inode, struct file *file,
+- const struct seq_operations *ops)
+-{
+- return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
++ sizeof(struct seq_net_private));
+ }
+
+ static const struct file_operations dev_seq_fops = {
+diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
+index febba51..277faef 100644
+--- a/net/core/dev_addr_lists.c
++++ b/net/core/dev_addr_lists.c
+@@ -696,7 +696,8 @@ static const struct seq_operations dev_mc_seq_ops = {
+
+ static int dev_mc_seq_open(struct inode *inode, struct file *file)
+ {
+- return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
++ return seq_open_net(inode, file, &dev_mc_seq_ops,
++ sizeof(struct seq_net_private));
+ }
+
+ static const struct file_operations dev_mc_seq_fops = {
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 0e950fd..31a5ae5 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -83,21 +83,29 @@ assign:
+
+ static int ops_init(const struct pernet_operations *ops, struct net *net)
+ {
+- int err;
++ int err = -ENOMEM;
++ void *data = NULL;
++
+ if (ops->id && ops->size) {
+- void *data = kzalloc(ops->size, GFP_KERNEL);
++ data = kzalloc(ops->size, GFP_KERNEL);
+ if (!data)
+- return -ENOMEM;
++ goto out;
+
+ err = net_assign_generic(net, *ops->id, data);
+- if (err) {
+- kfree(data);
+- return err;
+- }
++ if (err)
++ goto cleanup;
+ }
++ err = 0;
+ if (ops->init)
+- return ops->init(net);
+- return 0;
++ err = ops->init(net);
++ if (!err)
++ return 0;
++
++cleanup:
++ kfree(data);
++
++out:
++ return err;
+ }
+
+ static void ops_free(const struct pernet_operations *ops, struct net *net)
+@@ -448,12 +456,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
+ static int __register_pernet_operations(struct list_head *list,
+ struct pernet_operations *ops)
+ {
+- int err = 0;
+- err = ops_init(ops, &init_net);
+- if (err)
+- ops_free(ops, &init_net);
+- return err;
+-
++ return ops_init(ops, &init_net);
+ }
+
+ static void __unregister_pernet_operations(struct pernet_operations *ops)
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 3c30ee4..2ec200de 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -903,9 +903,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
+ goto adjust_others;
+ }
+
+- data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
++ data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
++ gfp_mask);
+ if (!data)
+ goto nodata;
++ size = SKB_WITH_OVERHEAD(ksize(data));
+
+ /* Copy only real data... and, alas, header. This should be
+ * optimized for the cases when header is void.
+@@ -3111,6 +3113,8 @@ static void sock_rmem_free(struct sk_buff *skb)
+ */
+ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+ {
++ int len = skb->len;
++
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf)
+ return -ENOMEM;
+@@ -3125,7 +3129,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+
+ skb_queue_tail(&sk->sk_error_queue, skb);
+ if (!sock_flag(sk, SOCK_DEAD))
+- sk->sk_data_ready(sk, skb->len);
++ sk->sk_data_ready(sk, len);
+ return 0;
+ }
+ EXPORT_SYMBOL(sock_queue_err_skb);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 34f5db1..7904db4 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -701,11 +701,12 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+ skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+ if (skb) {
+ if (sk_wmem_schedule(sk, skb->truesize)) {
++ skb_reserve(skb, sk->sk_prot->max_header);
+ /*
+ * Make sure that we have exactly size bytes
+ * available to the caller, no more, no less.
+ */
+- skb_reserve(skb, skb_tailroom(skb) - size);
++ skb->avail_size = size;
+ return skb;
+ }
+ __kfree_skb(skb);
+@@ -860,7 +861,7 @@ wait_for_memory:
+ }
+
+ out:
+- if (copied)
++ if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
+ tcp_push(sk, flags, mss_now, tp->nonagle);
+ return copied;
+
+@@ -995,10 +996,9 @@ new_segment:
+ copy = seglen;
+
+ /* Where to copy to? */
+- if (skb_tailroom(skb) > 0) {
++ if (skb_availroom(skb) > 0) {
+ /* We have some space in skb head. Superb! */
+- if (copy > skb_tailroom(skb))
+- copy = skb_tailroom(skb);
++ copy = min_t(int, copy, skb_availroom(skb));
+ err = skb_add_data_nocache(sk, skb, from, copy);
+ if (err)
+ goto do_fault;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index e4d1e4a..daedc07 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -334,6 +334,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
+ incr = __tcp_grow_window(sk, skb);
+
+ if (incr) {
++ incr = max_t(int, incr, 2 * skb->len);
+ tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
+ tp->window_clamp);
+ inet_csk(sk)->icsk_ack.quick |= 1;
+@@ -473,8 +474,11 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
+ if (!win_dep) {
+ m -= (new_sample >> 3);
+ new_sample += m;
+- } else if (m < new_sample)
+- new_sample = m << 3;
++ } else {
++ m <<= 3;
++ if (m < new_sample)
++ new_sample = m;
++ }
+ } else {
+ /* No previous measure. */
+ new_sample = m << 3;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 097e0c7..c51dd5b 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1093,6 +1093,14 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
+ {
+ int i, k, eat;
+
++ eat = min_t(int, len, skb_headlen(skb));
++ if (eat) {
++ __skb_pull(skb, eat);
++ skb->avail_size -= eat;
++ len -= eat;
++ if (!len)
++ return;
++ }
+ eat = len;
+ k = 0;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+@@ -1124,11 +1132,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
+ if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ return -ENOMEM;
+
+- /* If len == headlen, we avoid __skb_pull to preserve alignment. */
+- if (unlikely(len < skb_headlen(skb)))
+- __skb_pull(skb, len);
+- else
+- __pskb_trim_head(skb, len - skb_headlen(skb));
++ __pskb_trim_head(skb, len);
+
+ TCP_SKB_CB(skb)->seq += len;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+@@ -2057,7 +2061,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
+ /* Punt if not enough space exists in the first SKB for
+ * the data in the second
+ */
+- if (skb->len > skb_tailroom(to))
++ if (skb->len > skb_availroom(to))
+ break;
+
+ if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 2257366..f2d74ea 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -2054,7 +2054,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
+ if (!delta)
+ pmc->mca_sfcount[sfmode]--;
+ for (j=0; j<i; j++)
+- (void) ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
++ ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
+ } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
+ struct ip6_sf_list *psf;
+
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index b859e4a..4a56574 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1494,6 +1494,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+ tcp_mtup_init(newsk);
+ tcp_sync_mss(newsk, dst_mtu(dst));
+ newtp->advmss = dst_metric_advmss(dst);
++ if (tcp_sk(sk)->rx_opt.user_mss &&
++ tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
++ newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
++
+ tcp_initialize_rcv_mss(newsk);
+ if (tcp_rsk(req)->snt_synack)
+ tcp_valid_rtt_meas(newsk,
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index eff1f4e..4ff35bf 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1121,7 +1121,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
+ tx->sta = rcu_dereference(sdata->u.vlan.sta);
+ if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
+ return TX_DROP;
+- } else if (info->flags & IEEE80211_TX_CTL_INJECTED) {
++ } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
++ tx->sdata->control_port_protocol == tx->skb->protocol) {
+ tx->sta = sta_info_get_bss(sdata, hdr->addr1);
+ }
+ if (!tx->sta)
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 1201b6d..a99fb41 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -830,12 +830,19 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
+ return 0;
+ }
+
+-int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
++static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
+ {
+ int len = skb->len;
+
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, len);
++ return len;
++}
++
++int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
++{
++ int len = __netlink_sendskb(sk, skb);
++
+ sock_put(sk);
+ return len;
+ }
+@@ -960,8 +967,7 @@ static inline int netlink_broadcast_deliver(struct sock *sk,
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+ !test_bit(0, &nlk->state)) {
+ skb_set_owner_r(skb, sk);
+- skb_queue_tail(&sk->sk_receive_queue, skb);
+- sk->sk_data_ready(sk, skb->len);
++ __netlink_sendskb(sk, skb);
+ return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
+ }
+ return -1;
+@@ -1684,10 +1690,8 @@ static int netlink_dump(struct sock *sk)
+
+ if (sk_filter(sk, skb))
+ kfree_skb(skb);
+- else {
+- skb_queue_tail(&sk->sk_receive_queue, skb);
+- sk->sk_data_ready(sk, skb->len);
+- }
++ else
++ __netlink_sendskb(sk, skb);
+ return 0;
+ }
+
+@@ -1701,10 +1705,8 @@ static int netlink_dump(struct sock *sk)
+
+ if (sk_filter(sk, skb))
+ kfree_skb(skb);
+- else {
+- skb_queue_tail(&sk->sk_receive_queue, skb);
+- sk->sk_data_ready(sk, skb->len);
+- }
++ else
++ __netlink_sendskb(sk, skb);
+
+ if (cb->done)
+ cb->done(cb);
+diff --git a/net/phonet/pep.c b/net/phonet/pep.c
+index 2ba6e9f..007546d 100644
+--- a/net/phonet/pep.c
++++ b/net/phonet/pep.c
+@@ -1046,6 +1046,9 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
+ int flags = msg->msg_flags;
+ int err, done;
+
++ if (len > USHRT_MAX)
++ return -EMSGSIZE;
++
+ if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
+ MSG_CMSG_COMPAT)) ||
+ !(msg->msg_flags & MSG_EOR))
+diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
+index 6cd8ddf..e1afe0c 100644
+--- a/net/sched/sch_gred.c
++++ b/net/sched/sch_gred.c
+@@ -544,11 +544,8 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
+ opt.packets = q->packetsin;
+ opt.bytesin = q->bytesin;
+
+- if (gred_wred_mode(table)) {
+- q->parms.qidlestart =
+- table->tab[table->def]->parms.qidlestart;
+- q->parms.qavg = table->tab[table->def]->parms.qavg;
+- }
++ if (gred_wred_mode(table))
++ gred_load_wred_set(table, q);
+
+ opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);
+
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 54a7cd2..0075554 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4133,9 +4133,10 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
+ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
+ int __user *optlen)
+ {
+- if (len < sizeof(struct sctp_event_subscribe))
++ if (len <= 0)
+ return -EINVAL;
+- len = sizeof(struct sctp_event_subscribe);
++ if (len > sizeof(struct sctp_event_subscribe))
++ len = sizeof(struct sctp_event_subscribe);
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
+diff --git a/net/socket.c b/net/socket.c
+index 2dce67a..273cbce 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -791,9 +791,9 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
+
+ sock = file->private_data;
+
+- flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT;
+- if (more)
+- flags |= MSG_MORE;
++ flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
++ /* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */
++ flags |= more;
+
+ return kernel_sendpage(sock, page, offset, size, flags);
+ }
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index ffafda5..c06c365 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -1258,6 +1258,11 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
+ goto bad_res;
+ }
+
++ if (!netif_running(netdev)) {
++ result = -ENETDOWN;
++ goto bad_res;
++ }
++
+ nla_for_each_nested(nl_txq_params,
+ info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
+ rem_txq_params) {
+@@ -5944,7 +5949,7 @@ static struct genl_ops nl80211_ops[] = {
+ .doit = nl80211_get_key,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+- .internal_flags = NL80211_FLAG_NEED_NETDEV |
++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+@@ -5976,7 +5981,7 @@ static struct genl_ops nl80211_ops[] = {
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .doit = nl80211_addset_beacon,
+- .internal_flags = NL80211_FLAG_NEED_NETDEV |
++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+@@ -5984,7 +5989,7 @@ static struct genl_ops nl80211_ops[] = {
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .doit = nl80211_addset_beacon,
+- .internal_flags = NL80211_FLAG_NEED_NETDEV |
++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+@@ -6008,7 +6013,7 @@ static struct genl_ops nl80211_ops[] = {
+ .doit = nl80211_set_station,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+- .internal_flags = NL80211_FLAG_NEED_NETDEV |
++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+@@ -6024,7 +6029,7 @@ static struct genl_ops nl80211_ops[] = {
+ .doit = nl80211_del_station,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+- .internal_flags = NL80211_FLAG_NEED_NETDEV |
++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+@@ -6057,7 +6062,7 @@ static struct genl_ops nl80211_ops[] = {
+ .doit = nl80211_del_mpath,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+- .internal_flags = NL80211_FLAG_NEED_NETDEV |
++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+@@ -6065,7 +6070,7 @@ static struct genl_ops nl80211_ops[] = {
+ .doit = nl80211_set_bss,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+- .internal_flags = NL80211_FLAG_NEED_NETDEV |
++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+@@ -6091,7 +6096,7 @@ static struct genl_ops nl80211_ops[] = {
+ .doit = nl80211_get_mesh_config,
+ .policy = nl80211_policy,
+ /* can be retrieved by unprivileged users */
+- .internal_flags = NL80211_FLAG_NEED_NETDEV |
++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+@@ -6224,7 +6229,7 @@ static struct genl_ops nl80211_ops[] = {
+ .doit = nl80211_setdel_pmksa,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+- .internal_flags = NL80211_FLAG_NEED_NETDEV |
++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+@@ -6232,7 +6237,7 @@ static struct genl_ops nl80211_ops[] = {
+ .doit = nl80211_setdel_pmksa,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+- .internal_flags = NL80211_FLAG_NEED_NETDEV |
++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+@@ -6240,7 +6245,7 @@ static struct genl_ops nl80211_ops[] = {
+ .doit = nl80211_flush_pmksa,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+- .internal_flags = NL80211_FLAG_NEED_NETDEV |
++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+@@ -6328,7 +6333,7 @@ static struct genl_ops nl80211_ops[] = {
+ .doit = nl80211_set_wds_peer,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+- .internal_flags = NL80211_FLAG_NEED_NETDEV |
++ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 4dde429..8bf8902 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -996,7 +996,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+ if (rdev->wiphy.software_iftypes & BIT(iftype))
+ continue;
+ for (j = 0; j < c->n_limits; j++) {
+- if (!(limits[j].types & iftype))
++ if (!(limits[j].types & BIT(iftype)))
+ continue;
+ if (limits[j].max < num[iftype])
+ goto cont;
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index f936d1f..d1d0ae8 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -926,6 +926,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
+ if (!sym->st_shndx || get_secindex(info, sym) >= info->num_sections)
+ return;
+
++ /* We're looking for an object */
++ if (ELF_ST_TYPE(sym->st_info) != STT_OBJECT)
++ return;
++
+ /* Handle all-NULL symbols allocated into .bss */
+ if (info->sechdrs[get_secindex(info, sym)].sh_type & SHT_NOBITS) {
+ zeros = calloc(1, sym->st_size);
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index ae94929..51a1afc 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -4003,9 +4003,14 @@ static void cx_auto_init_output(struct hda_codec *codec)
+ int i;
+
+ mute_outputs(codec, spec->multiout.num_dacs, spec->multiout.dac_nids);
+- for (i = 0; i < cfg->hp_outs; i++)
++ for (i = 0; i < cfg->hp_outs; i++) {
++ unsigned int val = PIN_OUT;
++ if (snd_hda_query_pin_caps(codec, cfg->hp_pins[i]) &
++ AC_PINCAP_HP_DRV)
++ val |= AC_PINCTL_HP_EN;
+ snd_hda_codec_write(codec, cfg->hp_pins[i], 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP);
++ AC_VERB_SET_PIN_WIDGET_CONTROL, val);
++ }
+ mute_outputs(codec, cfg->hp_outs, cfg->hp_pins);
+ mute_outputs(codec, cfg->line_outs, cfg->line_out_pins);
+ mute_outputs(codec, cfg->speaker_outs, cfg->speaker_pins);
+@@ -4408,8 +4413,10 @@ static void apply_pin_fixup(struct hda_codec *codec,
+
+ enum {
+ CXT_PINCFG_LENOVO_X200,
++ CXT_PINCFG_LENOVO_TP410,
+ };
+
++/* ThinkPad X200 & co with cxt5051 */
+ static const struct cxt_pincfg cxt_pincfg_lenovo_x200[] = {
+ { 0x16, 0x042140ff }, /* HP (seq# overridden) */
+ { 0x17, 0x21a11000 }, /* dock-mic */
+@@ -4417,15 +4424,33 @@ static const struct cxt_pincfg cxt_pincfg_lenovo_x200[] = {
+ {}
+ };
+
++/* ThinkPad 410/420/510/520, X201 & co with cxt5066 */
++static const struct cxt_pincfg cxt_pincfg_lenovo_tp410[] = {
++ { 0x19, 0x042110ff }, /* HP (seq# overridden) */
++ { 0x1a, 0x21a190f0 }, /* dock-mic */
++ { 0x1c, 0x212140ff }, /* dock-HP */
++ {}
++};
++
+ static const struct cxt_pincfg *cxt_pincfg_tbl[] = {
+ [CXT_PINCFG_LENOVO_X200] = cxt_pincfg_lenovo_x200,
++ [CXT_PINCFG_LENOVO_TP410] = cxt_pincfg_lenovo_tp410,
+ };
+
+-static const struct snd_pci_quirk cxt_fixups[] = {
++static const struct snd_pci_quirk cxt5051_fixups[] = {
+ SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200),
+ {}
+ };
+
++static const struct snd_pci_quirk cxt5066_fixups[] = {
++ SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
++ SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
++ SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
++ SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410),
++ SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
++ {}
++};
++
+ /* add "fake" mute amp-caps to DACs on cx5051 so that mixer mute switches
+ * can be created (bko#42825)
+ */
+@@ -4462,11 +4487,13 @@ static int patch_conexant_auto(struct hda_codec *codec)
+ break;
+ case 0x14f15051:
+ add_cx5051_fake_mutes(codec);
++ apply_pin_fixup(codec, cxt5051_fixups, cxt_pincfg_tbl);
++ break;
++ default:
++ apply_pin_fixup(codec, cxt5066_fixups, cxt_pincfg_tbl);
+ break;
+ }
+
+- apply_pin_fixup(codec, cxt_fixups, cxt_pincfg_tbl);
+-
+ err = cx_auto_search_adcs(codec);
+ if (err < 0)
+ return err;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index dc8a6fc..0bc5a46 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5032,6 +5032,7 @@ static const struct alc_fixup alc269_fixups[] = {
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
++ SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+ SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
+diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
+index 336de8f..0e7e26e 100644
+--- a/sound/soc/codecs/tlv320aic23.c
++++ b/sound/soc/codecs/tlv320aic23.c
+@@ -473,7 +473,7 @@ static int tlv320aic23_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ static int tlv320aic23_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+ {
+- u16 reg = snd_soc_read(codec, TLV320AIC23_PWR) & 0xff7f;
++ u16 reg = snd_soc_read(codec, TLV320AIC23_PWR) & 0x17f;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+@@ -492,7 +492,7 @@ static int tlv320aic23_set_bias_level(struct snd_soc_codec *codec,
+ case SND_SOC_BIAS_OFF:
+ /* everything off, dac mute, inactive */
+ snd_soc_write(codec, TLV320AIC23_ACTIVE, 0x0);
+- snd_soc_write(codec, TLV320AIC23_PWR, 0xffff);
++ snd_soc_write(codec, TLV320AIC23_PWR, 0x1ff);
+ break;
+ }
+ codec->dapm.bias_level = level;
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 2f1f5f8..7806301 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -883,61 +883,170 @@ static void wm8994_update_class_w(struct snd_soc_codec *codec)
+ }
+ }
+
+-static int late_enable_ev(struct snd_soc_dapm_widget *w,
+- struct snd_kcontrol *kcontrol, int event)
++static int aif1clk_ev(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
+ {
+ struct snd_soc_codec *codec = w->codec;
+- struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
++ struct wm8994 *control = codec->control_data;
++ int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
++ int dac;
++ int adc;
++ int val;
++
++ switch (control->type) {
++ case WM8994:
++ case WM8958:
++ mask |= WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA;
++ break;
++ default:
++ break;
++ }
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+- if (wm8994->aif1clk_enable) {
+- snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
+- WM8994_AIF1CLK_ENA_MASK,
+- WM8994_AIF1CLK_ENA);
+- wm8994->aif1clk_enable = 0;
+- }
+- if (wm8994->aif2clk_enable) {
+- snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
+- WM8994_AIF2CLK_ENA_MASK,
+- WM8994_AIF2CLK_ENA);
+- wm8994->aif2clk_enable = 0;
+- }
++ val = snd_soc_read(codec, WM8994_AIF1_CONTROL_1);
++ if ((val & WM8994_AIF1ADCL_SRC) &&
++ (val & WM8994_AIF1ADCR_SRC))
++ adc = WM8994_AIF1ADC1R_ENA | WM8994_AIF1ADC2R_ENA;
++ else if (!(val & WM8994_AIF1ADCL_SRC) &&
++ !(val & WM8994_AIF1ADCR_SRC))
++ adc = WM8994_AIF1ADC1L_ENA | WM8994_AIF1ADC2L_ENA;
++ else
++ adc = WM8994_AIF1ADC1R_ENA | WM8994_AIF1ADC2R_ENA |
++ WM8994_AIF1ADC1L_ENA | WM8994_AIF1ADC2L_ENA;
++
++ val = snd_soc_read(codec, WM8994_AIF1_CONTROL_2);
++ if ((val & WM8994_AIF1DACL_SRC) &&
++ (val & WM8994_AIF1DACR_SRC))
++ dac = WM8994_AIF1DAC1R_ENA | WM8994_AIF1DAC2R_ENA;
++ else if (!(val & WM8994_AIF1DACL_SRC) &&
++ !(val & WM8994_AIF1DACR_SRC))
++ dac = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC2L_ENA;
++ else
++ dac = WM8994_AIF1DAC1R_ENA | WM8994_AIF1DAC2R_ENA |
++ WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC2L_ENA;
++
++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
++ mask, adc);
++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
++ mask, dac);
++ snd_soc_update_bits(codec, WM8994_CLOCKING_1,
++ WM8994_AIF1DSPCLK_ENA |
++ WM8994_SYSDSPCLK_ENA,
++ WM8994_AIF1DSPCLK_ENA |
++ WM8994_SYSDSPCLK_ENA);
++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4, mask,
++ WM8994_AIF1ADC1R_ENA |
++ WM8994_AIF1ADC1L_ENA |
++ WM8994_AIF1ADC2R_ENA |
++ WM8994_AIF1ADC2L_ENA);
++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, mask,
++ WM8994_AIF1DAC1R_ENA |
++ WM8994_AIF1DAC1L_ENA |
++ WM8994_AIF1DAC2R_ENA |
++ WM8994_AIF1DAC2L_ENA);
++ break;
++
++ case SND_SOC_DAPM_PRE_PMD:
++ case SND_SOC_DAPM_POST_PMD:
++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
++ mask, 0);
++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
++ mask, 0);
++
++ val = snd_soc_read(codec, WM8994_CLOCKING_1);
++ if (val & WM8994_AIF2DSPCLK_ENA)
++ val = WM8994_SYSDSPCLK_ENA;
++ else
++ val = 0;
++ snd_soc_update_bits(codec, WM8994_CLOCKING_1,
++ WM8994_SYSDSPCLK_ENA |
++ WM8994_AIF1DSPCLK_ENA, val);
+ break;
+ }
+
+- /* We may also have postponed startup of DSP, handle that. */
+- wm8958_aif_ev(w, kcontrol, event);
+-
+ return 0;
+ }
+
+-static int late_disable_ev(struct snd_soc_dapm_widget *w,
+- struct snd_kcontrol *kcontrol, int event)
++static int aif2clk_ev(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
+ {
+ struct snd_soc_codec *codec = w->codec;
+- struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
++ int dac;
++ int adc;
++ int val;
+
+ switch (event) {
++ case SND_SOC_DAPM_PRE_PMU:
++ val = snd_soc_read(codec, WM8994_AIF2_CONTROL_1);
++ if ((val & WM8994_AIF2ADCL_SRC) &&
++ (val & WM8994_AIF2ADCR_SRC))
++ adc = WM8994_AIF2ADCR_ENA;
++ else if (!(val & WM8994_AIF2ADCL_SRC) &&
++ !(val & WM8994_AIF2ADCR_SRC))
++ adc = WM8994_AIF2ADCL_ENA;
++ else
++ adc = WM8994_AIF2ADCL_ENA | WM8994_AIF2ADCR_ENA;
++
++
++ val = snd_soc_read(codec, WM8994_AIF2_CONTROL_2);
++ if ((val & WM8994_AIF2DACL_SRC) &&
++ (val & WM8994_AIF2DACR_SRC))
++ dac = WM8994_AIF2DACR_ENA;
++ else if (!(val & WM8994_AIF2DACL_SRC) &&
++ !(val & WM8994_AIF2DACR_SRC))
++ dac = WM8994_AIF2DACL_ENA;
++ else
++ dac = WM8994_AIF2DACL_ENA | WM8994_AIF2DACR_ENA;
++
++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
++ WM8994_AIF2ADCL_ENA |
++ WM8994_AIF2ADCR_ENA, adc);
++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
++ WM8994_AIF2DACL_ENA |
++ WM8994_AIF2DACR_ENA, dac);
++ snd_soc_update_bits(codec, WM8994_CLOCKING_1,
++ WM8994_AIF2DSPCLK_ENA |
++ WM8994_SYSDSPCLK_ENA,
++ WM8994_AIF2DSPCLK_ENA |
++ WM8994_SYSDSPCLK_ENA);
++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
++ WM8994_AIF2ADCL_ENA |
++ WM8994_AIF2ADCR_ENA,
++ WM8994_AIF2ADCL_ENA |
++ WM8994_AIF2ADCR_ENA);
++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
++ WM8994_AIF2DACL_ENA |
++ WM8994_AIF2DACR_ENA,
++ WM8994_AIF2DACL_ENA |
++ WM8994_AIF2DACR_ENA);
++ break;
++
++ case SND_SOC_DAPM_PRE_PMD:
+ case SND_SOC_DAPM_POST_PMD:
+- if (wm8994->aif1clk_disable) {
+- snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
+- WM8994_AIF1CLK_ENA_MASK, 0);
+- wm8994->aif1clk_disable = 0;
+- }
+- if (wm8994->aif2clk_disable) {
+- snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
+- WM8994_AIF2CLK_ENA_MASK, 0);
+- wm8994->aif2clk_disable = 0;
+- }
++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
++ WM8994_AIF2DACL_ENA |
++ WM8994_AIF2DACR_ENA, 0);
++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
++ WM8994_AIF2ADCL_ENA |
++ WM8994_AIF2ADCR_ENA, 0);
++
++ val = snd_soc_read(codec, WM8994_CLOCKING_1);
++ if (val & WM8994_AIF1DSPCLK_ENA)
++ val = WM8994_SYSDSPCLK_ENA;
++ else
++ val = 0;
++ snd_soc_update_bits(codec, WM8994_CLOCKING_1,
++ WM8994_SYSDSPCLK_ENA |
++ WM8994_AIF2DSPCLK_ENA, val);
+ break;
+ }
+
+ return 0;
+ }
+
+-static int aif1clk_ev(struct snd_soc_dapm_widget *w,
+- struct snd_kcontrol *kcontrol, int event)
++static int aif1clk_late_ev(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
+ {
+ struct snd_soc_codec *codec = w->codec;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+@@ -954,8 +1063,8 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
+ return 0;
+ }
+
+-static int aif2clk_ev(struct snd_soc_dapm_widget *w,
+- struct snd_kcontrol *kcontrol, int event)
++static int aif2clk_late_ev(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
+ {
+ struct snd_soc_codec *codec = w->codec;
+ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+@@ -972,6 +1081,63 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
+ return 0;
+ }
+
++static int late_enable_ev(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
++{
++ struct snd_soc_codec *codec = w->codec;
++ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
++
++ switch (event) {
++ case SND_SOC_DAPM_PRE_PMU:
++ if (wm8994->aif1clk_enable) {
++ aif1clk_ev(w, kcontrol, event);
++ snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
++ WM8994_AIF1CLK_ENA_MASK,
++ WM8994_AIF1CLK_ENA);
++ wm8994->aif1clk_enable = 0;
++ }
++ if (wm8994->aif2clk_enable) {
++ aif2clk_ev(w, kcontrol, event);
++ snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
++ WM8994_AIF2CLK_ENA_MASK,
++ WM8994_AIF2CLK_ENA);
++ wm8994->aif2clk_enable = 0;
++ }
++ break;
++ }
++
++ /* We may also have postponed startup of DSP, handle that. */
++ wm8958_aif_ev(w, kcontrol, event);
++
++ return 0;
++}
++
++static int late_disable_ev(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
++{
++ struct snd_soc_codec *codec = w->codec;
++ struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
++
++ switch (event) {
++ case SND_SOC_DAPM_POST_PMD:
++ if (wm8994->aif1clk_disable) {
++ snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
++ WM8994_AIF1CLK_ENA_MASK, 0);
++ aif1clk_ev(w, kcontrol, event);
++ wm8994->aif1clk_disable = 0;
++ }
++ if (wm8994->aif2clk_disable) {
++ snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
++ WM8994_AIF2CLK_ENA_MASK, 0);
++ aif2clk_ev(w, kcontrol, event);
++ wm8994->aif2clk_disable = 0;
++ }
++ break;
++ }
++
++ return 0;
++}
++
+ static int adc_mux_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+ {
+@@ -1268,9 +1434,9 @@ static const struct snd_kcontrol_new aif2dacr_src_mux =
+ SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum);
+
+ static const struct snd_soc_dapm_widget wm8994_lateclk_revd_widgets[] = {
+-SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_ev,
++SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_late_ev,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+-SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_ev,
++SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_late_ev,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_PGA_E("Late DAC1L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+@@ -1299,8 +1465,10 @@ SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev)
+ };
+
+ static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
+-SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
+-SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0),
++SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev,
++ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
++SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev,
++ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0,
+ left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)),
+@@ -1353,30 +1521,30 @@ SND_SOC_DAPM_SUPPLY("VMID", SND_SOC_NOPM, 0, 0, vmid_event,
+ SND_SOC_DAPM_SUPPLY("CLK_SYS", SND_SOC_NOPM, 0, 0, clk_sys_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+-SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8994_CLOCKING_1, 3, 0, NULL, 0),
+-SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0),
+-SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0),
++SND_SOC_DAPM_SUPPLY("DSP1CLK", SND_SOC_NOPM, 3, 0, NULL, 0),
++SND_SOC_DAPM_SUPPLY("DSP2CLK", SND_SOC_NOPM, 2, 0, NULL, 0),
++SND_SOC_DAPM_SUPPLY("DSPINTCLK", SND_SOC_NOPM, 1, 0, NULL, 0),
+
+ SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL,
+- 0, WM8994_POWER_MANAGEMENT_4, 9, 0),
++ 0, SND_SOC_NOPM, 9, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL,
+- 0, WM8994_POWER_MANAGEMENT_4, 8, 0),
++ 0, SND_SOC_NOPM, 8, 0),
+ SND_SOC_DAPM_AIF_IN_E("AIF1DAC1L", NULL, 0,
+- WM8994_POWER_MANAGEMENT_5, 9, 0, wm8958_aif_ev,
++ SND_SOC_NOPM, 9, 0, wm8958_aif_ev,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_AIF_IN_E("AIF1DAC1R", NULL, 0,
+- WM8994_POWER_MANAGEMENT_5, 8, 0, wm8958_aif_ev,
++ SND_SOC_NOPM, 8, 0, wm8958_aif_ev,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", NULL,
+- 0, WM8994_POWER_MANAGEMENT_4, 11, 0),
++ 0, SND_SOC_NOPM, 11, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", NULL,
+- 0, WM8994_POWER_MANAGEMENT_4, 10, 0),
++ 0, SND_SOC_NOPM, 10, 0),
+ SND_SOC_DAPM_AIF_IN_E("AIF1DAC2L", NULL, 0,
+- WM8994_POWER_MANAGEMENT_5, 11, 0, wm8958_aif_ev,
++ SND_SOC_NOPM, 11, 0, wm8958_aif_ev,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_AIF_IN_E("AIF1DAC2R", NULL, 0,
+- WM8994_POWER_MANAGEMENT_5, 10, 0, wm8958_aif_ev,
++ SND_SOC_NOPM, 10, 0, wm8958_aif_ev,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MIXER("AIF1ADC1L Mixer", SND_SOC_NOPM, 0, 0,
+@@ -1403,14 +1571,14 @@ SND_SOC_DAPM_MIXER("DAC1R Mixer", SND_SOC_NOPM, 0, 0,
+ dac1r_mix, ARRAY_SIZE(dac1r_mix)),
+
+ SND_SOC_DAPM_AIF_OUT("AIF2ADCL", NULL, 0,
+- WM8994_POWER_MANAGEMENT_4, 13, 0),
++ SND_SOC_NOPM, 13, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF2ADCR", NULL, 0,
+- WM8994_POWER_MANAGEMENT_4, 12, 0),
++ SND_SOC_NOPM, 12, 0),
+ SND_SOC_DAPM_AIF_IN_E("AIF2DACL", NULL, 0,
+- WM8994_POWER_MANAGEMENT_5, 13, 0, wm8958_aif_ev,
++ SND_SOC_NOPM, 13, 0, wm8958_aif_ev,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_AIF_IN_E("AIF2DACR", NULL, 0,
+- WM8994_POWER_MANAGEMENT_5, 12, 0, wm8958_aif_ev,
++ SND_SOC_NOPM, 12, 0, wm8958_aif_ev,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+ SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index ea909c5..90e93bf 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -69,6 +69,7 @@ static int dapm_up_seq[] = {
+ [snd_soc_dapm_out_drv] = 10,
+ [snd_soc_dapm_hp] = 10,
+ [snd_soc_dapm_spk] = 10,
++ [snd_soc_dapm_line] = 10,
+ [snd_soc_dapm_post] = 11,
+ };
+
+@@ -77,6 +78,7 @@ static int dapm_down_seq[] = {
+ [snd_soc_dapm_adc] = 1,
+ [snd_soc_dapm_hp] = 2,
+ [snd_soc_dapm_spk] = 2,
++ [snd_soc_dapm_line] = 2,
+ [snd_soc_dapm_out_drv] = 2,
+ [snd_soc_dapm_pga] = 4,
+ [snd_soc_dapm_mixer_named_ctl] = 5,
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index adb372d..e0a0970 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -237,8 +237,8 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
+ * mis-adjust symbol addresses when computing
+ * the history counter to increment.
+ */
+- if (he->ms.map != entry->ms.map) {
+- he->ms.map = entry->ms.map;
++ if (he->ms.map != entry.ms.map) {
++ he->ms.map = entry.ms.map;
+ if (he->ms.map)
+ he->ms.map->referenced = true;
+ }
+diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
+index a195c07..fd817a2 100644
+--- a/virt/kvm/iommu.c
++++ b/virt/kvm/iommu.c
+@@ -309,6 +309,11 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
+ }
+ }
+
++void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
++{
++ kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
++}
++
+ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
+ {
+ int i, idx;
+@@ -317,10 +322,9 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
+ idx = srcu_read_lock(&kvm->srcu);
+ slots = kvm_memslots(kvm);
+
+- for (i = 0; i < slots->nmemslots; i++) {
+- kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
+- slots->memslots[i].npages);
+- }
++ for (i = 0; i < slots->nmemslots; i++)
++ kvm_iommu_unmap_pages(kvm, &slots->memslots[i]);
++
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ return 0;
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index d9cfb78..e401c1b 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -802,12 +802,13 @@ skip_lpage:
+ if (r)
+ goto out_free;
+
+- /* map the pages in iommu page table */
++ /* map/unmap the pages in iommu page table */
+ if (npages) {
+ r = kvm_iommu_map_pages(kvm, &new);
+ if (r)
+ goto out_free;
+- }
++ } else
++ kvm_iommu_unmap_pages(kvm, &old);
+
+ r = -ENOMEM;
+ slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
diff --git a/1017_linux-3.2.18.patch b/1017_linux-3.2.18.patch
new file mode 100644
index 00000000..7a64c070
--- /dev/null
+++ b/1017_linux-3.2.18.patch
@@ -0,0 +1,1791 @@
+diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
+index 589f2da..a4399f5 100644
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -137,7 +137,7 @@ tcp_adv_win_scale - INTEGER
+ (if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
+ if it is <= 0.
+ Possible values are [-31, 31], inclusive.
+- Default: 2
++ Default: 1
+
+ tcp_allowed_congestion_control - STRING
+ Show/set the congestion control choices available to non-privileged
+@@ -397,7 +397,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
+ net.core.rmem_max. Calling setsockopt() with SO_RCVBUF disables
+ automatic tuning of that socket's receive buffer size, in which
+ case this value is ignored.
+- Default: between 87380B and 4MB, depending on RAM size.
++ Default: between 87380B and 6MB, depending on RAM size.
+
+ tcp_sack - BOOLEAN
+ Enable select acknowledgments (SACKS).
+diff --git a/Makefile b/Makefile
+index 4c4efa3..add68f1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 17
++SUBLEVEL = 18
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
+index 1e2d332..c88420d 100644
+--- a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
++++ b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h
+@@ -941,10 +941,10 @@
+ #define OMAP4_DSI2_LANEENABLE_MASK (0x7 << 29)
+ #define OMAP4_DSI1_LANEENABLE_SHIFT 24
+ #define OMAP4_DSI1_LANEENABLE_MASK (0x1f << 24)
+-#define OMAP4_DSI2_PIPD_SHIFT 19
+-#define OMAP4_DSI2_PIPD_MASK (0x1f << 19)
+-#define OMAP4_DSI1_PIPD_SHIFT 14
+-#define OMAP4_DSI1_PIPD_MASK (0x1f << 14)
++#define OMAP4_DSI1_PIPD_SHIFT 19
++#define OMAP4_DSI1_PIPD_MASK (0x1f << 19)
++#define OMAP4_DSI2_PIPD_SHIFT 14
++#define OMAP4_DSI2_PIPD_MASK (0x1f << 14)
+
+ /* CONTROL_MCBSPLP */
+ #define OMAP4_ALBCTRLRX_FSX_SHIFT 31
+diff --git a/arch/arm/mach-orion5x/mpp.h b/arch/arm/mach-orion5x/mpp.h
+index eac6897..db70e79 100644
+--- a/arch/arm/mach-orion5x/mpp.h
++++ b/arch/arm/mach-orion5x/mpp.h
+@@ -65,8 +65,8 @@
+ #define MPP8_GIGE MPP(8, 0x1, 0, 0, 1, 1, 1)
+
+ #define MPP9_UNUSED MPP(9, 0x0, 0, 0, 1, 1, 1)
+-#define MPP9_GPIO MPP(9, 0x0, 0, 0, 1, 1, 1)
+-#define MPP9_GIGE MPP(9, 0x1, 1, 1, 1, 1, 1)
++#define MPP9_GPIO MPP(9, 0x0, 1, 1, 1, 1, 1)
++#define MPP9_GIGE MPP(9, 0x1, 0, 0, 1, 1, 1)
+
+ #define MPP10_UNUSED MPP(10, 0x0, 0, 0, 1, 1, 1)
+ #define MPP10_GPIO MPP(10, 0x0, 1, 1, 1, 1, 1)
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index aa33949..4b0bc37 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -267,7 +267,9 @@ good_area:
+ return fault;
+
+ check_stack:
+- if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
++ /* Don't allow expansion below FIRST_USER_ADDRESS */
++ if (vma->vm_flags & VM_GROWSDOWN &&
++ addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
+ goto good_area;
+ out:
+ return fault;
+diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
+index 8f3ccdd..8ea07e4 100644
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -11,6 +11,7 @@
+ #include <linux/types.h>
+ #include <linux/cpu.h>
+ #include <linux/cpu_pm.h>
++#include <linux/hardirq.h>
+ #include <linux/kernel.h>
+ #include <linux/notifier.h>
+ #include <linux/signal.h>
+@@ -428,7 +429,10 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
+
+ static void vfp_enable(void *unused)
+ {
+- u32 access = get_copro_access();
++ u32 access;
++
++ BUG_ON(preemptible());
++ access = get_copro_access();
+
+ /*
+ * Enable full access to VFP (cp10 and cp11)
+@@ -556,7 +560,7 @@ static int __init vfp_init(void)
+ unsigned int cpu_arch = cpu_architecture();
+
+ if (cpu_arch >= CPU_ARCH_ARMv6)
+- vfp_enable(NULL);
++ on_each_cpu(vfp_enable, NULL, 1);
+
+ /*
+ * First check that there is a VFP that we can use.
+@@ -577,8 +581,6 @@ static int __init vfp_init(void)
+ } else {
+ hotcpu_notifier(vfp_hotplug, 0);
+
+- smp_call_function(vfp_enable, NULL, 1);
+-
+ VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */
+ printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
+ (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
+diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
+index 7617248..7a3bd25 100644
+--- a/arch/ia64/include/asm/unistd.h
++++ b/arch/ia64/include/asm/unistd.h
+@@ -323,11 +323,12 @@
+ #define __NR_sendmmsg 1331
+ #define __NR_process_vm_readv 1332
+ #define __NR_process_vm_writev 1333
++#define __NR_accept4 1334
+
+ #ifdef __KERNEL__
+
+
+-#define NR_syscalls 310 /* length of syscall table */
++#define NR_syscalls 311 /* length of syscall table */
+
+ /*
+ * The following defines stop scripts/checksyscalls.sh from complaining about
+diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
+index 5b31d46..1ccbe12 100644
+--- a/arch/ia64/kernel/entry.S
++++ b/arch/ia64/kernel/entry.S
+@@ -1779,6 +1779,7 @@ sys_call_table:
+ data8 sys_sendmmsg
+ data8 sys_process_vm_readv
+ data8 sys_process_vm_writev
++ data8 sys_accept4
+
+ .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
+ #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
+diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c
+index 38d48a5..9708851 100644
+--- a/arch/sparc/kernel/central.c
++++ b/arch/sparc/kernel/central.c
+@@ -269,4 +269,4 @@ static int __init sunfire_init(void)
+ return 0;
+ }
+
+-subsys_initcall(sunfire_init);
++fs_initcall(sunfire_init);
+diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
+index b57a594..874162a 100644
+--- a/arch/sparc/mm/ultra.S
++++ b/arch/sparc/mm/ultra.S
+@@ -495,11 +495,11 @@ xcall_fetch_glob_regs:
+ stx %o7, [%g1 + GR_SNAP_O7]
+ stx %i7, [%g1 + GR_SNAP_I7]
+ /* Don't try this at home kids... */
+- rdpr %cwp, %g2
+- sub %g2, 1, %g7
++ rdpr %cwp, %g3
++ sub %g3, 1, %g7
+ wrpr %g7, %cwp
+ mov %i7, %g7
+- wrpr %g2, %cwp
++ wrpr %g3, %cwp
+ stx %g7, [%g1 + GR_SNAP_RPC]
+ sethi %hi(trap_block), %g7
+ or %g7, %lo(trap_block), %g7
+diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
+index a7869ad..41459d8 100644
+--- a/arch/tile/kernel/compat_signal.c
++++ b/arch/tile/kernel/compat_signal.c
+@@ -406,19 +406,17 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ * Set up registers for signal handler.
+ * Registers that we don't modify keep the value they had from
+ * user-space at the time we took the signal.
++ * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
++ * since some things rely on this (e.g. glibc's debug/segfault.c).
+ */
+ regs->pc = ptr_to_compat_reg(ka->sa.sa_handler);
+ regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
+ regs->sp = ptr_to_compat_reg(frame);
+ regs->lr = restorer;
+ regs->regs[0] = (unsigned long) usig;
+-
+- if (ka->sa.sa_flags & SA_SIGINFO) {
+- /* Need extra arguments, so mark to restore caller-saves. */
+- regs->regs[1] = ptr_to_compat_reg(&frame->info);
+- regs->regs[2] = ptr_to_compat_reg(&frame->uc);
+- regs->flags |= PT_FLAGS_CALLER_SAVES;
+- }
++ regs->regs[1] = ptr_to_compat_reg(&frame->info);
++ regs->regs[2] = ptr_to_compat_reg(&frame->uc);
++ regs->flags |= PT_FLAGS_CALLER_SAVES;
+
+ /*
+ * Notify any tracer that was single-stepping it.
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
+index 6d16b4b..7e3002b 100644
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -173,6 +173,7 @@ config CRYPTO_DEV_MV_CESA
+ select CRYPTO_ALGAPI
+ select CRYPTO_AES
+ select CRYPTO_BLKCIPHER2
++ select CRYPTO_HASH
+ help
+ This driver allows you to utilize the Cryptographic Engines and
+ Security Accelerator (CESA) which can be found on the Marvell Orion
+diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
+index 461958f..271fd49 100644
+--- a/drivers/gpio/gpio-ml-ioh.c
++++ b/drivers/gpio/gpio-ml-ioh.c
+@@ -448,6 +448,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
+ chip->reg = chip->base;
+ chip->ch = i;
+ mutex_init(&chip->lock);
++ spin_lock_init(&chip->spinlock);
+ ioh_gpio_setup(chip, num_ports[i]);
+ ret = gpiochip_add(&chip->gpio);
+ if (ret) {
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index 801d92d..a417f94 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -698,8 +698,8 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
+ return 0;
+
+ m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
+- request_module("scsi_dh_%s", m->hw_handler_name);
+- if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
++ if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
++ "scsi_dh_%s", m->hw_handler_name)) {
+ ti->error = "unknown hardware handler type";
+ ret = -EINVAL;
+ goto fail;
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 065ab4f..adcd850 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -392,6 +392,8 @@ void mddev_suspend(struct mddev *mddev)
+ synchronize_rcu();
+ wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
+ mddev->pers->quiesce(mddev, 1);
++
++ del_timer_sync(&mddev->safemode_timer);
+ }
+ EXPORT_SYMBOL_GPL(mddev_suspend);
+
+diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
+index cf10ecf..ed77c6d 100644
+--- a/drivers/media/rc/ene_ir.c
++++ b/drivers/media/rc/ene_ir.c
+@@ -1018,22 +1018,6 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
+
+ spin_lock_init(&dev->hw_lock);
+
+- /* claim the resources */
+- error = -EBUSY;
+- dev->hw_io = pnp_port_start(pnp_dev, 0);
+- if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
+- dev->hw_io = -1;
+- dev->irq = -1;
+- goto error;
+- }
+-
+- dev->irq = pnp_irq(pnp_dev, 0);
+- if (request_irq(dev->irq, ene_isr,
+- IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
+- dev->irq = -1;
+- goto error;
+- }
+-
+ pnp_set_drvdata(pnp_dev, dev);
+ dev->pnp_dev = pnp_dev;
+
+@@ -1086,6 +1070,22 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
+ device_set_wakeup_capable(&pnp_dev->dev, true);
+ device_set_wakeup_enable(&pnp_dev->dev, true);
+
++ /* claim the resources */
++ error = -EBUSY;
++ dev->hw_io = pnp_port_start(pnp_dev, 0);
++ if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
++ dev->hw_io = -1;
++ dev->irq = -1;
++ goto error;
++ }
++
++ dev->irq = pnp_irq(pnp_dev, 0);
++ if (request_irq(dev->irq, ene_isr,
++ IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
++ dev->irq = -1;
++ goto error;
++ }
++
+ error = rc_register_device(rdev);
+ if (error < 0)
+ goto error;
+diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
+index 7f7079b..4218f73 100644
+--- a/drivers/media/rc/fintek-cir.c
++++ b/drivers/media/rc/fintek-cir.c
+@@ -504,16 +504,6 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
+
+ spin_lock_init(&fintek->fintek_lock);
+
+- ret = -EBUSY;
+- /* now claim resources */
+- if (!request_region(fintek->cir_addr,
+- fintek->cir_port_len, FINTEK_DRIVER_NAME))
+- goto failure;
+-
+- if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
+- FINTEK_DRIVER_NAME, (void *)fintek))
+- goto failure;
+-
+ pnp_set_drvdata(pdev, fintek);
+ fintek->pdev = pdev;
+
+@@ -548,6 +538,16 @@ static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id
+ /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
+ rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
+
++ ret = -EBUSY;
++ /* now claim resources */
++ if (!request_region(fintek->cir_addr,
++ fintek->cir_port_len, FINTEK_DRIVER_NAME))
++ goto failure;
++
++ if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
++ FINTEK_DRIVER_NAME, (void *)fintek))
++ goto failure;
++
+ ret = rc_register_device(rdev);
+ if (ret)
+ goto failure;
+diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
+index 682009d..0e49c99 100644
+--- a/drivers/media/rc/ite-cir.c
++++ b/drivers/media/rc/ite-cir.c
+@@ -1515,16 +1515,6 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
+ /* initialize raw event */
+ init_ir_raw_event(&itdev->rawir);
+
+- ret = -EBUSY;
+- /* now claim resources */
+- if (!request_region(itdev->cir_addr,
+- dev_desc->io_region_size, ITE_DRIVER_NAME))
+- goto failure;
+-
+- if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED,
+- ITE_DRIVER_NAME, (void *)itdev))
+- goto failure;
+-
+ /* set driver data into the pnp device */
+ pnp_set_drvdata(pdev, itdev);
+ itdev->pdev = pdev;
+@@ -1600,6 +1590,16 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
+ rdev->driver_name = ITE_DRIVER_NAME;
+ rdev->map_name = RC_MAP_RC6_MCE;
+
++ ret = -EBUSY;
++ /* now claim resources */
++ if (!request_region(itdev->cir_addr,
++ dev_desc->io_region_size, ITE_DRIVER_NAME))
++ goto failure;
++
++ if (request_irq(itdev->cir_irq, ite_cir_isr, IRQF_SHARED,
++ ITE_DRIVER_NAME, (void *)itdev))
++ goto failure;
++
+ ret = rc_register_device(rdev);
+ if (ret)
+ goto failure;
+diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
+index 144f3f5..8b2c071 100644
+--- a/drivers/media/rc/nuvoton-cir.c
++++ b/drivers/media/rc/nuvoton-cir.c
+@@ -1021,24 +1021,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
+ spin_lock_init(&nvt->nvt_lock);
+ spin_lock_init(&nvt->tx.lock);
+
+- ret = -EBUSY;
+- /* now claim resources */
+- if (!request_region(nvt->cir_addr,
+- CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
+- goto failure;
+-
+- if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
+- NVT_DRIVER_NAME, (void *)nvt))
+- goto failure;
+-
+- if (!request_region(nvt->cir_wake_addr,
+- CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
+- goto failure;
+-
+- if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
+- NVT_DRIVER_NAME, (void *)nvt))
+- goto failure;
+-
+ pnp_set_drvdata(pdev, nvt);
+ nvt->pdev = pdev;
+
+@@ -1085,6 +1067,24 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
+ rdev->tx_resolution = XYZ;
+ #endif
+
++ ret = -EBUSY;
++ /* now claim resources */
++ if (!request_region(nvt->cir_addr,
++ CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
++ goto failure;
++
++ if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
++ NVT_DRIVER_NAME, (void *)nvt))
++ goto failure;
++
++ if (!request_region(nvt->cir_wake_addr,
++ CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
++ goto failure;
++
++ if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
++ NVT_DRIVER_NAME, (void *)nvt))
++ goto failure;
++
+ ret = rc_register_device(rdev);
+ if (ret)
+ goto failure;
+diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
+index a7e7d6f..4591770 100644
+--- a/drivers/media/rc/winbond-cir.c
++++ b/drivers/media/rc/winbond-cir.c
+@@ -991,39 +991,10 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
+ "(w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n",
+ data->wbase, data->ebase, data->sbase, data->irq);
+
+- if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
+- dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
+- data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1);
+- err = -EBUSY;
+- goto exit_free_data;
+- }
+-
+- if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) {
+- dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
+- data->ebase, data->ebase + EHFUNC_IOMEM_LEN - 1);
+- err = -EBUSY;
+- goto exit_release_wbase;
+- }
+-
+- if (!request_region(data->sbase, SP_IOMEM_LEN, DRVNAME)) {
+- dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
+- data->sbase, data->sbase + SP_IOMEM_LEN - 1);
+- err = -EBUSY;
+- goto exit_release_ebase;
+- }
+-
+- err = request_irq(data->irq, wbcir_irq_handler,
+- IRQF_DISABLED, DRVNAME, device);
+- if (err) {
+- dev_err(dev, "Failed to claim IRQ %u\n", data->irq);
+- err = -EBUSY;
+- goto exit_release_sbase;
+- }
+-
+ led_trigger_register_simple("cir-tx", &data->txtrigger);
+ if (!data->txtrigger) {
+ err = -ENOMEM;
+- goto exit_free_irq;
++ goto exit_free_data;
+ }
+
+ led_trigger_register_simple("cir-rx", &data->rxtrigger);
+@@ -1062,9 +1033,38 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
+ data->dev->priv = data;
+ data->dev->dev.parent = &device->dev;
+
++ if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
++ dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
++ data->wbase, data->wbase + WAKEUP_IOMEM_LEN - 1);
++ err = -EBUSY;
++ goto exit_free_rc;
++ }
++
++ if (!request_region(data->ebase, EHFUNC_IOMEM_LEN, DRVNAME)) {
++ dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
++ data->ebase, data->ebase + EHFUNC_IOMEM_LEN - 1);
++ err = -EBUSY;
++ goto exit_release_wbase;
++ }
++
++ if (!request_region(data->sbase, SP_IOMEM_LEN, DRVNAME)) {
++ dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
++ data->sbase, data->sbase + SP_IOMEM_LEN - 1);
++ err = -EBUSY;
++ goto exit_release_ebase;
++ }
++
++ err = request_irq(data->irq, wbcir_irq_handler,
++ IRQF_DISABLED, DRVNAME, device);
++ if (err) {
++ dev_err(dev, "Failed to claim IRQ %u\n", data->irq);
++ err = -EBUSY;
++ goto exit_release_sbase;
++ }
++
+ err = rc_register_device(data->dev);
+ if (err)
+- goto exit_free_rc;
++ goto exit_free_irq;
+
+ device_init_wakeup(&device->dev, 1);
+
+@@ -1072,14 +1072,6 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
+
+ return 0;
+
+-exit_free_rc:
+- rc_free_device(data->dev);
+-exit_unregister_led:
+- led_classdev_unregister(&data->led);
+-exit_unregister_rxtrigger:
+- led_trigger_unregister_simple(data->rxtrigger);
+-exit_unregister_txtrigger:
+- led_trigger_unregister_simple(data->txtrigger);
+ exit_free_irq:
+ free_irq(data->irq, device);
+ exit_release_sbase:
+@@ -1088,6 +1080,14 @@ exit_release_ebase:
+ release_region(data->ebase, EHFUNC_IOMEM_LEN);
+ exit_release_wbase:
+ release_region(data->wbase, WAKEUP_IOMEM_LEN);
++exit_free_rc:
++ rc_free_device(data->dev);
++exit_unregister_led:
++ led_classdev_unregister(&data->led);
++exit_unregister_rxtrigger:
++ led_trigger_unregister_simple(data->rxtrigger);
++exit_unregister_txtrigger:
++ led_trigger_unregister_simple(data->txtrigger);
+ exit_free_data:
+ kfree(data);
+ pnp_set_drvdata(device, NULL);
+diff --git a/drivers/media/video/marvell-ccic/mmp-driver.c b/drivers/media/video/marvell-ccic/mmp-driver.c
+index fb0b124..a6b7657 100644
+--- a/drivers/media/video/marvell-ccic/mmp-driver.c
++++ b/drivers/media/video/marvell-ccic/mmp-driver.c
+@@ -175,7 +175,6 @@ static int mmpcam_probe(struct platform_device *pdev)
+ INIT_LIST_HEAD(&cam->devlist);
+
+ mcam = &cam->mcam;
+- mcam->platform = MHP_Armada610;
+ mcam->plat_power_up = mmpcam_power_up;
+ mcam->plat_power_down = mmpcam_power_down;
+ mcam->dev = &pdev->dev;
+diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
+index 2cc3b91..327a81f 100644
+--- a/drivers/media/video/s5p-fimc/fimc-capture.c
++++ b/drivers/media/video/s5p-fimc/fimc-capture.c
+@@ -1304,7 +1304,7 @@ static int fimc_subdev_set_crop(struct v4l2_subdev *sd,
+ fimc_capture_try_crop(ctx, r, crop->pad);
+
+ if (crop->which == V4L2_SUBDEV_FORMAT_TRY) {
+- mutex_lock(&fimc->lock);
++ mutex_unlock(&fimc->lock);
+ *v4l2_subdev_get_try_crop(fh, crop->pad) = *r;
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 3d55883..2dcac28 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -879,8 +879,13 @@ static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
+ if (sblk->status & SD_STATUS_LINK_CHG)
+ work_exists = 1;
+ }
+- /* check for RX/TX work to do */
+- if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
++
++ /* check for TX work to do */
++ if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
++ work_exists = 1;
++
++ /* check for RX work to do */
++ if (tnapi->rx_rcb_prod_idx &&
+ *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
+ work_exists = 1;
+
+@@ -5895,6 +5900,9 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
+ return work_done;
+ }
+
++ if (!tnapi->rx_rcb_prod_idx)
++ return work_done;
++
+ /* run RX thread, within the bounds set by NAPI.
+ * All RX "locking" is done by ensuring outside
+ * code synchronizes with tg3->napi.poll()
+@@ -7448,6 +7456,12 @@ static int tg3_alloc_consistent(struct tg3 *tp)
+ */
+ switch (i) {
+ default:
++ if (tg3_flag(tp, ENABLE_RSS)) {
++ tnapi->rx_rcb_prod_idx = NULL;
++ break;
++ }
++ /* Fall through */
++ case 1:
+ tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
+ break;
+ case 2:
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index cf480b5..de00805 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -494,7 +494,11 @@ out:
+ static void e1000_down_and_stop(struct e1000_adapter *adapter)
+ {
+ set_bit(__E1000_DOWN, &adapter->flags);
+- cancel_work_sync(&adapter->reset_task);
++
++ /* Only kill reset task if adapter is not resetting */
++ if (!test_bit(__E1000_RESETTING, &adapter->flags))
++ cancel_work_sync(&adapter->reset_task);
++
+ cancel_delayed_work_sync(&adapter->watchdog_task);
+ cancel_delayed_work_sync(&adapter->phy_info_task);
+ cancel_delayed_work_sync(&adapter->fifo_stall_task);
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index 7eb8a00..65c51ff 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -2475,8 +2475,13 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
+ skb_copy_from_linear_data(re->skb, skb->data, length);
+ skb->ip_summed = re->skb->ip_summed;
+ skb->csum = re->skb->csum;
++ skb->rxhash = re->skb->rxhash;
++ skb->vlan_tci = re->skb->vlan_tci;
++
+ pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
+ length, PCI_DMA_FROMDEVICE);
++ re->skb->vlan_tci = 0;
++ re->skb->rxhash = 0;
+ re->skb->ip_summed = CHECKSUM_NONE;
+ skb_put(skb, length);
+ }
+@@ -2561,9 +2566,6 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
+ struct sk_buff *skb = NULL;
+ u16 count = (status & GMR_FS_LEN) >> 16;
+
+- if (status & GMR_FS_VLAN)
+- count -= VLAN_HLEN; /* Account for vlan tag */
+-
+ netif_printk(sky2, rx_status, KERN_DEBUG, dev,
+ "rx slot %u status 0x%x len %d\n",
+ sky2->rx_next, status, length);
+@@ -2571,6 +2573,9 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
+ sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
+ prefetch(sky2->rx_ring + sky2->rx_next);
+
++ if (vlan_tx_tag_present(re->skb))
++ count -= VLAN_HLEN; /* Account for vlan tag */
++
+ /* This chip has hardware problems that generates bogus status.
+ * So do only marginal checking and expect higher level protocols
+ * to handle crap frames.
+@@ -2628,11 +2633,8 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
+ }
+
+ static inline void sky2_skb_rx(const struct sky2_port *sky2,
+- u32 status, struct sk_buff *skb)
++ struct sk_buff *skb)
+ {
+- if (status & GMR_FS_VLAN)
+- __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
+-
+ if (skb->ip_summed == CHECKSUM_NONE)
+ netif_receive_skb(skb);
+ else
+@@ -2686,6 +2688,14 @@ static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
+ }
+ }
+
++static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
++{
++ struct sk_buff *skb;
++
++ skb = sky2->rx_ring[sky2->rx_next].skb;
++ __vlan_hwaccel_put_tag(skb, be16_to_cpu(length));
++}
++
+ static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
+ {
+ struct sk_buff *skb;
+@@ -2744,8 +2754,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+-
+- sky2_skb_rx(sky2, status, skb);
++ sky2_skb_rx(sky2, skb);
+
+ /* Stop after net poll weight */
+ if (++work_done >= to_do)
+@@ -2753,11 +2762,11 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
+ break;
+
+ case OP_RXVLAN:
+- sky2->rx_tag = length;
++ sky2_rx_tag(sky2, length);
+ break;
+
+ case OP_RXCHKSVLAN:
+- sky2->rx_tag = length;
++ sky2_rx_tag(sky2, length);
+ /* fall through */
+ case OP_RXCHKS:
+ if (likely(dev->features & NETIF_F_RXCSUM))
+diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
+index ff6f58b..3c896ce 100644
+--- a/drivers/net/ethernet/marvell/sky2.h
++++ b/drivers/net/ethernet/marvell/sky2.h
+@@ -2241,7 +2241,6 @@ struct sky2_port {
+ u16 rx_pending;
+ u16 rx_data_size;
+ u16 rx_nfrags;
+- u16 rx_tag;
+
+ struct {
+ unsigned long last;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+index 41e6b33..c07cfe9 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+@@ -22,6 +22,7 @@
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+ *******************************************************************************/
+
++#include <linux/kernel.h>
+ #include <linux/io.h>
+ #include "mmc.h"
+
+diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
+index ceab215..c508d6a 100644
+--- a/drivers/net/ethernet/sun/sungem.c
++++ b/drivers/net/ethernet/sun/sungem.c
+@@ -2340,7 +2340,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
+ netif_device_detach(dev);
+
+ /* Switch off chip, remember WOL setting */
+- gp->asleep_wol = gp->wake_on_lan;
++ gp->asleep_wol = !!gp->wake_on_lan;
+ gem_do_stop(dev, gp->asleep_wol);
+
+ /* Unlock the network stack */
+diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
+index c81f136..b142300 100644
+--- a/drivers/net/phy/icplus.c
++++ b/drivers/net/phy/icplus.c
+@@ -150,7 +150,8 @@ static int ip101a_config_init(struct phy_device *phydev)
+ /* Enable Auto Power Saving mode */
+ c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
+ c |= IP101A_APS_ON;
+- return c;
++
++ return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
+ }
+
+ static int ip175c_read_status(struct phy_device *phydev)
+diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
+index fda4be2..a9abee8 100644
+--- a/drivers/net/usb/asix.c
++++ b/drivers/net/usb/asix.c
+@@ -403,7 +403,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ u32 packet_len;
+ u32 padbytes = 0xffff0000;
+
+- padlen = ((skb->len + 4) % 512) ? 0 : 4;
++ padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;
+
+ if ((!skb_cloned(skb)) &&
+ ((headroom + tailroom) >= (4 + padlen))) {
+@@ -425,7 +425,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ cpu_to_le32s(&packet_len);
+ skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
+
+- if ((skb->len % 512) == 0) {
++ if (padlen) {
+ cpu_to_le32s(&padbytes);
+ memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
+ skb_put(skb, sizeof(padbytes));
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index 4fd4144..2ba40cf 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -83,6 +83,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
+ struct cdc_state *info = (void *) &dev->data;
+ int status;
+ int rndis;
++ bool android_rndis_quirk = false;
+ struct usb_driver *driver = driver_of(intf);
+ struct usb_cdc_mdlm_desc *desc = NULL;
+ struct usb_cdc_mdlm_detail_desc *detail = NULL;
+@@ -195,6 +196,11 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
+ info->control,
+ info->u->bSlaveInterface0,
+ info->data);
++ /* fall back to hard-wiring for RNDIS */
++ if (rndis) {
++ android_rndis_quirk = true;
++ goto next_desc;
++ }
+ goto bad_desc;
+ }
+ if (info->control != intf) {
+@@ -271,11 +277,15 @@ next_desc:
+ /* Microsoft ActiveSync based and some regular RNDIS devices lack the
+ * CDC descriptors, so we'll hard-wire the interfaces and not check
+ * for descriptors.
++ *
++ * Some Android RNDIS devices have a CDC Union descriptor pointing
++ * to non-existing interfaces. Ignore that and attempt the same
++ * hard-wired 0 and 1 interfaces.
+ */
+- if (rndis && !info->u) {
++ if (rndis && (!info->u || android_rndis_quirk)) {
+ info->control = usb_ifnum_to_if(dev->udev, 0);
+ info->data = usb_ifnum_to_if(dev->udev, 1);
+- if (!info->control || !info->data) {
++ if (!info->control || !info->data || info->control != intf) {
+ dev_dbg(&intf->dev,
+ "rndis: master #0/%p slave #1/%p\n",
+ info->control,
+@@ -472,6 +482,7 @@ static const struct driver_info wwan_info = {
+ /*-------------------------------------------------------------------------*/
+
+ #define HUAWEI_VENDOR_ID 0x12D1
++#define NOVATEL_VENDOR_ID 0x1410
+
+ static const struct usb_device_id products [] = {
+ /*
+@@ -589,6 +600,21 @@ static const struct usb_device_id products [] = {
+ * because of bugs/quirks in a given product (like Zaurus, above).
+ */
+ {
++ /* Novatel USB551L */
++ /* This match must come *before* the generic CDC-ETHER match so that
++ * we get FLAG_WWAN set on the device, since it's descriptors are
++ * generic CDC-ETHER.
++ */
++ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
++ | USB_DEVICE_ID_MATCH_PRODUCT
++ | USB_DEVICE_ID_MATCH_INT_INFO,
++ .idVendor = NOVATEL_VENDOR_ID,
++ .idProduct = 0xB001,
++ .bInterfaceClass = USB_CLASS_COMM,
++ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
++ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
++ .driver_info = (unsigned long)&wwan_info,
++}, {
+ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 750e330..b873b5d 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -281,17 +281,32 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
+ }
+ EXPORT_SYMBOL_GPL(usbnet_change_mtu);
+
++/* The caller must hold list->lock */
++static void __usbnet_queue_skb(struct sk_buff_head *list,
++ struct sk_buff *newsk, enum skb_state state)
++{
++ struct skb_data *entry = (struct skb_data *) newsk->cb;
++
++ __skb_queue_tail(list, newsk);
++ entry->state = state;
++}
++
+ /*-------------------------------------------------------------------------*/
+
+ /* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
+ * completion callbacks. 2.5 should have fixed those bugs...
+ */
+
+-static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_head *list)
++static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
++ struct sk_buff_head *list, enum skb_state state)
+ {
+ unsigned long flags;
++ enum skb_state old_state;
++ struct skb_data *entry = (struct skb_data *) skb->cb;
+
+ spin_lock_irqsave(&list->lock, flags);
++ old_state = entry->state;
++ entry->state = state;
+ __skb_unlink(skb, list);
+ spin_unlock(&list->lock);
+ spin_lock(&dev->done.lock);
+@@ -299,6 +314,7 @@ static void defer_bh(struct usbnet *dev, struct sk_buff *skb, struct sk_buff_hea
+ if (dev->done.qlen == 1)
+ tasklet_schedule(&dev->bh);
+ spin_unlock_irqrestore(&dev->done.lock, flags);
++ return old_state;
+ }
+
+ /* some work can't be done in tasklets, so we use keventd
+@@ -339,7 +355,6 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+ entry = (struct skb_data *) skb->cb;
+ entry->urb = urb;
+ entry->dev = dev;
+- entry->state = rx_start;
+ entry->length = 0;
+
+ usb_fill_bulk_urb (urb, dev->udev, dev->in,
+@@ -371,7 +386,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+ tasklet_schedule (&dev->bh);
+ break;
+ case 0:
+- __skb_queue_tail (&dev->rxq, skb);
++ __usbnet_queue_skb(&dev->rxq, skb, rx_start);
+ }
+ } else {
+ netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
+@@ -422,16 +437,17 @@ static void rx_complete (struct urb *urb)
+ struct skb_data *entry = (struct skb_data *) skb->cb;
+ struct usbnet *dev = entry->dev;
+ int urb_status = urb->status;
++ enum skb_state state;
+
+ skb_put (skb, urb->actual_length);
+- entry->state = rx_done;
++ state = rx_done;
+ entry->urb = NULL;
+
+ switch (urb_status) {
+ /* success */
+ case 0:
+ if (skb->len < dev->net->hard_header_len) {
+- entry->state = rx_cleanup;
++ state = rx_cleanup;
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
+ netif_dbg(dev, rx_err, dev->net,
+@@ -470,7 +486,7 @@ static void rx_complete (struct urb *urb)
+ "rx throttle %d\n", urb_status);
+ }
+ block:
+- entry->state = rx_cleanup;
++ state = rx_cleanup;
+ entry->urb = urb;
+ urb = NULL;
+ break;
+@@ -481,17 +497,18 @@ block:
+ // FALLTHROUGH
+
+ default:
+- entry->state = rx_cleanup;
++ state = rx_cleanup;
+ dev->net->stats.rx_errors++;
+ netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
+ break;
+ }
+
+- defer_bh(dev, skb, &dev->rxq);
++ state = defer_bh(dev, skb, &dev->rxq, state);
+
+ if (urb) {
+ if (netif_running (dev->net) &&
+- !test_bit (EVENT_RX_HALT, &dev->flags)) {
++ !test_bit (EVENT_RX_HALT, &dev->flags) &&
++ state != unlink_start) {
+ rx_submit (dev, urb, GFP_ATOMIC);
+ return;
+ }
+@@ -577,16 +594,23 @@ EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);
+ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
+ {
+ unsigned long flags;
+- struct sk_buff *skb, *skbnext;
++ struct sk_buff *skb;
+ int count = 0;
+
+ spin_lock_irqsave (&q->lock, flags);
+- skb_queue_walk_safe(q, skb, skbnext) {
++ while (!skb_queue_empty(q)) {
+ struct skb_data *entry;
+ struct urb *urb;
+ int retval;
+
+- entry = (struct skb_data *) skb->cb;
++ skb_queue_walk(q, skb) {
++ entry = (struct skb_data *) skb->cb;
++ if (entry->state != unlink_start)
++ goto found;
++ }
++ break;
++found:
++ entry->state = unlink_start;
+ urb = entry->urb;
+
+ /*
+@@ -1037,8 +1061,7 @@ static void tx_complete (struct urb *urb)
+ }
+
+ usb_autopm_put_interface_async(dev->intf);
+- entry->state = tx_done;
+- defer_bh(dev, skb, &dev->txq);
++ (void) defer_bh(dev, skb, &dev->txq, tx_done);
+ }
+
+ /*-------------------------------------------------------------------------*/
+@@ -1094,7 +1117,6 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
+ entry = (struct skb_data *) skb->cb;
+ entry->urb = urb;
+ entry->dev = dev;
+- entry->state = tx_start;
+ entry->length = length;
+
+ usb_fill_bulk_urb (urb, dev->udev, dev->out,
+@@ -1153,7 +1175,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
+ break;
+ case 0:
+ net->trans_start = jiffies;
+- __skb_queue_tail (&dev->txq, skb);
++ __usbnet_queue_skb(&dev->txq, skb, tx_start);
+ if (dev->txq.qlen >= TX_QLEN (dev))
+ netif_stop_queue (net);
+ }
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+index 7f27dbd..0515862 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+@@ -1053,17 +1053,13 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
+ }
+ /* either retransmit or send bar if ack not recd */
+ if (!ack_recd) {
+- struct ieee80211_tx_rate *txrate =
+- tx_info->status.rates;
+- if (retry && (txrate[0].count < (int)retry_limit)) {
++ if (retry && (ini->txretry[index] < (int)retry_limit)) {
+ ini->txretry[index]++;
+ ini->tx_in_transit--;
+ /*
+ * Use high prededence for retransmit to
+ * give some punch
+ */
+- /* brcms_c_txq_enq(wlc, scb, p,
+- * BRCMS_PRIO_TO_PREC(tid)); */
+ brcms_c_txq_enq(wlc, scb, p,
+ BRCMS_PRIO_TO_HI_PREC(tid));
+ } else {
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
+index f98becc..833cbef 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
+@@ -7879,7 +7879,7 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
+ if (wlc->hw->suspended_fifos) {
+ hdr = (struct ieee80211_hdr *)p->data;
+ if (ieee80211_is_beacon(hdr->frame_control))
+- brcms_b_mute(wlc->hw, false);
++ brcms_b_mute(wlc->hw, false, 0);
+ }
+
+ memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status));
+diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
+index 6176129..8cba82d 100644
+--- a/drivers/regulator/max8997.c
++++ b/drivers/regulator/max8997.c
+@@ -689,7 +689,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
+ }
+
+ new_val++;
+- } while (desc->min + desc->step + new_val <= desc->max);
++ } while (desc->min + desc->step * new_val <= desc->max);
+
+ new_idx = tmp_idx;
+ new_val = tmp_val;
+diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
+index 54b9d2e..1590dbd 100644
+--- a/drivers/spi/spi-topcliff-pch.c
++++ b/drivers/spi/spi-topcliff-pch.c
+@@ -196,6 +196,7 @@ struct pch_spi_data {
+ struct pch_spi_dma_ctrl dma;
+ int use_dma;
+ u8 irq_reg_sts;
++ int save_total_len;
+ };
+
+ /**
+@@ -823,11 +824,13 @@ static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
+ rx_dma_buf = data->dma.rx_buf_virt;
+ for (j = 0; j < data->bpw_len; j++)
+ *rx_buf++ = *rx_dma_buf++ & 0xFF;
++ data->cur_trans->rx_buf = rx_buf;
+ } else {
+ rx_sbuf = data->cur_trans->rx_buf;
+ rx_dma_sbuf = data->dma.rx_buf_virt;
+ for (j = 0; j < data->bpw_len; j++)
+ *rx_sbuf++ = *rx_dma_sbuf++;
++ data->cur_trans->rx_buf = rx_sbuf;
+ }
+ }
+
+@@ -853,6 +856,9 @@ static int pch_spi_start_transfer(struct pch_spi_data *data)
+ rtn = wait_event_interruptible_timeout(data->wait,
+ data->transfer_complete,
+ msecs_to_jiffies(2 * HZ));
++ if (!rtn)
++ dev_err(&data->master->dev,
++ "%s wait-event timeout\n", __func__);
+
+ dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
+ DMA_FROM_DEVICE);
+@@ -924,7 +930,8 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Get DMA's dev information */
+- dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(12, 0));
++ dma_dev = pci_get_bus_and_slot(data->board_dat->pdev->bus->number,
++ PCI_DEVFN(12, 0));
+
+ /* Set Tx DMA */
+ param = &dma->param_tx;
+@@ -988,6 +995,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
+ int i;
+ int size;
+ int rem;
++ int head;
+ unsigned long flags;
+ struct pch_spi_dma_ctrl *dma;
+
+@@ -1016,6 +1024,11 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
+ }
+ data->bpw_len = data->cur_trans->len / (*bpw / 8);
+
++ if (data->bpw_len > PCH_BUF_SIZE) {
++ data->bpw_len = PCH_BUF_SIZE;
++ data->cur_trans->len -= PCH_BUF_SIZE;
++ }
++
+ /* copy Tx Data */
+ if (data->cur_trans->tx_buf != NULL) {
+ if (*bpw == 8) {
+@@ -1030,10 +1043,17 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
+ *tx_dma_sbuf++ = *tx_sbuf++;
+ }
+ }
++
++ /* Calculate Rx parameter for DMA transmitting */
+ if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
+- num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
++ if (data->bpw_len % PCH_DMA_TRANS_SIZE) {
++ num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
++ rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
++ } else {
++ num = data->bpw_len / PCH_DMA_TRANS_SIZE;
++ rem = PCH_DMA_TRANS_SIZE;
++ }
+ size = PCH_DMA_TRANS_SIZE;
+- rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
+ } else {
+ num = 1;
+ size = data->bpw_len;
+@@ -1093,15 +1113,23 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
+ dma->nent = num;
+ dma->desc_rx = desc_rx;
+
+- /* TX */
+- if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
+- num = data->bpw_len / PCH_DMA_TRANS_SIZE;
++ /* Calculate Tx parameter for DMA transmitting */
++ if (data->bpw_len > PCH_MAX_FIFO_DEPTH) {
++ head = PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE;
++ if (data->bpw_len % PCH_DMA_TRANS_SIZE > 4) {
++ num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
++ rem = data->bpw_len % PCH_DMA_TRANS_SIZE - head;
++ } else {
++ num = data->bpw_len / PCH_DMA_TRANS_SIZE;
++ rem = data->bpw_len % PCH_DMA_TRANS_SIZE +
++ PCH_DMA_TRANS_SIZE - head;
++ }
+ size = PCH_DMA_TRANS_SIZE;
+- rem = 16;
+ } else {
+ num = 1;
+ size = data->bpw_len;
+ rem = data->bpw_len;
++ head = 0;
+ }
+
+ dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
+@@ -1111,11 +1139,17 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
+ for (i = 0; i < num; i++, sg++) {
+ if (i == 0) {
+ sg->offset = 0;
++ sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size + head,
++ sg->offset);
++ sg_dma_len(sg) = size + head;
++ } else if (i == (num - 1)) {
++ sg->offset = head + size * i;
++ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
+ sg->offset);
+ sg_dma_len(sg) = rem;
+ } else {
+- sg->offset = rem + size * (i - 1);
++ sg->offset = head + size * i;
+ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
+ sg->offset);
+@@ -1203,6 +1237,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
+ data->current_msg->spi->bits_per_word);
+ pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
+ do {
++ int cnt;
+ /* If we are already processing a message get the next
+ transfer structure from the message otherwise retrieve
+ the 1st transfer request from the message. */
+@@ -1222,11 +1257,28 @@ static void pch_spi_process_messages(struct work_struct *pwork)
+ }
+ spin_unlock(&data->lock);
+
++ if (!data->cur_trans->len)
++ goto out;
++ cnt = (data->cur_trans->len - 1) / PCH_BUF_SIZE + 1;
++ data->save_total_len = data->cur_trans->len;
+ if (data->use_dma) {
+- pch_spi_handle_dma(data, &bpw);
+- if (!pch_spi_start_transfer(data))
+- goto out;
+- pch_spi_copy_rx_data_for_dma(data, bpw);
++ int i;
++ char *save_rx_buf = data->cur_trans->rx_buf;
++ for (i = 0; i < cnt; i ++) {
++ pch_spi_handle_dma(data, &bpw);
++ if (!pch_spi_start_transfer(data)) {
++ data->transfer_complete = true;
++ data->current_msg->status = -EIO;
++ data->current_msg->complete
++ (data->current_msg->context);
++ data->bcurrent_msg_processing = false;
++ data->current_msg = NULL;
++ data->cur_trans = NULL;
++ goto out;
++ }
++ pch_spi_copy_rx_data_for_dma(data, bpw);
++ }
++ data->cur_trans->rx_buf = save_rx_buf;
+ } else {
+ pch_spi_set_tx(data, &bpw);
+ pch_spi_set_ir(data);
+@@ -1237,6 +1289,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
+ data->pkt_tx_buff = NULL;
+ }
+ /* increment message count */
++ data->cur_trans->len = data->save_total_len;
+ data->current_msg->actual_length += data->cur_trans->len;
+
+ dev_dbg(&data->master->dev,
+@@ -1389,6 +1442,7 @@ static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
+ master->num_chipselect = PCH_MAX_CS;
+ master->setup = pch_spi_setup;
+ master->transfer = pch_spi_transfer;
++ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+
+ data->board_dat = board_dat;
+ data->plat_dev = plat_dev;
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index b4864fb..cad8b92 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -170,6 +170,7 @@ static struct se_device *fd_create_virtdevice(
+ inode = file->f_mapping->host;
+ if (S_ISBLK(inode->i_mode)) {
+ struct request_queue *q;
++ unsigned long long dev_size;
+ /*
+ * Setup the local scope queue_limits from struct request_queue->limits
+ * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
+@@ -184,13 +185,12 @@ static struct se_device *fd_create_virtdevice(
+ * one (1) logical sector from underlying struct block_device
+ */
+ fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
+- fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
++ dev_size = (i_size_read(file->f_mapping->host) -
+ fd_dev->fd_block_size);
+
+ pr_debug("FILEIO: Using size: %llu bytes from struct"
+ " block_device blocks: %llu logical_block_size: %d\n",
+- fd_dev->fd_dev_size,
+- div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
++ dev_size, div_u64(dev_size, fd_dev->fd_block_size),
+ fd_dev->fd_block_size);
+ } else {
+ if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
+@@ -606,10 +606,20 @@ static u32 fd_get_device_type(struct se_device *dev)
+ static sector_t fd_get_blocks(struct se_device *dev)
+ {
+ struct fd_dev *fd_dev = dev->dev_ptr;
+- unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
+- dev->se_sub_dev->se_dev_attrib.block_size);
++ struct file *f = fd_dev->fd_file;
++ struct inode *i = f->f_mapping->host;
++ unsigned long long dev_size;
++ /*
++ * When using a file that references an underlying struct block_device,
++ * ensure dev_size is always based on the current inode size in order
++ * to handle underlying block_device resize operations.
++ */
++ if (S_ISBLK(i->i_mode))
++ dev_size = (i_size_read(i) - fd_dev->fd_block_size);
++ else
++ dev_size = fd_dev->fd_dev_size;
+
+- return blocks_long;
++ return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
+ }
+
+ static struct se_subsystem_api fileio_template = {
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index 6cf6ff4..b75bc92 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -223,6 +223,9 @@ int target_scsi2_reservation_release(struct se_task *task)
+ if (dev->dev_reserved_node_acl != sess->se_node_acl)
+ goto out_unlock;
+
++ if (dev->dev_res_bin_isid != sess->sess_bin_isid)
++ goto out_unlock;
++
+ dev->dev_reserved_node_acl = NULL;
+ dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+ if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
+index 8ddd133..d91fe44 100644
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -63,7 +63,6 @@ static void core_clear_initiator_node_from_tpg(
+ int i;
+ struct se_dev_entry *deve;
+ struct se_lun *lun;
+- struct se_lun_acl *acl, *acl_tmp;
+
+ spin_lock_irq(&nacl->device_list_lock);
+ for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+@@ -84,28 +83,7 @@ static void core_clear_initiator_node_from_tpg(
+ core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
+ TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+
+- spin_lock(&lun->lun_acl_lock);
+- list_for_each_entry_safe(acl, acl_tmp,
+- &lun->lun_acl_list, lacl_list) {
+- if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
+- (acl->mapped_lun == deve->mapped_lun))
+- break;
+- }
+-
+- if (!acl) {
+- pr_err("Unable to locate struct se_lun_acl for %s,"
+- " mapped_lun: %u\n", nacl->initiatorname,
+- deve->mapped_lun);
+- spin_unlock(&lun->lun_acl_lock);
+- spin_lock_irq(&nacl->device_list_lock);
+- continue;
+- }
+-
+- list_del(&acl->lacl_list);
+- spin_unlock(&lun->lun_acl_lock);
+-
+ spin_lock_irq(&nacl->device_list_lock);
+- kfree(acl);
+ }
+ spin_unlock_irq(&nacl->device_list_lock);
+ }
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 93a00d8..4410ae7 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -487,18 +487,19 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
+ ext4_free_blocks(handle, inode, bh, 0, 1,
+ EXT4_FREE_BLOCKS_METADATA |
+ EXT4_FREE_BLOCKS_FORGET);
++ unlock_buffer(bh);
+ } else {
+ le32_add_cpu(&BHDR(bh)->h_refcount, -1);
++ if (ce)
++ mb_cache_entry_release(ce);
++ unlock_buffer(bh);
+ error = ext4_handle_dirty_metadata(handle, inode, bh);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+ dquot_free_block(inode, 1);
+ ea_bdebug(bh, "refcount now=%d; releasing",
+ le32_to_cpu(BHDR(bh)->h_refcount));
+- if (ce)
+- mb_cache_entry_release(ce);
+ }
+- unlock_buffer(bh);
+ out:
+ ext4_std_error(inode->i_sb, error);
+ return;
+diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
+index 31dce61..4bbd521 100644
+--- a/fs/jffs2/gc.c
++++ b/fs/jffs2/gc.c
+@@ -225,8 +225,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
+ return 0;
+
+ D1(printk(KERN_DEBUG "No progress from erasing blocks; doing GC anyway\n"));
+- spin_lock(&c->erase_completion_lock);
+ mutex_lock(&c->alloc_sem);
++ spin_lock(&c->erase_completion_lock);
+ }
+
+ /* First, work out which block we're garbage-collecting */
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 51f6a40..bab7c58 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1802,6 +1802,7 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode
+ nfs_setattr_update_inode(state->inode, sattr);
+ nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
+ }
++ nfs_revalidate_inode(server, state->inode);
+ nfs4_opendata_put(opendata);
+ nfs4_put_state_owner(sp);
+ *res = state;
+diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
+index a9e6ba4..daad4e6 100644
+--- a/include/linux/mtd/map.h
++++ b/include/linux/mtd/map.h
+@@ -26,7 +26,7 @@
+ #include <linux/list.h>
+ #include <linux/string.h>
+ #include <linux/bug.h>
+-
++#include <linux/kernel.h>
+
+ #include <asm/unaligned.h>
+ #include <asm/system.h>
+diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
+index 605b0aa..76f4396 100644
+--- a/include/linux/usb/usbnet.h
++++ b/include/linux/usb/usbnet.h
+@@ -191,7 +191,8 @@ extern void usbnet_cdc_status(struct usbnet *, struct urb *);
+ enum skb_state {
+ illegal = 0,
+ tx_start, tx_done,
+- rx_start, rx_done, rx_cleanup
++ rx_start, rx_done, rx_cleanup,
++ unlink_start
+ };
+
+ struct skb_data { /* skb->cb is one of these */
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 0acf42c0..26f1ab0 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -47,6 +47,7 @@
+ #include <linux/audit.h>
+ #include <linux/memcontrol.h>
+ #include <linux/ftrace.h>
++#include <linux/proc_fs.h>
+ #include <linux/profile.h>
+ #include <linux/rmap.h>
+ #include <linux/ksm.h>
+@@ -1387,6 +1388,8 @@ bad_fork_cleanup_io:
+ if (p->io_context)
+ exit_io_context(p);
+ bad_fork_cleanup_namespaces:
++ if (unlikely(clone_flags & CLONE_NEWPID))
++ pid_ns_release_proc(p->nsproxy->pid_ns);
+ exit_task_namespaces(p);
+ bad_fork_cleanup_mm:
+ if (p->mm)
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index bd936ed..7120c2e 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2405,7 +2405,6 @@ retry_avoidcopy:
+ if (outside_reserve) {
+ BUG_ON(huge_pte_none(pte));
+ if (unmap_ref_private(mm, vma, old_page, address)) {
+- BUG_ON(page_count(old_page) != 1);
+ BUG_ON(huge_pte_none(pte));
+ spin_lock(&mm->page_table_lock);
+ goto retry_avoidcopy;
+diff --git a/mm/nobootmem.c b/mm/nobootmem.c
+index 7fa41b4..07c08c4 100644
+--- a/mm/nobootmem.c
++++ b/mm/nobootmem.c
+@@ -83,8 +83,7 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
+
+ static void __init __free_pages_memory(unsigned long start, unsigned long end)
+ {
+- int i;
+- unsigned long start_aligned, end_aligned;
++ unsigned long i, start_aligned, end_aligned;
+ int order = ilog2(BITS_PER_LONG);
+
+ start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 716eb4a..5c29750 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1642,6 +1642,16 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
+ areas[group] = ptr;
+
+ base = min(ptr, base);
++ }
++
++ /*
++ * Copy data and free unused parts. This should happen after all
++ * allocations are complete; otherwise, we may end up with
++ * overlapping groups.
++ */
++ for (group = 0; group < ai->nr_groups; group++) {
++ struct pcpu_group_info *gi = &ai->groups[group];
++ void *ptr = areas[group];
+
+ for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
+ if (gi->cpu_map[i] == NR_CPUS) {
+diff --git a/net/core/dev.c b/net/core/dev.c
+index cd5050e..61a7baa 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1421,14 +1421,34 @@ EXPORT_SYMBOL(register_netdevice_notifier);
+ * register_netdevice_notifier(). The notifier is unlinked into the
+ * kernel structures and may then be reused. A negative errno code
+ * is returned on a failure.
++ *
++ * After unregistering unregister and down device events are synthesized
++ * for all devices on the device list to the removed notifier to remove
++ * the need for special case cleanup code.
+ */
+
+ int unregister_netdevice_notifier(struct notifier_block *nb)
+ {
++ struct net_device *dev;
++ struct net *net;
+ int err;
+
+ rtnl_lock();
+ err = raw_notifier_chain_unregister(&netdev_chain, nb);
++ if (err)
++ goto unlock;
++
++ for_each_net(net) {
++ for_each_netdev(net, dev) {
++ if (dev->flags & IFF_UP) {
++ nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
++ nb->notifier_call(nb, NETDEV_DOWN, dev);
++ }
++ nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
++ nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
++ }
++ }
++unlock:
+ rtnl_unlock();
+ return err;
+ }
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 0001c24..df878de 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -1932,7 +1932,7 @@ static int pktgen_device_event(struct notifier_block *unused,
+ {
+ struct net_device *dev = ptr;
+
+- if (!net_eq(dev_net(dev), &init_net))
++ if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting)
+ return NOTIFY_DONE;
+
+ /* It is OK that we do not hold the group lock right now,
+@@ -3758,12 +3758,18 @@ static void __exit pg_cleanup(void)
+ {
+ struct pktgen_thread *t;
+ struct list_head *q, *n;
++ LIST_HEAD(list);
+
+ /* Stop all interfaces & threads */
+ pktgen_exiting = true;
+
+- list_for_each_safe(q, n, &pktgen_threads) {
++ mutex_lock(&pktgen_thread_lock);
++ list_splice_init(&pktgen_threads, &list);
++ mutex_unlock(&pktgen_thread_lock);
++
++ list_for_each_safe(q, n, &list) {
+ t = list_entry(q, struct pktgen_thread, th_list);
++ list_del(&t->th_list);
+ kthread_stop(t->tsk);
+ kfree(t);
+ }
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 7904db4..11ba922 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -851,8 +851,7 @@ new_segment:
+ wait_for_sndbuf:
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ wait_for_memory:
+- if (copied)
+- tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
++ tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+
+ if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
+ goto do_error;
+@@ -3216,7 +3215,7 @@ void __init tcp_init(void)
+ {
+ struct sk_buff *skb = NULL;
+ unsigned long limit;
+- int i, max_share, cnt;
++ int i, max_rshare, max_wshare, cnt;
+ unsigned long jiffy = jiffies;
+
+ BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
+@@ -3280,15 +3279,16 @@ void __init tcp_init(void)
+
+ /* Set per-socket limits to no more than 1/128 the pressure threshold */
+ limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
+- max_share = min(4UL*1024*1024, limit);
++ max_wshare = min(4UL*1024*1024, limit);
++ max_rshare = min(6UL*1024*1024, limit);
+
+ sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
+ sysctl_tcp_wmem[1] = 16*1024;
+- sysctl_tcp_wmem[2] = max(64*1024, max_share);
++ sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
+
+ sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
+ sysctl_tcp_rmem[1] = 87380;
+- sysctl_tcp_rmem[2] = max(87380, max_share);
++ sysctl_tcp_rmem[2] = max(87380, max_rshare);
+
+ printk(KERN_INFO "TCP: Hash tables configured "
+ "(established %u bind %u)\n",
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index daedc07..9726927 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -83,7 +83,7 @@ int sysctl_tcp_ecn __read_mostly = 2;
+ EXPORT_SYMBOL(sysctl_tcp_ecn);
+ int sysctl_tcp_dsack __read_mostly = 1;
+ int sysctl_tcp_app_win __read_mostly = 31;
+-int sysctl_tcp_adv_win_scale __read_mostly = 2;
++int sysctl_tcp_adv_win_scale __read_mostly = 1;
+ EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
+
+ int sysctl_tcp_stdurg __read_mostly;
+@@ -2868,11 +2868,14 @@ static inline void tcp_complete_cwr(struct sock *sk)
+
+ /* Do not moderate cwnd if it's already undone in cwr or recovery. */
+ if (tp->undo_marker) {
+- if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
++ if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
+ tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
+- else /* PRR */
++ tp->snd_cwnd_stamp = tcp_time_stamp;
++ } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
++ /* PRR algorithm. */
+ tp->snd_cwnd = tp->snd_ssthresh;
+- tp->snd_cwnd_stamp = tcp_time_stamp;
++ tp->snd_cwnd_stamp = tcp_time_stamp;
++ }
+ }
+ tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
+ }
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 55670ec..2a2a3e7 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -441,8 +441,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
+
+ daddr = lip->l2tp_addr.s_addr;
+ } else {
++ rc = -EDESTADDRREQ;
+ if (sk->sk_state != TCP_ESTABLISHED)
+- return -EDESTADDRREQ;
++ goto out;
+
+ daddr = inet->inet_daddr;
+ connected = 1;
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 7801b15..a489d8b 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -351,10 +351,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
+ if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
+ (skb->ip_summed == CHECKSUM_PARTIAL &&
+- skb_checksum_help(skb))) {
+- sch->qstats.drops++;
+- return NET_XMIT_DROP;
+- }
++ skb_checksum_help(skb)))
++ return qdisc_drop(skb, sch);
+
+ skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
+ }
+diff --git a/sound/pci/echoaudio/echoaudio_dsp.c b/sound/pci/echoaudio/echoaudio_dsp.c
+index 64417a7..d8c670c 100644
+--- a/sound/pci/echoaudio/echoaudio_dsp.c
++++ b/sound/pci/echoaudio/echoaudio_dsp.c
+@@ -475,7 +475,7 @@ static int load_firmware(struct echoaudio *chip)
+ const struct firmware *fw;
+ int box_type, err;
+
+- if (snd_BUG_ON(!chip->dsp_code_to_load || !chip->comm_page))
++ if (snd_BUG_ON(!chip->comm_page))
+ return -EPERM;
+
+ /* See if the ASIC is present and working - only if the DSP is already loaded */
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 192e6c0..53345bc 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -769,11 +769,13 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
+ {
+ struct azx *chip = bus->private_data;
+ unsigned long timeout;
++ unsigned long loopcounter;
+ int do_poll = 0;
+
+ again:
+ timeout = jiffies + msecs_to_jiffies(1000);
+- for (;;) {
++
++ for (loopcounter = 0;; loopcounter++) {
+ if (chip->polling_mode || do_poll) {
+ spin_lock_irq(&chip->reg_lock);
+ azx_update_rirb(chip);
+@@ -789,7 +791,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
+ }
+ if (time_after(jiffies, timeout))
+ break;
+- if (bus->needs_damn_long_delay)
++ if (bus->needs_damn_long_delay || loopcounter > 3000)
+ msleep(2); /* temporary workaround */
+ else {
+ udelay(10);
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index ed67698..7b7a516 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -4484,9 +4484,9 @@ static int stac92xx_init(struct hda_codec *codec)
+ def_conf = get_defcfg_connect(def_conf);
+ /* skip any ports that don't have jacks since presence
+ * detection is useless */
+- if (def_conf != AC_JACK_PORT_COMPLEX) {
+- if (def_conf != AC_JACK_PORT_NONE)
+- stac_toggle_power_map(codec, nid, 1);
++ if (def_conf != AC_JACK_PORT_NONE &&
++ !is_jack_detectable(codec, nid)) {
++ stac_toggle_power_map(codec, nid, 1);
+ continue;
+ }
+ if (enable_pin_detect(codec, nid, STAC_PWR_EVENT)) {
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 7806301..3e7aa22 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -1027,7 +1027,7 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+ WM8994_AIF2DACL_ENA |
+ WM8994_AIF2DACR_ENA, 0);
+- snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
++ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4,
+ WM8994_AIF2ADCL_ENA |
+ WM8994_AIF2ADCR_ENA, 0);
+
diff --git a/1018_linux-3.2.19.patch b/1018_linux-3.2.19.patch
new file mode 100644
index 00000000..6d5dc53e
--- /dev/null
+++ b/1018_linux-3.2.19.patch
@@ -0,0 +1,4857 @@
+diff --git a/Documentation/HOWTO b/Documentation/HOWTO
+index f7ade3b..59c080f 100644
+--- a/Documentation/HOWTO
++++ b/Documentation/HOWTO
+@@ -218,16 +218,16 @@ The development process
+ Linux kernel development process currently consists of a few different
+ main kernel "branches" and lots of different subsystem-specific kernel
+ branches. These different branches are:
+- - main 2.6.x kernel tree
+- - 2.6.x.y -stable kernel tree
+- - 2.6.x -git kernel patches
++ - main 3.x kernel tree
++ - 3.x.y -stable kernel tree
++ - 3.x -git kernel patches
+ - subsystem specific kernel trees and patches
+- - the 2.6.x -next kernel tree for integration tests
++ - the 3.x -next kernel tree for integration tests
+
+-2.6.x kernel tree
++3.x kernel tree
+ -----------------
+-2.6.x kernels are maintained by Linus Torvalds, and can be found on
+-kernel.org in the pub/linux/kernel/v2.6/ directory. Its development
++3.x kernels are maintained by Linus Torvalds, and can be found on
++kernel.org in the pub/linux/kernel/v3.x/ directory. Its development
+ process is as follows:
+ - As soon as a new kernel is released a two weeks window is open,
+ during this period of time maintainers can submit big diffs to
+@@ -262,20 +262,20 @@ mailing list about kernel releases:
+ released according to perceived bug status, not according to a
+ preconceived timeline."
+
+-2.6.x.y -stable kernel tree
++3.x.y -stable kernel tree
+ ---------------------------
+-Kernels with 4-part versions are -stable kernels. They contain
++Kernels with 3-part versions are -stable kernels. They contain
+ relatively small and critical fixes for security problems or significant
+-regressions discovered in a given 2.6.x kernel.
++regressions discovered in a given 3.x kernel.
+
+ This is the recommended branch for users who want the most recent stable
+ kernel and are not interested in helping test development/experimental
+ versions.
+
+-If no 2.6.x.y kernel is available, then the highest numbered 2.6.x
++If no 3.x.y kernel is available, then the highest numbered 3.x
+ kernel is the current stable kernel.
+
+-2.6.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
++3.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
+ are released as needs dictate. The normal release period is approximately
+ two weeks, but it can be longer if there are no pressing problems. A
+ security-related problem, instead, can cause a release to happen almost
+@@ -285,7 +285,7 @@ The file Documentation/stable_kernel_rules.txt in the kernel tree
+ documents what kinds of changes are acceptable for the -stable tree, and
+ how the release process works.
+
+-2.6.x -git patches
++3.x -git patches
+ ------------------
+ These are daily snapshots of Linus' kernel tree which are managed in a
+ git repository (hence the name.) These patches are usually released
+@@ -317,13 +317,13 @@ revisions to it, and maintainers can mark patches as under review,
+ accepted, or rejected. Most of these patchwork sites are listed at
+ http://patchwork.kernel.org/.
+
+-2.6.x -next kernel tree for integration tests
++3.x -next kernel tree for integration tests
+ ---------------------------------------------
+-Before updates from subsystem trees are merged into the mainline 2.6.x
++Before updates from subsystem trees are merged into the mainline 3.x
+ tree, they need to be integration-tested. For this purpose, a special
+ testing repository exists into which virtually all subsystem trees are
+ pulled on an almost daily basis:
+- http://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git
++ http://git.kernel.org/?p=linux/kernel/git/next/linux-next.git
+ http://linux.f-seidel.de/linux-next/pmwiki/
+
+ This way, the -next kernel gives a summary outlook onto what will be
+diff --git a/Makefile b/Makefile
+index add68f1..c291184 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 18
++SUBLEVEL = 19
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+@@ -442,7 +442,7 @@ asm-generic:
+
+ no-dot-config-targets := clean mrproper distclean \
+ cscope gtags TAGS tags help %docs check% coccicheck \
+- include/linux/version.h headers_% \
++ include/linux/version.h headers_% archscripts \
+ kernelversion %src-pkg
+
+ config-targets := 0
+@@ -979,7 +979,7 @@ prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
+ include/config/auto.conf
+ $(cmd_crmodverdir)
+
+-archprepare: prepare1 scripts_basic
++archprepare: archscripts prepare1 scripts_basic
+
+ prepare0: archprepare FORCE
+ $(Q)$(MAKE) $(build)=.
+@@ -1046,8 +1046,11 @@ hdr-inst := -rR -f $(srctree)/scripts/Makefile.headersinst obj
+ # If we do an all arch process set dst to asm-$(hdr-arch)
+ hdr-dst = $(if $(KBUILD_HEADERS), dst=include/asm-$(hdr-arch), dst=include/asm)
+
++PHONY += archscripts
++archscripts:
++
+ PHONY += __headers
+-__headers: include/linux/version.h scripts_basic asm-generic FORCE
++__headers: include/linux/version.h scripts_basic asm-generic archscripts FORCE
+ $(Q)$(MAKE) $(build)=scripts build_unifdef
+
+ PHONY += headers_install_all
+diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
+index d5d8d5c..1252a26 100644
+--- a/arch/arm/include/asm/cacheflush.h
++++ b/arch/arm/include/asm/cacheflush.h
+@@ -249,7 +249,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
+ * Harvard caches are synchronised for the user space address range.
+ * This is used for the ARM private sys_cacheflush system call.
+ */
+-#define flush_cache_user_range(vma,start,end) \
++#define flush_cache_user_range(start,end) \
+ __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
+
+ /*
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 99a5727..160cb16 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -488,7 +488,9 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
+ if (end > vma->vm_end)
+ end = vma->vm_end;
+
+- flush_cache_user_range(vma, start, end);
++ up_read(&mm->mmap_sem);
++ flush_cache_user_range(start, end);
++ return;
+ }
+ up_read(&mm->mmap_sem);
+ }
+diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
+index 43f4c92..7073185 100644
+--- a/arch/ia64/kvm/kvm-ia64.c
++++ b/arch/ia64/kvm/kvm-ia64.c
+@@ -1169,6 +1169,11 @@ out:
+
+ #define PALE_RESET_ENTRY 0x80000000ffffffb0UL
+
++bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
++{
++ return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL);
++}
++
+ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ {
+ struct kvm_vcpu *v;
+diff --git a/arch/parisc/include/asm/prefetch.h b/arch/parisc/include/asm/prefetch.h
+index c5edc60..1ee7c82 100644
+--- a/arch/parisc/include/asm/prefetch.h
++++ b/arch/parisc/include/asm/prefetch.h
+@@ -21,7 +21,12 @@
+ #define ARCH_HAS_PREFETCH
+ static inline void prefetch(const void *addr)
+ {
+- __asm__("ldw 0(%0), %%r0" : : "r" (addr));
++ __asm__(
++#ifndef CONFIG_PA20
++ /* Need to avoid prefetch of NULL on PA7300LC */
++ " extrw,u,= %0,31,32,%%r0\n"
++#endif
++ " ldw 0(%0), %%r0" : : "r" (addr));
+ }
+
+ /* LDD is a PA2.0 addition. */
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index 6f05944..5350342 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -581,7 +581,11 @@
+ */
+ cmpiclr,= 0x01,\tmp,%r0
+ ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
++#ifdef CONFIG_64BIT
+ depd,z \prot,8,7,\prot
++#else
++ depw,z \prot,8,7,\prot
++#endif
+ /*
+ * OK, it is in the temp alias region, check whether "from" or "to".
+ * Check "subtle" note in pacache.S re: r23/r26.
+diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
+index 93ff3d9..5d7218a 100644
+--- a/arch/parisc/kernel/pacache.S
++++ b/arch/parisc/kernel/pacache.S
+@@ -692,7 +692,7 @@ ENTRY(flush_icache_page_asm)
+
+ /* Purge any old translation */
+
+- pitlb (%sr0,%r28)
++ pitlb (%sr4,%r28)
+
+ ldil L%icache_stride, %r1
+ ldw R%icache_stride(%r1), %r1
+@@ -706,27 +706,29 @@ ENTRY(flush_icache_page_asm)
+ sub %r25, %r1, %r25
+
+
+-1: fic,m %r1(%r28)
+- fic,m %r1(%r28)
+- fic,m %r1(%r28)
+- fic,m %r1(%r28)
+- fic,m %r1(%r28)
+- fic,m %r1(%r28)
+- fic,m %r1(%r28)
+- fic,m %r1(%r28)
+- fic,m %r1(%r28)
+- fic,m %r1(%r28)
+- fic,m %r1(%r28)
+- fic,m %r1(%r28)
+- fic,m %r1(%r28)
+- fic,m %r1(%r28)
+- fic,m %r1(%r28)
++ /* fic only has the type 26 form on PA1.1, requiring an
++ * explicit space specification, so use %sr4 */
++1: fic,m %r1(%sr4,%r28)
++ fic,m %r1(%sr4,%r28)
++ fic,m %r1(%sr4,%r28)
++ fic,m %r1(%sr4,%r28)
++ fic,m %r1(%sr4,%r28)
++ fic,m %r1(%sr4,%r28)
++ fic,m %r1(%sr4,%r28)
++ fic,m %r1(%sr4,%r28)
++ fic,m %r1(%sr4,%r28)
++ fic,m %r1(%sr4,%r28)
++ fic,m %r1(%sr4,%r28)
++ fic,m %r1(%sr4,%r28)
++ fic,m %r1(%sr4,%r28)
++ fic,m %r1(%sr4,%r28)
++ fic,m %r1(%sr4,%r28)
+ cmpb,COND(<<) %r28, %r25,1b
+- fic,m %r1(%r28)
++ fic,m %r1(%sr4,%r28)
+
+ sync
+ bv %r0(%r2)
+- pitlb (%sr0,%r25)
++ pitlb (%sr4,%r25)
+ .exit
+
+ .procend
+diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
+index 0243454..a5f6eff 100644
+--- a/arch/s390/kvm/intercept.c
++++ b/arch/s390/kvm/intercept.c
+@@ -133,13 +133,6 @@ static int handle_stop(struct kvm_vcpu *vcpu)
+
+ vcpu->stat.exit_stop_request++;
+ spin_lock_bh(&vcpu->arch.local_int.lock);
+- if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
+- vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
+- rc = kvm_s390_vcpu_store_status(vcpu,
+- KVM_S390_STORE_STATUS_NOADDR);
+- if (rc >= 0)
+- rc = -EOPNOTSUPP;
+- }
+
+ if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
+ vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
+@@ -155,7 +148,18 @@ static int handle_stop(struct kvm_vcpu *vcpu)
+ rc = -EOPNOTSUPP;
+ }
+
+- spin_unlock_bh(&vcpu->arch.local_int.lock);
++ if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
++ vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
++ /* store status must be called unlocked. Since local_int.lock
++ * only protects local_int.* and not guest memory we can give
++ * up the lock here */
++ spin_unlock_bh(&vcpu->arch.local_int.lock);
++ rc = kvm_s390_vcpu_store_status(vcpu,
++ KVM_S390_STORE_STATUS_NOADDR);
++ if (rc >= 0)
++ rc = -EOPNOTSUPP;
++ } else
++ spin_unlock_bh(&vcpu->arch.local_int.lock);
+ return rc;
+ }
+
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index d1c44573..d3cb86c 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -418,7 +418,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ {
+ memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
+- vcpu->arch.guest_fpregs.fpc = fpu->fpc;
++ vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
+ restore_fp_regs(&vcpu->arch.guest_fpregs);
+ return 0;
+ }
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index c7f0fbc..b28aaa4 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -583,6 +583,7 @@ static void pfault_interrupt(unsigned int ext_int_code,
+ tsk->thread.pfault_wait = 0;
+ list_del(&tsk->thread.list);
+ wake_up_process(tsk);
++ put_task_struct(tsk);
+ } else {
+ /* Completion interrupt was faster than initial
+ * interrupt. Set pfault_wait to -1 so the initial
+@@ -597,14 +598,22 @@ static void pfault_interrupt(unsigned int ext_int_code,
+ put_task_struct(tsk);
+ } else {
+ /* signal bit not set -> a real page is missing. */
+- if (tsk->thread.pfault_wait == -1) {
++ if (tsk->thread.pfault_wait == 1) {
++ /* Already on the list with a reference: put to sleep */
++ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
++ set_tsk_need_resched(tsk);
++ } else if (tsk->thread.pfault_wait == -1) {
+ /* Completion interrupt was faster than the initial
+ * interrupt (pfault_wait == -1). Set pfault_wait
+ * back to zero and exit. */
+ tsk->thread.pfault_wait = 0;
+ } else {
+ /* Initial interrupt arrived before completion
+- * interrupt. Let the task sleep. */
++ * interrupt. Let the task sleep.
++ * An extra task reference is needed since a different
++ * cpu may set the task state to TASK_RUNNING again
++ * before the scheduler is reached. */
++ get_task_struct(tsk);
+ tsk->thread.pfault_wait = 1;
+ list_add(&tsk->thread.list, &pfault_list);
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+@@ -629,6 +638,7 @@ static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
+ list_del(&thread->list);
+ tsk = container_of(thread, struct task_struct, thread);
+ wake_up_process(tsk);
++ put_task_struct(tsk);
+ }
+ spin_unlock_irq(&pfault_lock);
+ break;
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index f92602e..f210d51 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -583,6 +583,9 @@ config SYSVIPC_COMPAT
+ depends on COMPAT && SYSVIPC
+ default y
+
++config KEYS_COMPAT
++ def_bool y if COMPAT && KEYS
++
+ endmenu
+
+ source "net/Kconfig"
+diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
+index db86b1a..3a58e0d 100644
+--- a/arch/sparc/kernel/systbls_64.S
++++ b/arch/sparc/kernel/systbls_64.S
+@@ -74,7 +74,7 @@ sys_call_table32:
+ .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
+ /*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
+ .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
+-/*280*/ .word sys32_tee, sys_add_key, sys_request_key, sys_keyctl, compat_sys_openat
++/*280*/ .word sys32_tee, sys_add_key, sys_request_key, compat_sys_keyctl, compat_sys_openat
+ .word sys_mkdirat, sys_mknodat, sys_fchownat, compat_sys_futimesat, compat_sys_fstatat64
+ /*290*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
+ .word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare
+diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
+index 70a0de4..6cb8319 100644
+--- a/arch/tile/Kconfig
++++ b/arch/tile/Kconfig
+@@ -11,8 +11,9 @@ config TILE
+ select GENERIC_IRQ_PROBE
+ select GENERIC_PENDING_IRQ if SMP
+ select GENERIC_IRQ_SHOW
++ select HAVE_SYSCALL_WRAPPERS if TILEGX
+ select SYS_HYPERVISOR
+- select ARCH_HAVE_NMI_SAFE_CMPXCHG if !M386
++ select ARCH_HAVE_NMI_SAFE_CMPXCHG
+
+ # FIXME: investigate whether we need/want these options.
+ # select HAVE_IOREMAP_PROT
+diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
+index 16f1fa5..bd186c4 100644
+--- a/arch/tile/include/asm/bitops.h
++++ b/arch/tile/include/asm/bitops.h
+@@ -77,6 +77,11 @@ static inline int ffs(int x)
+ return __builtin_ffs(x);
+ }
+
++static inline int fls64(__u64 w)
++{
++ return (sizeof(__u64) * 8) - __builtin_clzll(w);
++}
++
+ /**
+ * fls - find last set bit in word
+ * @x: the word to search
+@@ -90,12 +95,7 @@ static inline int ffs(int x)
+ */
+ static inline int fls(int x)
+ {
+- return (sizeof(int) * 8) - __builtin_clz(x);
+-}
+-
+-static inline int fls64(__u64 w)
+-{
+- return (sizeof(__u64) * 8) - __builtin_clzll(w);
++ return fls64((unsigned int) x);
+ }
+
+ static inline unsigned int __arch_hweight32(unsigned int w)
+diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
+index 41474fb..aa365c5 100644
+--- a/arch/um/include/asm/pgtable.h
++++ b/arch/um/include/asm/pgtable.h
+@@ -271,6 +271,12 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
+ }
+ #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
++#define __HAVE_ARCH_PTE_SAME
++static inline int pte_same(pte_t pte_a, pte_t pte_b)
++{
++ return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
++}
++
+ /*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+@@ -346,11 +352,11 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
+ #define update_mmu_cache(vma,address,ptep) do ; while (0)
+
+ /* Encode and de-code a swap entry */
+-#define __swp_type(x) (((x).val >> 4) & 0x3f)
++#define __swp_type(x) (((x).val >> 5) & 0x1f)
+ #define __swp_offset(x) ((x).val >> 11)
+
+ #define __swp_entry(type, offset) \
+- ((swp_entry_t) { ((type) << 4) | ((offset) << 11) })
++ ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
+ #define __pte_to_swp_entry(pte) \
+ ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index b02e509..03dbc7f5b 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -117,6 +117,9 @@ KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
+ KBUILD_CFLAGS += $(mflags-y)
+ KBUILD_AFLAGS += $(mflags-y)
+
++archscripts:
++ $(Q)$(MAKE) $(build)=arch/x86/tools relocs
++
+ ###
+ # Kernel objects
+
+@@ -180,6 +183,7 @@ archclean:
+ $(Q)rm -rf $(objtree)/arch/i386
+ $(Q)rm -rf $(objtree)/arch/x86_64
+ $(Q)$(MAKE) $(clean)=$(boot)
++ $(Q)$(MAKE) $(clean)=arch/x86/tools
+
+ define archhelp
+ echo '* bzImage - Compressed kernel image (arch/x86/boot/bzImage)'
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 09664ef..77453c6 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -31,13 +31,12 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
+ $(obj)/vmlinux.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+
++targets += vmlinux.bin.all vmlinux.relocs
+
+-targets += vmlinux.bin.all vmlinux.relocs relocs
+-hostprogs-$(CONFIG_X86_NEED_RELOCS) += relocs
+-
++CMD_RELOCS = arch/x86/tools/relocs
+ quiet_cmd_relocs = RELOCS $@
+- cmd_relocs = $(obj)/relocs $< > $@;$(obj)/relocs --abs-relocs $<
+-$(obj)/vmlinux.relocs: vmlinux $(obj)/relocs FORCE
++ cmd_relocs = $(CMD_RELOCS) $< > $@;$(CMD_RELOCS) --abs-relocs $<
++$(obj)/vmlinux.relocs: vmlinux FORCE
+ $(call if_changed,relocs)
+
+ vmlinux.bin.all-y := $(obj)/vmlinux.bin
+diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
+deleted file mode 100644
+index e77f4e4..0000000
+--- a/arch/x86/boot/compressed/relocs.c
++++ /dev/null
+@@ -1,680 +0,0 @@
+-#include <stdio.h>
+-#include <stdarg.h>
+-#include <stdlib.h>
+-#include <stdint.h>
+-#include <string.h>
+-#include <errno.h>
+-#include <unistd.h>
+-#include <elf.h>
+-#include <byteswap.h>
+-#define USE_BSD
+-#include <endian.h>
+-#include <regex.h>
+-
+-static void die(char *fmt, ...);
+-
+-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+-static Elf32_Ehdr ehdr;
+-static unsigned long reloc_count, reloc_idx;
+-static unsigned long *relocs;
+-
+-struct section {
+- Elf32_Shdr shdr;
+- struct section *link;
+- Elf32_Sym *symtab;
+- Elf32_Rel *reltab;
+- char *strtab;
+-};
+-static struct section *secs;
+-
+-/*
+- * Following symbols have been audited. There values are constant and do
+- * not change if bzImage is loaded at a different physical address than
+- * the address for which it has been compiled. Don't warn user about
+- * absolute relocations present w.r.t these symbols.
+- */
+-static const char abs_sym_regex[] =
+- "^(xen_irq_disable_direct_reloc$|"
+- "xen_save_fl_direct_reloc$|"
+- "VDSO|"
+- "__crc_)";
+-static regex_t abs_sym_regex_c;
+-static int is_abs_reloc(const char *sym_name)
+-{
+- return !regexec(&abs_sym_regex_c, sym_name, 0, NULL, 0);
+-}
+-
+-/*
+- * These symbols are known to be relative, even if the linker marks them
+- * as absolute (typically defined outside any section in the linker script.)
+- */
+-static const char rel_sym_regex[] =
+- "^_end$";
+-static regex_t rel_sym_regex_c;
+-static int is_rel_reloc(const char *sym_name)
+-{
+- return !regexec(&rel_sym_regex_c, sym_name, 0, NULL, 0);
+-}
+-
+-static void regex_init(void)
+-{
+- char errbuf[128];
+- int err;
+-
+- err = regcomp(&abs_sym_regex_c, abs_sym_regex,
+- REG_EXTENDED|REG_NOSUB);
+- if (err) {
+- regerror(err, &abs_sym_regex_c, errbuf, sizeof errbuf);
+- die("%s", errbuf);
+- }
+-
+- err = regcomp(&rel_sym_regex_c, rel_sym_regex,
+- REG_EXTENDED|REG_NOSUB);
+- if (err) {
+- regerror(err, &rel_sym_regex_c, errbuf, sizeof errbuf);
+- die("%s", errbuf);
+- }
+-}
+-
+-static void die(char *fmt, ...)
+-{
+- va_list ap;
+- va_start(ap, fmt);
+- vfprintf(stderr, fmt, ap);
+- va_end(ap);
+- exit(1);
+-}
+-
+-static const char *sym_type(unsigned type)
+-{
+- static const char *type_name[] = {
+-#define SYM_TYPE(X) [X] = #X
+- SYM_TYPE(STT_NOTYPE),
+- SYM_TYPE(STT_OBJECT),
+- SYM_TYPE(STT_FUNC),
+- SYM_TYPE(STT_SECTION),
+- SYM_TYPE(STT_FILE),
+- SYM_TYPE(STT_COMMON),
+- SYM_TYPE(STT_TLS),
+-#undef SYM_TYPE
+- };
+- const char *name = "unknown sym type name";
+- if (type < ARRAY_SIZE(type_name)) {
+- name = type_name[type];
+- }
+- return name;
+-}
+-
+-static const char *sym_bind(unsigned bind)
+-{
+- static const char *bind_name[] = {
+-#define SYM_BIND(X) [X] = #X
+- SYM_BIND(STB_LOCAL),
+- SYM_BIND(STB_GLOBAL),
+- SYM_BIND(STB_WEAK),
+-#undef SYM_BIND
+- };
+- const char *name = "unknown sym bind name";
+- if (bind < ARRAY_SIZE(bind_name)) {
+- name = bind_name[bind];
+- }
+- return name;
+-}
+-
+-static const char *sym_visibility(unsigned visibility)
+-{
+- static const char *visibility_name[] = {
+-#define SYM_VISIBILITY(X) [X] = #X
+- SYM_VISIBILITY(STV_DEFAULT),
+- SYM_VISIBILITY(STV_INTERNAL),
+- SYM_VISIBILITY(STV_HIDDEN),
+- SYM_VISIBILITY(STV_PROTECTED),
+-#undef SYM_VISIBILITY
+- };
+- const char *name = "unknown sym visibility name";
+- if (visibility < ARRAY_SIZE(visibility_name)) {
+- name = visibility_name[visibility];
+- }
+- return name;
+-}
+-
+-static const char *rel_type(unsigned type)
+-{
+- static const char *type_name[] = {
+-#define REL_TYPE(X) [X] = #X
+- REL_TYPE(R_386_NONE),
+- REL_TYPE(R_386_32),
+- REL_TYPE(R_386_PC32),
+- REL_TYPE(R_386_GOT32),
+- REL_TYPE(R_386_PLT32),
+- REL_TYPE(R_386_COPY),
+- REL_TYPE(R_386_GLOB_DAT),
+- REL_TYPE(R_386_JMP_SLOT),
+- REL_TYPE(R_386_RELATIVE),
+- REL_TYPE(R_386_GOTOFF),
+- REL_TYPE(R_386_GOTPC),
+-#undef REL_TYPE
+- };
+- const char *name = "unknown type rel type name";
+- if (type < ARRAY_SIZE(type_name) && type_name[type]) {
+- name = type_name[type];
+- }
+- return name;
+-}
+-
+-static const char *sec_name(unsigned shndx)
+-{
+- const char *sec_strtab;
+- const char *name;
+- sec_strtab = secs[ehdr.e_shstrndx].strtab;
+- name = "<noname>";
+- if (shndx < ehdr.e_shnum) {
+- name = sec_strtab + secs[shndx].shdr.sh_name;
+- }
+- else if (shndx == SHN_ABS) {
+- name = "ABSOLUTE";
+- }
+- else if (shndx == SHN_COMMON) {
+- name = "COMMON";
+- }
+- return name;
+-}
+-
+-static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym)
+-{
+- const char *name;
+- name = "<noname>";
+- if (sym->st_name) {
+- name = sym_strtab + sym->st_name;
+- }
+- else {
+- name = sec_name(secs[sym->st_shndx].shdr.sh_name);
+- }
+- return name;
+-}
+-
+-
+-
+-#if BYTE_ORDER == LITTLE_ENDIAN
+-#define le16_to_cpu(val) (val)
+-#define le32_to_cpu(val) (val)
+-#endif
+-#if BYTE_ORDER == BIG_ENDIAN
+-#define le16_to_cpu(val) bswap_16(val)
+-#define le32_to_cpu(val) bswap_32(val)
+-#endif
+-
+-static uint16_t elf16_to_cpu(uint16_t val)
+-{
+- return le16_to_cpu(val);
+-}
+-
+-static uint32_t elf32_to_cpu(uint32_t val)
+-{
+- return le32_to_cpu(val);
+-}
+-
+-static void read_ehdr(FILE *fp)
+-{
+- if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) {
+- die("Cannot read ELF header: %s\n",
+- strerror(errno));
+- }
+- if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) {
+- die("No ELF magic\n");
+- }
+- if (ehdr.e_ident[EI_CLASS] != ELFCLASS32) {
+- die("Not a 32 bit executable\n");
+- }
+- if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB) {
+- die("Not a LSB ELF executable\n");
+- }
+- if (ehdr.e_ident[EI_VERSION] != EV_CURRENT) {
+- die("Unknown ELF version\n");
+- }
+- /* Convert the fields to native endian */
+- ehdr.e_type = elf16_to_cpu(ehdr.e_type);
+- ehdr.e_machine = elf16_to_cpu(ehdr.e_machine);
+- ehdr.e_version = elf32_to_cpu(ehdr.e_version);
+- ehdr.e_entry = elf32_to_cpu(ehdr.e_entry);
+- ehdr.e_phoff = elf32_to_cpu(ehdr.e_phoff);
+- ehdr.e_shoff = elf32_to_cpu(ehdr.e_shoff);
+- ehdr.e_flags = elf32_to_cpu(ehdr.e_flags);
+- ehdr.e_ehsize = elf16_to_cpu(ehdr.e_ehsize);
+- ehdr.e_phentsize = elf16_to_cpu(ehdr.e_phentsize);
+- ehdr.e_phnum = elf16_to_cpu(ehdr.e_phnum);
+- ehdr.e_shentsize = elf16_to_cpu(ehdr.e_shentsize);
+- ehdr.e_shnum = elf16_to_cpu(ehdr.e_shnum);
+- ehdr.e_shstrndx = elf16_to_cpu(ehdr.e_shstrndx);
+-
+- if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) {
+- die("Unsupported ELF header type\n");
+- }
+- if (ehdr.e_machine != EM_386) {
+- die("Not for x86\n");
+- }
+- if (ehdr.e_version != EV_CURRENT) {
+- die("Unknown ELF version\n");
+- }
+- if (ehdr.e_ehsize != sizeof(Elf32_Ehdr)) {
+- die("Bad Elf header size\n");
+- }
+- if (ehdr.e_phentsize != sizeof(Elf32_Phdr)) {
+- die("Bad program header entry\n");
+- }
+- if (ehdr.e_shentsize != sizeof(Elf32_Shdr)) {
+- die("Bad section header entry\n");
+- }
+- if (ehdr.e_shstrndx >= ehdr.e_shnum) {
+- die("String table index out of bounds\n");
+- }
+-}
+-
+-static void read_shdrs(FILE *fp)
+-{
+- int i;
+- Elf32_Shdr shdr;
+-
+- secs = calloc(ehdr.e_shnum, sizeof(struct section));
+- if (!secs) {
+- die("Unable to allocate %d section headers\n",
+- ehdr.e_shnum);
+- }
+- if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) {
+- die("Seek to %d failed: %s\n",
+- ehdr.e_shoff, strerror(errno));
+- }
+- for (i = 0; i < ehdr.e_shnum; i++) {
+- struct section *sec = &secs[i];
+- if (fread(&shdr, sizeof shdr, 1, fp) != 1)
+- die("Cannot read ELF section headers %d/%d: %s\n",
+- i, ehdr.e_shnum, strerror(errno));
+- sec->shdr.sh_name = elf32_to_cpu(shdr.sh_name);
+- sec->shdr.sh_type = elf32_to_cpu(shdr.sh_type);
+- sec->shdr.sh_flags = elf32_to_cpu(shdr.sh_flags);
+- sec->shdr.sh_addr = elf32_to_cpu(shdr.sh_addr);
+- sec->shdr.sh_offset = elf32_to_cpu(shdr.sh_offset);
+- sec->shdr.sh_size = elf32_to_cpu(shdr.sh_size);
+- sec->shdr.sh_link = elf32_to_cpu(shdr.sh_link);
+- sec->shdr.sh_info = elf32_to_cpu(shdr.sh_info);
+- sec->shdr.sh_addralign = elf32_to_cpu(shdr.sh_addralign);
+- sec->shdr.sh_entsize = elf32_to_cpu(shdr.sh_entsize);
+- if (sec->shdr.sh_link < ehdr.e_shnum)
+- sec->link = &secs[sec->shdr.sh_link];
+- }
+-
+-}
+-
+-static void read_strtabs(FILE *fp)
+-{
+- int i;
+- for (i = 0; i < ehdr.e_shnum; i++) {
+- struct section *sec = &secs[i];
+- if (sec->shdr.sh_type != SHT_STRTAB) {
+- continue;
+- }
+- sec->strtab = malloc(sec->shdr.sh_size);
+- if (!sec->strtab) {
+- die("malloc of %d bytes for strtab failed\n",
+- sec->shdr.sh_size);
+- }
+- if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
+- die("Seek to %d failed: %s\n",
+- sec->shdr.sh_offset, strerror(errno));
+- }
+- if (fread(sec->strtab, 1, sec->shdr.sh_size, fp)
+- != sec->shdr.sh_size) {
+- die("Cannot read symbol table: %s\n",
+- strerror(errno));
+- }
+- }
+-}
+-
+-static void read_symtabs(FILE *fp)
+-{
+- int i,j;
+- for (i = 0; i < ehdr.e_shnum; i++) {
+- struct section *sec = &secs[i];
+- if (sec->shdr.sh_type != SHT_SYMTAB) {
+- continue;
+- }
+- sec->symtab = malloc(sec->shdr.sh_size);
+- if (!sec->symtab) {
+- die("malloc of %d bytes for symtab failed\n",
+- sec->shdr.sh_size);
+- }
+- if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
+- die("Seek to %d failed: %s\n",
+- sec->shdr.sh_offset, strerror(errno));
+- }
+- if (fread(sec->symtab, 1, sec->shdr.sh_size, fp)
+- != sec->shdr.sh_size) {
+- die("Cannot read symbol table: %s\n",
+- strerror(errno));
+- }
+- for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) {
+- Elf32_Sym *sym = &sec->symtab[j];
+- sym->st_name = elf32_to_cpu(sym->st_name);
+- sym->st_value = elf32_to_cpu(sym->st_value);
+- sym->st_size = elf32_to_cpu(sym->st_size);
+- sym->st_shndx = elf16_to_cpu(sym->st_shndx);
+- }
+- }
+-}
+-
+-
+-static void read_relocs(FILE *fp)
+-{
+- int i,j;
+- for (i = 0; i < ehdr.e_shnum; i++) {
+- struct section *sec = &secs[i];
+- if (sec->shdr.sh_type != SHT_REL) {
+- continue;
+- }
+- sec->reltab = malloc(sec->shdr.sh_size);
+- if (!sec->reltab) {
+- die("malloc of %d bytes for relocs failed\n",
+- sec->shdr.sh_size);
+- }
+- if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
+- die("Seek to %d failed: %s\n",
+- sec->shdr.sh_offset, strerror(errno));
+- }
+- if (fread(sec->reltab, 1, sec->shdr.sh_size, fp)
+- != sec->shdr.sh_size) {
+- die("Cannot read symbol table: %s\n",
+- strerror(errno));
+- }
+- for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
+- Elf32_Rel *rel = &sec->reltab[j];
+- rel->r_offset = elf32_to_cpu(rel->r_offset);
+- rel->r_info = elf32_to_cpu(rel->r_info);
+- }
+- }
+-}
+-
+-
+-static void print_absolute_symbols(void)
+-{
+- int i;
+- printf("Absolute symbols\n");
+- printf(" Num: Value Size Type Bind Visibility Name\n");
+- for (i = 0; i < ehdr.e_shnum; i++) {
+- struct section *sec = &secs[i];
+- char *sym_strtab;
+- int j;
+-
+- if (sec->shdr.sh_type != SHT_SYMTAB) {
+- continue;
+- }
+- sym_strtab = sec->link->strtab;
+- for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) {
+- Elf32_Sym *sym;
+- const char *name;
+- sym = &sec->symtab[j];
+- name = sym_name(sym_strtab, sym);
+- if (sym->st_shndx != SHN_ABS) {
+- continue;
+- }
+- printf("%5d %08x %5d %10s %10s %12s %s\n",
+- j, sym->st_value, sym->st_size,
+- sym_type(ELF32_ST_TYPE(sym->st_info)),
+- sym_bind(ELF32_ST_BIND(sym->st_info)),
+- sym_visibility(ELF32_ST_VISIBILITY(sym->st_other)),
+- name);
+- }
+- }
+- printf("\n");
+-}
+-
+-static void print_absolute_relocs(void)
+-{
+- int i, printed = 0;
+-
+- for (i = 0; i < ehdr.e_shnum; i++) {
+- struct section *sec = &secs[i];
+- struct section *sec_applies, *sec_symtab;
+- char *sym_strtab;
+- Elf32_Sym *sh_symtab;
+- int j;
+- if (sec->shdr.sh_type != SHT_REL) {
+- continue;
+- }
+- sec_symtab = sec->link;
+- sec_applies = &secs[sec->shdr.sh_info];
+- if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
+- continue;
+- }
+- sh_symtab = sec_symtab->symtab;
+- sym_strtab = sec_symtab->link->strtab;
+- for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
+- Elf32_Rel *rel;
+- Elf32_Sym *sym;
+- const char *name;
+- rel = &sec->reltab[j];
+- sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
+- name = sym_name(sym_strtab, sym);
+- if (sym->st_shndx != SHN_ABS) {
+- continue;
+- }
+-
+- /* Absolute symbols are not relocated if bzImage is
+- * loaded at a non-compiled address. Display a warning
+- * to user at compile time about the absolute
+- * relocations present.
+- *
+- * User need to audit the code to make sure
+- * some symbols which should have been section
+- * relative have not become absolute because of some
+- * linker optimization or wrong programming usage.
+- *
+- * Before warning check if this absolute symbol
+- * relocation is harmless.
+- */
+- if (is_abs_reloc(name) || is_rel_reloc(name))
+- continue;
+-
+- if (!printed) {
+- printf("WARNING: Absolute relocations"
+- " present\n");
+- printf("Offset Info Type Sym.Value "
+- "Sym.Name\n");
+- printed = 1;
+- }
+-
+- printf("%08x %08x %10s %08x %s\n",
+- rel->r_offset,
+- rel->r_info,
+- rel_type(ELF32_R_TYPE(rel->r_info)),
+- sym->st_value,
+- name);
+- }
+- }
+-
+- if (printed)
+- printf("\n");
+-}
+-
+-static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
+-{
+- int i;
+- /* Walk through the relocations */
+- for (i = 0; i < ehdr.e_shnum; i++) {
+- char *sym_strtab;
+- Elf32_Sym *sh_symtab;
+- struct section *sec_applies, *sec_symtab;
+- int j;
+- struct section *sec = &secs[i];
+-
+- if (sec->shdr.sh_type != SHT_REL) {
+- continue;
+- }
+- sec_symtab = sec->link;
+- sec_applies = &secs[sec->shdr.sh_info];
+- if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
+- continue;
+- }
+- sh_symtab = sec_symtab->symtab;
+- sym_strtab = sec_symtab->link->strtab;
+- for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
+- Elf32_Rel *rel;
+- Elf32_Sym *sym;
+- unsigned r_type;
+- rel = &sec->reltab[j];
+- sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
+- r_type = ELF32_R_TYPE(rel->r_info);
+- /* Don't visit relocations to absolute symbols */
+- if (sym->st_shndx == SHN_ABS &&
+- !is_rel_reloc(sym_name(sym_strtab, sym))) {
+- continue;
+- }
+- switch (r_type) {
+- case R_386_NONE:
+- case R_386_PC32:
+- /*
+- * NONE can be ignored and and PC relative
+- * relocations don't need to be adjusted.
+- */
+- break;
+- case R_386_32:
+- /* Visit relocations that need to be adjusted */
+- visit(rel, sym);
+- break;
+- default:
+- die("Unsupported relocation type: %s (%d)\n",
+- rel_type(r_type), r_type);
+- break;
+- }
+- }
+- }
+-}
+-
+-static void count_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
+-{
+- reloc_count += 1;
+-}
+-
+-static void collect_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
+-{
+- /* Remember the address that needs to be adjusted. */
+- relocs[reloc_idx++] = rel->r_offset;
+-}
+-
+-static int cmp_relocs(const void *va, const void *vb)
+-{
+- const unsigned long *a, *b;
+- a = va; b = vb;
+- return (*a == *b)? 0 : (*a > *b)? 1 : -1;
+-}
+-
+-static void emit_relocs(int as_text)
+-{
+- int i;
+- /* Count how many relocations I have and allocate space for them. */
+- reloc_count = 0;
+- walk_relocs(count_reloc);
+- relocs = malloc(reloc_count * sizeof(relocs[0]));
+- if (!relocs) {
+- die("malloc of %d entries for relocs failed\n",
+- reloc_count);
+- }
+- /* Collect up the relocations */
+- reloc_idx = 0;
+- walk_relocs(collect_reloc);
+-
+- /* Order the relocations for more efficient processing */
+- qsort(relocs, reloc_count, sizeof(relocs[0]), cmp_relocs);
+-
+- /* Print the relocations */
+- if (as_text) {
+- /* Print the relocations in a form suitable that
+- * gas will like.
+- */
+- printf(".section \".data.reloc\",\"a\"\n");
+- printf(".balign 4\n");
+- for (i = 0; i < reloc_count; i++) {
+- printf("\t .long 0x%08lx\n", relocs[i]);
+- }
+- printf("\n");
+- }
+- else {
+- unsigned char buf[4];
+- /* Print a stop */
+- fwrite("\0\0\0\0", 4, 1, stdout);
+- /* Now print each relocation */
+- for (i = 0; i < reloc_count; i++) {
+- buf[0] = (relocs[i] >> 0) & 0xff;
+- buf[1] = (relocs[i] >> 8) & 0xff;
+- buf[2] = (relocs[i] >> 16) & 0xff;
+- buf[3] = (relocs[i] >> 24) & 0xff;
+- fwrite(buf, 4, 1, stdout);
+- }
+- }
+-}
+-
+-static void usage(void)
+-{
+- die("relocs [--abs-syms |--abs-relocs | --text] vmlinux\n");
+-}
+-
+-int main(int argc, char **argv)
+-{
+- int show_absolute_syms, show_absolute_relocs;
+- int as_text;
+- const char *fname;
+- FILE *fp;
+- int i;
+-
+- regex_init();
+-
+- show_absolute_syms = 0;
+- show_absolute_relocs = 0;
+- as_text = 0;
+- fname = NULL;
+- for (i = 1; i < argc; i++) {
+- char *arg = argv[i];
+- if (*arg == '-') {
+- if (strcmp(argv[1], "--abs-syms") == 0) {
+- show_absolute_syms = 1;
+- continue;
+- }
+-
+- if (strcmp(argv[1], "--abs-relocs") == 0) {
+- show_absolute_relocs = 1;
+- continue;
+- }
+- else if (strcmp(argv[1], "--text") == 0) {
+- as_text = 1;
+- continue;
+- }
+- }
+- else if (!fname) {
+- fname = arg;
+- continue;
+- }
+- usage();
+- }
+- if (!fname) {
+- usage();
+- }
+- fp = fopen(fname, "r");
+- if (!fp) {
+- die("Cannot open %s: %s\n",
+- fname, strerror(errno));
+- }
+- read_ehdr(fp);
+- read_shdrs(fp);
+- read_strtabs(fp);
+- read_symtabs(fp);
+- read_relocs(fp);
+- if (show_absolute_syms) {
+- print_absolute_symbols();
+- return 0;
+- }
+- if (show_absolute_relocs) {
+- print_absolute_relocs();
+- return 0;
+- }
+- emit_relocs(as_text);
+- return 0;
+-}
+diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
+index 7395d5f..c9c9cfe 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
++++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
+@@ -145,15 +145,19 @@ static struct severity {
+ };
+
+ /*
+- * If the EIPV bit is set, it means the saved IP is the
+- * instruction which caused the MCE.
++ * If mcgstatus indicated that ip/cs on the stack were
++ * no good, then "m->cs" will be zero and we will have
++ * to assume the worst case (IN_KERNEL) as we actually
++ * have no idea what we were executing when the machine
++ * check hit.
++ * If we do have a good "m->cs" (or a faked one in the
++ * case we were executing in VM86 mode) we can use it to
++ * distinguish an exception taken in user from from one
++ * taken in the kernel.
+ */
+ static int error_context(struct mce *m)
+ {
+- if (m->mcgstatus & MCG_STATUS_EIPV)
+- return (m->ip && (m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
+- /* Unknown, assume kernel */
+- return IN_KERNEL;
++ return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
+ }
+
+ int mce_severity(struct mce *m, int tolerant, char **msg)
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 2af127d..b0f1271 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -389,6 +389,14 @@ static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
+ if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
+ m->ip = regs->ip;
+ m->cs = regs->cs;
++
++ /*
++ * When in VM86 mode make the cs look like ring 3
++ * always. This is a lie, but it's better than passing
++ * the additional vm86 bit around everywhere.
++ */
++ if (v8086_mode(regs))
++ m->cs |= 3;
+ }
+ /* Use accurate RIP reporting if available. */
+ if (rip_msr)
+diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
+index f64a039..3dbfb00 100644
+--- a/arch/x86/kernel/cpu/perf_event_amd.c
++++ b/arch/x86/kernel/cpu/perf_event_amd.c
+@@ -473,6 +473,7 @@ static __initconst const struct x86_pmu amd_pmu = {
+ * 0x023 DE PERF_CTL[2:0]
+ * 0x02D LS PERF_CTL[3]
+ * 0x02E LS PERF_CTL[3,0]
++ * 0x031 LS PERF_CTL[2:0] (**)
+ * 0x043 CU PERF_CTL[2:0]
+ * 0x045 CU PERF_CTL[2:0]
+ * 0x046 CU PERF_CTL[2:0]
+@@ -486,10 +487,12 @@ static __initconst const struct x86_pmu amd_pmu = {
+ * 0x0DD LS PERF_CTL[5:0]
+ * 0x0DE LS PERF_CTL[5:0]
+ * 0x0DF LS PERF_CTL[5:0]
++ * 0x1C0 EX PERF_CTL[5:3]
+ * 0x1D6 EX PERF_CTL[5:0]
+ * 0x1D8 EX PERF_CTL[5:0]
+ *
+- * (*) depending on the umask all FPU counters may be used
++ * (*) depending on the umask all FPU counters may be used
++ * (**) only one unitmask enabled at a time
+ */
+
+ static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0);
+@@ -539,6 +542,12 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
+ return &amd_f15_PMC3;
+ case 0x02E:
+ return &amd_f15_PMC30;
++ case 0x031:
++ if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
++ return &amd_f15_PMC20;
++ return &emptyconstraint;
++ case 0x1C0:
++ return &amd_f15_PMC53;
+ default:
+ return &amd_f15_PMC50;
+ }
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 4ea7678..7315488 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1677,7 +1677,7 @@ static int nested_pf_handled(struct kvm_vcpu *vcpu)
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
+- if (!(vmcs12->exception_bitmap & PF_VECTOR))
++ if (!(vmcs12->exception_bitmap & (1u << PF_VECTOR)))
+ return 0;
+
+ nested_vmx_vmexit(vcpu);
+@@ -3915,7 +3915,9 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
+ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
+
+ vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
++ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
++ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ vmx_set_cr4(&vmx->vcpu, 0);
+ vmx_set_efer(&vmx->vcpu, 0);
+ vmx_fpu_activate(&vmx->vcpu);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index e04cae1..4fc5323 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3579,6 +3579,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
+ r = -EEXIST;
+ if (kvm->arch.vpic)
+ goto create_irqchip_unlock;
++ r = -EINVAL;
++ if (atomic_read(&kvm->online_vcpus))
++ goto create_irqchip_unlock;
+ r = -ENOMEM;
+ vpic = kvm_create_pic(kvm);
+ if (vpic) {
+@@ -6486,6 +6489,11 @@ void kvm_arch_check_processor_compat(void *rtn)
+ kvm_x86_ops->check_processor_compatibility(rtn);
+ }
+
++bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
++{
++ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
++}
++
+ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ {
+ struct page *page;
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index d99346e..4c262f6 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -64,6 +64,10 @@ static int xen_register_pirq(u32 gsi, int gsi_override, int triggering,
+ int shareable = 0;
+ char *name;
+
++ irq = xen_irq_from_gsi(gsi);
++ if (irq > 0)
++ return irq;
++
+ if (set_pirq)
+ pirq = gsi;
+
+diff --git a/arch/x86/tools/.gitignore b/arch/x86/tools/.gitignore
+new file mode 100644
+index 0000000..be0ed06
+--- /dev/null
++++ b/arch/x86/tools/.gitignore
+@@ -0,0 +1 @@
++relocs
+diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
+index f820826..162a34a 100644
+--- a/arch/x86/tools/Makefile
++++ b/arch/x86/tools/Makefile
+@@ -29,3 +29,6 @@ HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/
+ # Dependencies are also needed.
+ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
+
++HOST_EXTRACFLAGS += -I$(srctree)/tools/include
++hostprogs-y += relocs
++relocs: $(obj)/relocs
+diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
+new file mode 100644
+index 0000000..e529730
+--- /dev/null
++++ b/arch/x86/tools/relocs.c
+@@ -0,0 +1,820 @@
++#include <stdio.h>
++#include <stdarg.h>
++#include <stdlib.h>
++#include <stdint.h>
++#include <string.h>
++#include <errno.h>
++#include <unistd.h>
++#include <elf.h>
++#include <byteswap.h>
++#define USE_BSD
++#include <endian.h>
++#include <regex.h>
++
++static void die(char *fmt, ...);
++
++#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
++static Elf32_Ehdr ehdr;
++static unsigned long reloc_count, reloc_idx;
++static unsigned long *relocs;
++static unsigned long reloc16_count, reloc16_idx;
++static unsigned long *relocs16;
++
++struct section {
++ Elf32_Shdr shdr;
++ struct section *link;
++ Elf32_Sym *symtab;
++ Elf32_Rel *reltab;
++ char *strtab;
++};
++static struct section *secs;
++
++enum symtype {
++ S_ABS,
++ S_REL,
++ S_SEG,
++ S_LIN,
++ S_NSYMTYPES
++};
++
++static const char * const sym_regex_kernel[S_NSYMTYPES] = {
++/*
++ * Following symbols have been audited. There values are constant and do
++ * not change if bzImage is loaded at a different physical address than
++ * the address for which it has been compiled. Don't warn user about
++ * absolute relocations present w.r.t these symbols.
++ */
++ [S_ABS] =
++ "^(xen_irq_disable_direct_reloc$|"
++ "xen_save_fl_direct_reloc$|"
++ "VDSO|"
++ "__crc_)",
++
++/*
++ * These symbols are known to be relative, even if the linker marks them
++ * as absolute (typically defined outside any section in the linker script.)
++ */
++ [S_REL] =
++ "^(__init_(begin|end)|"
++ "__x86_cpu_dev_(start|end)|"
++ "(__parainstructions|__alt_instructions)(|_end)|"
++ "(__iommu_table|__apicdrivers|__smp_locks)(|_end)|"
++ "__(start|end)_pci_.*|"
++ "__(start|end)_builtin_fw|"
++ "__(start|stop)___ksymtab(|_gpl|_unused|_unused_gpl|_gpl_future)|"
++ "__(start|stop)___kcrctab(|_gpl|_unused|_unused_gpl|_gpl_future)|"
++ "__(start|stop)___param|"
++ "__(start|stop)___modver|"
++ "__(start|stop)___bug_table|"
++ "__tracedata_(start|end)|"
++ "__(start|stop)_notes|"
++ "__end_rodata|"
++ "__initramfs_start|"
++ "(jiffies|jiffies_64)|"
++ "_end)$"
++};
++
++
++static const char * const sym_regex_realmode[S_NSYMTYPES] = {
++/*
++ * These are 16-bit segment symbols when compiling 16-bit code.
++ */
++ [S_SEG] =
++ "^real_mode_seg$",
++
++/*
++ * These are offsets belonging to segments, as opposed to linear addresses,
++ * when compiling 16-bit code.
++ */
++ [S_LIN] =
++ "^pa_",
++};
++
++static const char * const *sym_regex;
++
++static regex_t sym_regex_c[S_NSYMTYPES];
++static int is_reloc(enum symtype type, const char *sym_name)
++{
++ return sym_regex[type] &&
++ !regexec(&sym_regex_c[type], sym_name, 0, NULL, 0);
++}
++
++static void regex_init(int use_real_mode)
++{
++ char errbuf[128];
++ int err;
++ int i;
++
++ if (use_real_mode)
++ sym_regex = sym_regex_realmode;
++ else
++ sym_regex = sym_regex_kernel;
++
++ for (i = 0; i < S_NSYMTYPES; i++) {
++ if (!sym_regex[i])
++ continue;
++
++ err = regcomp(&sym_regex_c[i], sym_regex[i],
++ REG_EXTENDED|REG_NOSUB);
++
++ if (err) {
++ regerror(err, &sym_regex_c[i], errbuf, sizeof errbuf);
++ die("%s", errbuf);
++ }
++ }
++}
++
++static void die(char *fmt, ...)
++{
++ va_list ap;
++ va_start(ap, fmt);
++ vfprintf(stderr, fmt, ap);
++ va_end(ap);
++ exit(1);
++}
++
++static const char *sym_type(unsigned type)
++{
++ static const char *type_name[] = {
++#define SYM_TYPE(X) [X] = #X
++ SYM_TYPE(STT_NOTYPE),
++ SYM_TYPE(STT_OBJECT),
++ SYM_TYPE(STT_FUNC),
++ SYM_TYPE(STT_SECTION),
++ SYM_TYPE(STT_FILE),
++ SYM_TYPE(STT_COMMON),
++ SYM_TYPE(STT_TLS),
++#undef SYM_TYPE
++ };
++ const char *name = "unknown sym type name";
++ if (type < ARRAY_SIZE(type_name)) {
++ name = type_name[type];
++ }
++ return name;
++}
++
++static const char *sym_bind(unsigned bind)
++{
++ static const char *bind_name[] = {
++#define SYM_BIND(X) [X] = #X
++ SYM_BIND(STB_LOCAL),
++ SYM_BIND(STB_GLOBAL),
++ SYM_BIND(STB_WEAK),
++#undef SYM_BIND
++ };
++ const char *name = "unknown sym bind name";
++ if (bind < ARRAY_SIZE(bind_name)) {
++ name = bind_name[bind];
++ }
++ return name;
++}
++
++static const char *sym_visibility(unsigned visibility)
++{
++ static const char *visibility_name[] = {
++#define SYM_VISIBILITY(X) [X] = #X
++ SYM_VISIBILITY(STV_DEFAULT),
++ SYM_VISIBILITY(STV_INTERNAL),
++ SYM_VISIBILITY(STV_HIDDEN),
++ SYM_VISIBILITY(STV_PROTECTED),
++#undef SYM_VISIBILITY
++ };
++ const char *name = "unknown sym visibility name";
++ if (visibility < ARRAY_SIZE(visibility_name)) {
++ name = visibility_name[visibility];
++ }
++ return name;
++}
++
++static const char *rel_type(unsigned type)
++{
++ static const char *type_name[] = {
++#define REL_TYPE(X) [X] = #X
++ REL_TYPE(R_386_NONE),
++ REL_TYPE(R_386_32),
++ REL_TYPE(R_386_PC32),
++ REL_TYPE(R_386_GOT32),
++ REL_TYPE(R_386_PLT32),
++ REL_TYPE(R_386_COPY),
++ REL_TYPE(R_386_GLOB_DAT),
++ REL_TYPE(R_386_JMP_SLOT),
++ REL_TYPE(R_386_RELATIVE),
++ REL_TYPE(R_386_GOTOFF),
++ REL_TYPE(R_386_GOTPC),
++ REL_TYPE(R_386_8),
++ REL_TYPE(R_386_PC8),
++ REL_TYPE(R_386_16),
++ REL_TYPE(R_386_PC16),
++#undef REL_TYPE
++ };
++ const char *name = "unknown type rel type name";
++ if (type < ARRAY_SIZE(type_name) && type_name[type]) {
++ name = type_name[type];
++ }
++ return name;
++}
++
++static const char *sec_name(unsigned shndx)
++{
++ const char *sec_strtab;
++ const char *name;
++ sec_strtab = secs[ehdr.e_shstrndx].strtab;
++ name = "<noname>";
++ if (shndx < ehdr.e_shnum) {
++ name = sec_strtab + secs[shndx].shdr.sh_name;
++ }
++ else if (shndx == SHN_ABS) {
++ name = "ABSOLUTE";
++ }
++ else if (shndx == SHN_COMMON) {
++ name = "COMMON";
++ }
++ return name;
++}
++
++static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym)
++{
++ const char *name;
++ name = "<noname>";
++ if (sym->st_name) {
++ name = sym_strtab + sym->st_name;
++ }
++ else {
++ name = sec_name(sym->st_shndx);
++ }
++ return name;
++}
++
++
++
++#if BYTE_ORDER == LITTLE_ENDIAN
++#define le16_to_cpu(val) (val)
++#define le32_to_cpu(val) (val)
++#endif
++#if BYTE_ORDER == BIG_ENDIAN
++#define le16_to_cpu(val) bswap_16(val)
++#define le32_to_cpu(val) bswap_32(val)
++#endif
++
++static uint16_t elf16_to_cpu(uint16_t val)
++{
++ return le16_to_cpu(val);
++}
++
++static uint32_t elf32_to_cpu(uint32_t val)
++{
++ return le32_to_cpu(val);
++}
++
++static void read_ehdr(FILE *fp)
++{
++ if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) {
++ die("Cannot read ELF header: %s\n",
++ strerror(errno));
++ }
++ if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) {
++ die("No ELF magic\n");
++ }
++ if (ehdr.e_ident[EI_CLASS] != ELFCLASS32) {
++ die("Not a 32 bit executable\n");
++ }
++ if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB) {
++ die("Not a LSB ELF executable\n");
++ }
++ if (ehdr.e_ident[EI_VERSION] != EV_CURRENT) {
++ die("Unknown ELF version\n");
++ }
++ /* Convert the fields to native endian */
++ ehdr.e_type = elf16_to_cpu(ehdr.e_type);
++ ehdr.e_machine = elf16_to_cpu(ehdr.e_machine);
++ ehdr.e_version = elf32_to_cpu(ehdr.e_version);
++ ehdr.e_entry = elf32_to_cpu(ehdr.e_entry);
++ ehdr.e_phoff = elf32_to_cpu(ehdr.e_phoff);
++ ehdr.e_shoff = elf32_to_cpu(ehdr.e_shoff);
++ ehdr.e_flags = elf32_to_cpu(ehdr.e_flags);
++ ehdr.e_ehsize = elf16_to_cpu(ehdr.e_ehsize);
++ ehdr.e_phentsize = elf16_to_cpu(ehdr.e_phentsize);
++ ehdr.e_phnum = elf16_to_cpu(ehdr.e_phnum);
++ ehdr.e_shentsize = elf16_to_cpu(ehdr.e_shentsize);
++ ehdr.e_shnum = elf16_to_cpu(ehdr.e_shnum);
++ ehdr.e_shstrndx = elf16_to_cpu(ehdr.e_shstrndx);
++
++ if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) {
++ die("Unsupported ELF header type\n");
++ }
++ if (ehdr.e_machine != EM_386) {
++ die("Not for x86\n");
++ }
++ if (ehdr.e_version != EV_CURRENT) {
++ die("Unknown ELF version\n");
++ }
++ if (ehdr.e_ehsize != sizeof(Elf32_Ehdr)) {
++ die("Bad Elf header size\n");
++ }
++ if (ehdr.e_phentsize != sizeof(Elf32_Phdr)) {
++ die("Bad program header entry\n");
++ }
++ if (ehdr.e_shentsize != sizeof(Elf32_Shdr)) {
++ die("Bad section header entry\n");
++ }
++ if (ehdr.e_shstrndx >= ehdr.e_shnum) {
++ die("String table index out of bounds\n");
++ }
++}
++
++static void read_shdrs(FILE *fp)
++{
++ int i;
++ Elf32_Shdr shdr;
++
++ secs = calloc(ehdr.e_shnum, sizeof(struct section));
++ if (!secs) {
++ die("Unable to allocate %d section headers\n",
++ ehdr.e_shnum);
++ }
++ if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) {
++ die("Seek to %d failed: %s\n",
++ ehdr.e_shoff, strerror(errno));
++ }
++ for (i = 0; i < ehdr.e_shnum; i++) {
++ struct section *sec = &secs[i];
++ if (fread(&shdr, sizeof shdr, 1, fp) != 1)
++ die("Cannot read ELF section headers %d/%d: %s\n",
++ i, ehdr.e_shnum, strerror(errno));
++ sec->shdr.sh_name = elf32_to_cpu(shdr.sh_name);
++ sec->shdr.sh_type = elf32_to_cpu(shdr.sh_type);
++ sec->shdr.sh_flags = elf32_to_cpu(shdr.sh_flags);
++ sec->shdr.sh_addr = elf32_to_cpu(shdr.sh_addr);
++ sec->shdr.sh_offset = elf32_to_cpu(shdr.sh_offset);
++ sec->shdr.sh_size = elf32_to_cpu(shdr.sh_size);
++ sec->shdr.sh_link = elf32_to_cpu(shdr.sh_link);
++ sec->shdr.sh_info = elf32_to_cpu(shdr.sh_info);
++ sec->shdr.sh_addralign = elf32_to_cpu(shdr.sh_addralign);
++ sec->shdr.sh_entsize = elf32_to_cpu(shdr.sh_entsize);
++ if (sec->shdr.sh_link < ehdr.e_shnum)
++ sec->link = &secs[sec->shdr.sh_link];
++ }
++
++}
++
++static void read_strtabs(FILE *fp)
++{
++ int i;
++ for (i = 0; i < ehdr.e_shnum; i++) {
++ struct section *sec = &secs[i];
++ if (sec->shdr.sh_type != SHT_STRTAB) {
++ continue;
++ }
++ sec->strtab = malloc(sec->shdr.sh_size);
++ if (!sec->strtab) {
++ die("malloc of %d bytes for strtab failed\n",
++ sec->shdr.sh_size);
++ }
++ if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
++ die("Seek to %d failed: %s\n",
++ sec->shdr.sh_offset, strerror(errno));
++ }
++ if (fread(sec->strtab, 1, sec->shdr.sh_size, fp)
++ != sec->shdr.sh_size) {
++ die("Cannot read symbol table: %s\n",
++ strerror(errno));
++ }
++ }
++}
++
++static void read_symtabs(FILE *fp)
++{
++ int i,j;
++ for (i = 0; i < ehdr.e_shnum; i++) {
++ struct section *sec = &secs[i];
++ if (sec->shdr.sh_type != SHT_SYMTAB) {
++ continue;
++ }
++ sec->symtab = malloc(sec->shdr.sh_size);
++ if (!sec->symtab) {
++ die("malloc of %d bytes for symtab failed\n",
++ sec->shdr.sh_size);
++ }
++ if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
++ die("Seek to %d failed: %s\n",
++ sec->shdr.sh_offset, strerror(errno));
++ }
++ if (fread(sec->symtab, 1, sec->shdr.sh_size, fp)
++ != sec->shdr.sh_size) {
++ die("Cannot read symbol table: %s\n",
++ strerror(errno));
++ }
++ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) {
++ Elf32_Sym *sym = &sec->symtab[j];
++ sym->st_name = elf32_to_cpu(sym->st_name);
++ sym->st_value = elf32_to_cpu(sym->st_value);
++ sym->st_size = elf32_to_cpu(sym->st_size);
++ sym->st_shndx = elf16_to_cpu(sym->st_shndx);
++ }
++ }
++}
++
++
++static void read_relocs(FILE *fp)
++{
++ int i,j;
++ for (i = 0; i < ehdr.e_shnum; i++) {
++ struct section *sec = &secs[i];
++ if (sec->shdr.sh_type != SHT_REL) {
++ continue;
++ }
++ sec->reltab = malloc(sec->shdr.sh_size);
++ if (!sec->reltab) {
++ die("malloc of %d bytes for relocs failed\n",
++ sec->shdr.sh_size);
++ }
++ if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
++ die("Seek to %d failed: %s\n",
++ sec->shdr.sh_offset, strerror(errno));
++ }
++ if (fread(sec->reltab, 1, sec->shdr.sh_size, fp)
++ != sec->shdr.sh_size) {
++ die("Cannot read symbol table: %s\n",
++ strerror(errno));
++ }
++ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
++ Elf32_Rel *rel = &sec->reltab[j];
++ rel->r_offset = elf32_to_cpu(rel->r_offset);
++ rel->r_info = elf32_to_cpu(rel->r_info);
++ }
++ }
++}
++
++
++static void print_absolute_symbols(void)
++{
++ int i;
++ printf("Absolute symbols\n");
++ printf(" Num: Value Size Type Bind Visibility Name\n");
++ for (i = 0; i < ehdr.e_shnum; i++) {
++ struct section *sec = &secs[i];
++ char *sym_strtab;
++ int j;
++
++ if (sec->shdr.sh_type != SHT_SYMTAB) {
++ continue;
++ }
++ sym_strtab = sec->link->strtab;
++ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) {
++ Elf32_Sym *sym;
++ const char *name;
++ sym = &sec->symtab[j];
++ name = sym_name(sym_strtab, sym);
++ if (sym->st_shndx != SHN_ABS) {
++ continue;
++ }
++ printf("%5d %08x %5d %10s %10s %12s %s\n",
++ j, sym->st_value, sym->st_size,
++ sym_type(ELF32_ST_TYPE(sym->st_info)),
++ sym_bind(ELF32_ST_BIND(sym->st_info)),
++ sym_visibility(ELF32_ST_VISIBILITY(sym->st_other)),
++ name);
++ }
++ }
++ printf("\n");
++}
++
++static void print_absolute_relocs(void)
++{
++ int i, printed = 0;
++
++ for (i = 0; i < ehdr.e_shnum; i++) {
++ struct section *sec = &secs[i];
++ struct section *sec_applies, *sec_symtab;
++ char *sym_strtab;
++ Elf32_Sym *sh_symtab;
++ int j;
++ if (sec->shdr.sh_type != SHT_REL) {
++ continue;
++ }
++ sec_symtab = sec->link;
++ sec_applies = &secs[sec->shdr.sh_info];
++ if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
++ continue;
++ }
++ sh_symtab = sec_symtab->symtab;
++ sym_strtab = sec_symtab->link->strtab;
++ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
++ Elf32_Rel *rel;
++ Elf32_Sym *sym;
++ const char *name;
++ rel = &sec->reltab[j];
++ sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
++ name = sym_name(sym_strtab, sym);
++ if (sym->st_shndx != SHN_ABS) {
++ continue;
++ }
++
++ /* Absolute symbols are not relocated if bzImage is
++ * loaded at a non-compiled address. Display a warning
++ * to user at compile time about the absolute
++ * relocations present.
++ *
++ * User need to audit the code to make sure
++ * some symbols which should have been section
++ * relative have not become absolute because of some
++ * linker optimization or wrong programming usage.
++ *
++ * Before warning check if this absolute symbol
++ * relocation is harmless.
++ */
++ if (is_reloc(S_ABS, name) || is_reloc(S_REL, name))
++ continue;
++
++ if (!printed) {
++ printf("WARNING: Absolute relocations"
++ " present\n");
++ printf("Offset Info Type Sym.Value "
++ "Sym.Name\n");
++ printed = 1;
++ }
++
++ printf("%08x %08x %10s %08x %s\n",
++ rel->r_offset,
++ rel->r_info,
++ rel_type(ELF32_R_TYPE(rel->r_info)),
++ sym->st_value,
++ name);
++ }
++ }
++
++ if (printed)
++ printf("\n");
++}
++
++static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
++ int use_real_mode)
++{
++ int i;
++ /* Walk through the relocations */
++ for (i = 0; i < ehdr.e_shnum; i++) {
++ char *sym_strtab;
++ Elf32_Sym *sh_symtab;
++ struct section *sec_applies, *sec_symtab;
++ int j;
++ struct section *sec = &secs[i];
++
++ if (sec->shdr.sh_type != SHT_REL) {
++ continue;
++ }
++ sec_symtab = sec->link;
++ sec_applies = &secs[sec->shdr.sh_info];
++ if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
++ continue;
++ }
++ sh_symtab = sec_symtab->symtab;
++ sym_strtab = sec_symtab->link->strtab;
++ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
++ Elf32_Rel *rel;
++ Elf32_Sym *sym;
++ unsigned r_type;
++ const char *symname;
++ int shn_abs;
++
++ rel = &sec->reltab[j];
++ sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
++ r_type = ELF32_R_TYPE(rel->r_info);
++
++ shn_abs = sym->st_shndx == SHN_ABS;
++
++ switch (r_type) {
++ case R_386_NONE:
++ case R_386_PC32:
++ case R_386_PC16:
++ case R_386_PC8:
++ /*
++ * NONE can be ignored and and PC relative
++ * relocations don't need to be adjusted.
++ */
++ break;
++
++ case R_386_16:
++ symname = sym_name(sym_strtab, sym);
++ if (!use_real_mode)
++ goto bad;
++ if (shn_abs) {
++ if (is_reloc(S_ABS, symname))
++ break;
++ else if (!is_reloc(S_SEG, symname))
++ goto bad;
++ } else {
++ if (is_reloc(S_LIN, symname))
++ goto bad;
++ else
++ break;
++ }
++ visit(rel, sym);
++ break;
++
++ case R_386_32:
++ symname = sym_name(sym_strtab, sym);
++ if (shn_abs) {
++ if (is_reloc(S_ABS, symname))
++ break;
++ else if (!is_reloc(S_REL, symname))
++ goto bad;
++ } else {
++ if (use_real_mode &&
++ !is_reloc(S_LIN, symname))
++ break;
++ }
++ visit(rel, sym);
++ break;
++ default:
++ die("Unsupported relocation type: %s (%d)\n",
++ rel_type(r_type), r_type);
++ break;
++ bad:
++ symname = sym_name(sym_strtab, sym);
++ die("Invalid %s %s relocation: %s\n",
++ shn_abs ? "absolute" : "relative",
++ rel_type(r_type), symname);
++ }
++ }
++ }
++}
++
++static void count_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
++{
++ if (ELF32_R_TYPE(rel->r_info) == R_386_16)
++ reloc16_count++;
++ else
++ reloc_count++;
++}
++
++static void collect_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
++{
++ /* Remember the address that needs to be adjusted. */
++ if (ELF32_R_TYPE(rel->r_info) == R_386_16)
++ relocs16[reloc16_idx++] = rel->r_offset;
++ else
++ relocs[reloc_idx++] = rel->r_offset;
++}
++
++static int cmp_relocs(const void *va, const void *vb)
++{
++ const unsigned long *a, *b;
++ a = va; b = vb;
++ return (*a == *b)? 0 : (*a > *b)? 1 : -1;
++}
++
++static int write32(unsigned int v, FILE *f)
++{
++ unsigned char buf[4];
++
++ buf[0] = (v >> 0) & 0xff;
++ buf[1] = (v >> 8) & 0xff;
++ buf[2] = (v >> 16) & 0xff;
++ buf[3] = (v >> 24) & 0xff;
++ return fwrite(buf, 1, 4, f) == 4 ? 0 : -1;
++}
++
++static void emit_relocs(int as_text, int use_real_mode)
++{
++ int i;
++ /* Count how many relocations I have and allocate space for them. */
++ reloc_count = 0;
++ walk_relocs(count_reloc, use_real_mode);
++ relocs = malloc(reloc_count * sizeof(relocs[0]));
++ if (!relocs) {
++ die("malloc of %d entries for relocs failed\n",
++ reloc_count);
++ }
++
++ relocs16 = malloc(reloc16_count * sizeof(relocs[0]));
++ if (!relocs16) {
++ die("malloc of %d entries for relocs16 failed\n",
++ reloc16_count);
++ }
++ /* Collect up the relocations */
++ reloc_idx = 0;
++ walk_relocs(collect_reloc, use_real_mode);
++
++ if (reloc16_count && !use_real_mode)
++ die("Segment relocations found but --realmode not specified\n");
++
++ /* Order the relocations for more efficient processing */
++ qsort(relocs, reloc_count, sizeof(relocs[0]), cmp_relocs);
++ qsort(relocs16, reloc16_count, sizeof(relocs16[0]), cmp_relocs);
++
++ /* Print the relocations */
++ if (as_text) {
++ /* Print the relocations in a form suitable that
++ * gas will like.
++ */
++ printf(".section \".data.reloc\",\"a\"\n");
++ printf(".balign 4\n");
++ if (use_real_mode) {
++ printf("\t.long %lu\n", reloc16_count);
++ for (i = 0; i < reloc16_count; i++)
++ printf("\t.long 0x%08lx\n", relocs16[i]);
++ printf("\t.long %lu\n", reloc_count);
++ for (i = 0; i < reloc_count; i++) {
++ printf("\t.long 0x%08lx\n", relocs[i]);
++ }
++ } else {
++ /* Print a stop */
++ printf("\t.long 0x%08lx\n", (unsigned long)0);
++ for (i = 0; i < reloc_count; i++) {
++ printf("\t.long 0x%08lx\n", relocs[i]);
++ }
++ }
++
++ printf("\n");
++ }
++ else {
++ if (use_real_mode) {
++ write32(reloc16_count, stdout);
++ for (i = 0; i < reloc16_count; i++)
++ write32(relocs16[i], stdout);
++ write32(reloc_count, stdout);
++
++ /* Now print each relocation */
++ for (i = 0; i < reloc_count; i++)
++ write32(relocs[i], stdout);
++ } else {
++ /* Print a stop */
++ write32(0, stdout);
++
++ /* Now print each relocation */
++ for (i = 0; i < reloc_count; i++) {
++ write32(relocs[i], stdout);
++ }
++ }
++ }
++}
++
++static void usage(void)
++{
++ die("relocs [--abs-syms|--abs-relocs|--text|--realmode] vmlinux\n");
++}
++
++int main(int argc, char **argv)
++{
++ int show_absolute_syms, show_absolute_relocs;
++ int as_text, use_real_mode;
++ const char *fname;
++ FILE *fp;
++ int i;
++
++ show_absolute_syms = 0;
++ show_absolute_relocs = 0;
++ as_text = 0;
++ use_real_mode = 0;
++ fname = NULL;
++ for (i = 1; i < argc; i++) {
++ char *arg = argv[i];
++ if (*arg == '-') {
++ if (strcmp(arg, "--abs-syms") == 0) {
++ show_absolute_syms = 1;
++ continue;
++ }
++ if (strcmp(arg, "--abs-relocs") == 0) {
++ show_absolute_relocs = 1;
++ continue;
++ }
++ if (strcmp(arg, "--text") == 0) {
++ as_text = 1;
++ continue;
++ }
++ if (strcmp(arg, "--realmode") == 0) {
++ use_real_mode = 1;
++ continue;
++ }
++ }
++ else if (!fname) {
++ fname = arg;
++ continue;
++ }
++ usage();
++ }
++ if (!fname) {
++ usage();
++ }
++ regex_init(use_real_mode);
++ fp = fopen(fname, "r");
++ if (!fp) {
++ die("Cannot open %s: %s\n",
++ fname, strerror(errno));
++ }
++ read_ehdr(fp);
++ read_shdrs(fp);
++ read_strtabs(fp);
++ read_symtabs(fp);
++ read_relocs(fp);
++ if (show_absolute_syms) {
++ print_absolute_symbols();
++ return 0;
++ }
++ if (show_absolute_relocs) {
++ print_absolute_relocs();
++ return 0;
++ }
++ emit_relocs(as_text, use_real_mode);
++ return 0;
++}
+diff --git a/block/genhd.c b/block/genhd.c
+index 997afd6..4927476 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -744,7 +744,7 @@ void __init printk_all_partitions(void)
+ struct hd_struct *part;
+ char name_buf[BDEVNAME_SIZE];
+ char devt_buf[BDEVT_SIZE];
+- u8 uuid[PARTITION_META_INFO_UUIDLTH * 2 + 1];
++ char uuid_buf[PARTITION_META_INFO_UUIDLTH * 2 + 5];
+
+ /*
+ * Don't show empty devices or things that have been
+@@ -763,14 +763,16 @@ void __init printk_all_partitions(void)
+ while ((part = disk_part_iter_next(&piter))) {
+ bool is_part0 = part == &disk->part0;
+
+- uuid[0] = 0;
++ uuid_buf[0] = '\0';
+ if (part->info)
+- part_unpack_uuid(part->info->uuid, uuid);
++ snprintf(uuid_buf, sizeof(uuid_buf), "%pU",
++ part->info->uuid);
+
+ printk("%s%s %10llu %s %s", is_part0 ? "" : " ",
+ bdevt_str(part_devt(part), devt_buf),
+ (unsigned long long)part->nr_sects >> 1,
+- disk_name(disk, part->partno, name_buf), uuid);
++ disk_name(disk, part->partno, name_buf),
++ uuid_buf);
+ if (is_part0) {
+ if (disk->driverfs_dev != NULL &&
+ disk->driverfs_dev->driver != NULL)
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 0a7ed69..ca191ff 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -438,6 +438,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
+ },
+ {
+ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VPCCW29FX",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
+ .ident = "Averatec AV1020-ED2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index cf26222..fb65915 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -384,6 +384,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
+ { PCI_DEVICE(0x1b4b, 0x9125),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
++ { PCI_DEVICE(0x1b4b, 0x917a),
++ .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
+ { PCI_DEVICE(0x1b4b, 0x91a3),
+ .driver_data = board_ahci_yes_fbs },
+
+diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
+index 5cd04b6..edcffd6 100644
+--- a/drivers/gpio/gpio-mpc8xxx.c
++++ b/drivers/gpio/gpio-mpc8xxx.c
+@@ -163,7 +163,8 @@ static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc)
+ if (mask)
+ generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq,
+ 32 - ffs(mask)));
+- chip->irq_eoi(&desc->irq_data);
++ if (chip->irq_eoi)
++ chip->irq_eoi(&desc->irq_data);
+ }
+
+ static void mpc8xxx_irq_unmask(struct irq_data *d)
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index d47a53b..d3820c2 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -424,14 +424,11 @@ static void gen6_pm_rps_work(struct work_struct *work)
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+ }
+
+-static void pch_irq_handler(struct drm_device *dev)
++static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+- u32 pch_iir;
+ int pipe;
+
+- pch_iir = I915_READ(SDEIIR);
+-
+ if (pch_iir & SDE_AUDIO_POWER_MASK)
+ DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+ (pch_iir & SDE_AUDIO_POWER_MASK) >>
+@@ -529,7 +526,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+ if (de_iir & DE_PCH_EVENT_IVB) {
+ if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+- pch_irq_handler(dev);
++ pch_irq_handler(dev, pch_iir);
+ }
+
+ if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
+@@ -629,7 +626,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+ if (de_iir & DE_PCH_EVENT) {
+ if (pch_iir & hotplug_mask)
+ queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+- pch_irq_handler(dev);
++ pch_irq_handler(dev, pch_iir);
+ }
+
+ if (de_iir & DE_PCU_EVENT) {
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index cbe5a88..a1d53b6 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -570,6 +570,21 @@
+
+ #define GEN6_BSD_RNCID 0x12198
+
++#define GEN7_FF_THREAD_MODE 0x20a0
++#define GEN7_FF_SCHED_MASK 0x0077070
++#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
++#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
++#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
++#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
++#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
++#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
++#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
++#define GEN7_FF_VS_SCHED_HW (0x0<<12)
++#define GEN7_FF_DS_SCHED_HS1 (0x5<<4)
++#define GEN7_FF_DS_SCHED_HS0 (0x3<<4)
++#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */
++#define GEN7_FF_DS_SCHED_HW (0x0<<4)
++
+ /*
+ * Framebuffer compression (915+ only)
+ */
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 390768f..3ff980d 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -7280,10 +7280,11 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg, val;
++ int i;
+
+ /* Clear any frame start delays used for debugging left by the BIOS */
+- for_each_pipe(pipe) {
+- reg = PIPECONF(pipe);
++ for_each_pipe(i) {
++ reg = PIPECONF(i);
+ I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+ }
+
+@@ -8245,6 +8246,18 @@ static void gen6_init_clock_gating(struct drm_device *dev)
+ }
+ }
+
++static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
++{
++ uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
++
++ reg &= ~GEN7_FF_SCHED_MASK;
++ reg |= GEN7_FF_TS_SCHED_HW;
++ reg |= GEN7_FF_VS_SCHED_HW;
++ reg |= GEN7_FF_DS_SCHED_HW;
++
++ I915_WRITE(GEN7_FF_THREAD_MODE, reg);
++}
++
+ static void ivybridge_init_clock_gating(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -8285,6 +8298,8 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
++
++ gen7_setup_fixed_func_scheduler(dev_priv);
+ }
+
+ static void g4x_init_clock_gating(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index 7cc37e6..d5af089 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -1024,7 +1024,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+
+ nvbo->placement.fpfn = 0;
+ nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
+- nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
++ nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
+ return nouveau_bo_validate(nvbo, false, true, false);
+ }
+
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 38b12e4..2eac8c5 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -26,6 +26,7 @@
+ #include <linux/hid.h>
+ #include <linux/module.h>
+ #include <linux/usb.h>
++#include <asm/unaligned.h>
+ #include "usbhid/usbhid.h"
+ #include "hid-ids.h"
+ #include "hid-logitech-dj.h"
+@@ -265,8 +266,8 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
+ goto dj_device_allocate_fail;
+ }
+
+- dj_dev->reports_supported = le32_to_cpu(
+- dj_report->report_params[DEVICE_PAIRED_RF_REPORT_TYPE]);
++ dj_dev->reports_supported = get_unaligned_le32(
++ dj_report->report_params + DEVICE_PAIRED_RF_REPORT_TYPE);
+ dj_dev->hdev = dj_hiddev;
+ dj_dev->dj_receiver_dev = djrcv_dev;
+ dj_dev->device_index = dj_report->device_index;
+diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c
+index 76739c0..bfa9a27 100644
+--- a/drivers/hid/hid-wiimote.c
++++ b/drivers/hid/hid-wiimote.c
+@@ -829,7 +829,7 @@ static void __ir_to_input(struct wiimote_data *wdata, const __u8 *ir,
+
+ /*
+ * Basic IR data is encoded into 3 bytes. The first two bytes are the
+- * upper 8 bit of the X/Y data, the 3rd byte contains the lower 2 bits
++ * lower 8 bit of the X/Y data, the 3rd byte contains the upper 2 bits
+ * of both.
+ * If data is packed, then the 3rd byte is put first and slightly
+ * reordered. This allows to interleave packed and non-packed data to
+@@ -838,17 +838,11 @@ static void __ir_to_input(struct wiimote_data *wdata, const __u8 *ir,
+ */
+
+ if (packed) {
+- x = ir[1] << 2;
+- y = ir[2] << 2;
+-
+- x |= ir[0] & 0x3;
+- y |= (ir[0] >> 2) & 0x3;
++ x = ir[1] | ((ir[0] & 0x03) << 8);
++ y = ir[2] | ((ir[0] & 0x0c) << 6);
+ } else {
+- x = ir[0] << 2;
+- y = ir[1] << 2;
+-
+- x |= (ir[2] >> 4) & 0x3;
+- y |= (ir[2] >> 6) & 0x3;
++ x = ir[0] | ((ir[2] & 0x30) << 4);
++ y = ir[1] | ((ir[2] & 0xc0) << 2);
+ }
+
+ input_report_abs(wdata->ir, xid, x);
+diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
+index a76d85f..79b4bcb 100644
+--- a/drivers/i2c/busses/i2c-davinci.c
++++ b/drivers/i2c/busses/i2c-davinci.c
+@@ -755,7 +755,7 @@ static int davinci_i2c_remove(struct platform_device *pdev)
+ dev->clk = NULL;
+
+ davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0);
+- free_irq(IRQ_I2C, dev);
++ free_irq(dev->irq, dev);
+ iounmap(dev->base);
+ kfree(dev);
+
+diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
+index 730215e..2cda65bf 100644
+--- a/drivers/i2c/busses/i2c-eg20t.c
++++ b/drivers/i2c/busses/i2c-eg20t.c
+@@ -315,7 +315,7 @@ static s32 pch_i2c_wait_for_xfer_complete(struct i2c_algo_pch_data *adap)
+ {
+ long ret;
+ ret = wait_event_timeout(pch_event,
+- (adap->pch_event_flag != 0), msecs_to_jiffies(50));
++ (adap->pch_event_flag != 0), msecs_to_jiffies(1000));
+
+ if (ret == 0) {
+ pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
+diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
+index 46b6500..3d7885a 100644
+--- a/drivers/i2c/busses/i2c-tegra.c
++++ b/drivers/i2c/busses/i2c-tegra.c
+@@ -401,8 +401,6 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
+ disable_irq_nosync(i2c_dev->irq);
+ i2c_dev->irq_disabled = 1;
+ }
+-
+- complete(&i2c_dev->msg_complete);
+ goto err;
+ }
+
+@@ -411,7 +409,6 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
+ i2c_dev->msg_err |= I2C_ERR_NO_ACK;
+ if (status & I2C_INT_ARBITRATION_LOST)
+ i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST;
+- complete(&i2c_dev->msg_complete);
+ goto err;
+ }
+
+@@ -429,14 +426,14 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
+ tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
+ }
+
++ i2c_writel(i2c_dev, status, I2C_INT_STATUS);
++ if (i2c_dev->is_dvc)
++ dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
++
+ if (status & I2C_INT_PACKET_XFER_COMPLETE) {
+ BUG_ON(i2c_dev->msg_buf_remaining);
+ complete(&i2c_dev->msg_complete);
+ }
+-
+- i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+- if (i2c_dev->is_dvc)
+- dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+ return IRQ_HANDLED;
+ err:
+ /* An error occurred, mask all interrupts */
+@@ -446,6 +443,8 @@ err:
+ i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+ if (i2c_dev->is_dvc)
+ dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
++
++ complete(&i2c_dev->msg_complete);
+ return IRQ_HANDLED;
+ }
+
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 71f0c0f..a841123 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -269,7 +269,7 @@ void ib_umem_release(struct ib_umem *umem)
+ } else
+ down_write(&mm->mmap_sem);
+
+- current->mm->locked_vm -= diff;
++ current->mm->pinned_vm -= diff;
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ kfree(umem);
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index 0747004..2e6a538 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -2714,6 +2714,12 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
+ unsigned int tid = GET_TID(req);
+
+ ep = lookup_tid(t, tid);
++ if (!ep) {
++ printk(KERN_WARNING MOD
++ "Abort on non-existent endpoint, tid %d\n", tid);
++ kfree_skb(skb);
++ return 0;
++ }
+ if (is_neg_adv_abort(req->status)) {
+ PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
+ ep->hwtid);
+@@ -2725,11 +2731,8 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
+
+ /*
+ * Wake up any threads in rdma_init() or rdma_fini().
+- * However, this is not needed if com state is just
+- * MPA_REQ_SENT
+ */
+- if (ep->com.state != MPA_REQ_SENT)
+- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
++ c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ sched(dev, skb);
+ return 0;
+ }
+diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
+index 2ee47d0..f00c70e 100644
+--- a/drivers/input/tablet/wacom_wac.c
++++ b/drivers/input/tablet/wacom_wac.c
+@@ -843,12 +843,7 @@ static int wacom_bpt_pen(struct wacom_wac *wacom)
+ unsigned char *data = wacom->data;
+ int prox = 0, x = 0, y = 0, p = 0, d = 0, pen = 0, btn1 = 0, btn2 = 0;
+
+- /*
+- * Similar to Graphire protocol, data[1] & 0x20 is proximity and
+- * data[1] & 0x18 is tool ID. 0x30 is safety check to ignore
+- * 2 unused tool ID's.
+- */
+- prox = (data[1] & 0x30) == 0x30;
++ prox = (data[1] & 0x20) == 0x20;
+
+ /*
+ * All reports shared between PEN and RUBBER tool must be
+diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
+index 35c1e17..97b2e21 100644
+--- a/drivers/iommu/dmar.c
++++ b/drivers/iommu/dmar.c
+@@ -1056,8 +1056,8 @@ static const char *intr_remap_fault_reasons[] =
+
+ const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
+ {
+- if (fault_reason >= 0x20 && (fault_reason <= 0x20 +
+- ARRAY_SIZE(intr_remap_fault_reasons))) {
++ if (fault_reason >= 0x20 && (fault_reason - 0x20 <
++ ARRAY_SIZE(intr_remap_fault_reasons))) {
+ *fault_type = INTR_REMAP;
+ return intr_remap_fault_reasons[fault_reason - 0x20];
+ } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index bdc447f..ccf347f 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -2267,12 +2267,6 @@ static int domain_add_dev_info(struct dmar_domain *domain,
+ if (!info)
+ return -ENOMEM;
+
+- ret = domain_context_mapping(domain, pdev, translation);
+- if (ret) {
+- free_devinfo_mem(info);
+- return ret;
+- }
+-
+ info->segment = pci_domain_nr(pdev->bus);
+ info->bus = pdev->bus->number;
+ info->devfn = pdev->devfn;
+@@ -2285,6 +2279,17 @@ static int domain_add_dev_info(struct dmar_domain *domain,
+ pdev->dev.archdata.iommu = info;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
++ ret = domain_context_mapping(domain, pdev, translation);
++ if (ret) {
++ spin_lock_irqsave(&device_domain_lock, flags);
++ list_del(&info->link);
++ list_del(&info->global);
++ pdev->dev.archdata.iommu = NULL;
++ spin_unlock_irqrestore(&device_domain_lock, flags);
++ free_devinfo_mem(info);
++ return ret;
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
+index 6d5ceee..fd17bb3 100644
+--- a/drivers/isdn/gigaset/capi.c
++++ b/drivers/isdn/gigaset/capi.c
+@@ -14,6 +14,7 @@
+ #include "gigaset.h"
+ #include <linux/proc_fs.h>
+ #include <linux/seq_file.h>
++#include <linux/ratelimit.h>
+ #include <linux/isdn/capilli.h>
+ #include <linux/isdn/capicmd.h>
+ #include <linux/isdn/capiutil.h>
+@@ -223,10 +224,14 @@ get_appl(struct gigaset_capi_ctr *iif, u16 appl)
+ static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p)
+ {
+ #ifdef CONFIG_GIGASET_DEBUG
++ /* dump at most 20 messages in 20 secs */
++ static DEFINE_RATELIMIT_STATE(msg_dump_ratelimit, 20 * HZ, 20);
+ _cdebbuf *cdb;
+
+ if (!(gigaset_debuglevel & level))
+ return;
++ if (!___ratelimit(&msg_dump_ratelimit, tag))
++ return;
+
+ cdb = capi_cmsg2str(p);
+ if (cdb) {
+@@ -1882,6 +1887,9 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif,
+
+ /* check for active logical connection */
+ if (bcs->apconnstate >= APCONN_ACTIVE) {
++ /* clear it */
++ bcs->apconnstate = APCONN_SETUP;
++
+ /*
+ * emit DISCONNECT_B3_IND with cause 0x3301
+ * use separate cmsg structure, as the content of iif->acmsg
+@@ -1906,6 +1914,7 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif,
+ }
+ capi_cmsg2message(b3cmsg,
+ __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN));
++ dump_cmsg(DEBUG_CMD, __func__, b3cmsg);
+ kfree(b3cmsg);
+ capi_ctr_handle_message(&iif->ctr, ap->id, b3skb);
+ }
+@@ -2059,12 +2068,6 @@ static void do_reset_b3_req(struct gigaset_capi_ctr *iif,
+ }
+
+ /*
+- * dump unsupported/ignored messages at most twice per minute,
+- * some apps send those very frequently
+- */
+-static unsigned long ignored_msg_dump_time;
+-
+-/*
+ * unsupported CAPI message handler
+ */
+ static void do_unsupported(struct gigaset_capi_ctr *iif,
+@@ -2073,8 +2076,7 @@ static void do_unsupported(struct gigaset_capi_ctr *iif,
+ {
+ /* decode message */
+ capi_message2cmsg(&iif->acmsg, skb->data);
+- if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000))
+- dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
++ dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+ send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
+ }
+
+@@ -2085,11 +2087,9 @@ static void do_nothing(struct gigaset_capi_ctr *iif,
+ struct gigaset_capi_appl *ap,
+ struct sk_buff *skb)
+ {
+- if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000)) {
+- /* decode message */
+- capi_message2cmsg(&iif->acmsg, skb->data);
+- dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+- }
++ /* decode message */
++ capi_message2cmsg(&iif->acmsg, skb->data);
++ dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
+ dev_kfree_skb_any(skb);
+ }
+
+diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
+index 6d12623..e95fac0 100644
+--- a/drivers/isdn/gigaset/ev-layer.c
++++ b/drivers/isdn/gigaset/ev-layer.c
+@@ -190,6 +190,7 @@ struct reply_t gigaset_tab_nocid[] =
+ ACT_INIT} },
+ {RSP_OK, 121, 121, -1, 0, 0, {ACT_GOTVER,
+ ACT_INIT} },
++{RSP_NONE, 121, 121, -1, 120, 0, {ACT_GETSTRING} },
+
+ /* leave dle mode */
+ {RSP_INIT, 0, 0, SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"},
+@@ -1314,8 +1315,9 @@ static void do_action(int action, struct cardstate *cs,
+ s = ev->ptr;
+
+ if (!strcmp(s, "OK")) {
++ /* OK without version string: assume old response */
+ *p_genresp = 1;
+- *p_resp_code = RSP_ERROR;
++ *p_resp_code = RSP_NONE;
+ break;
+ }
+
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index adcd850..700ecae 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -453,7 +453,7 @@ static void submit_flushes(struct work_struct *ws)
+ atomic_inc(&rdev->nr_pending);
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+- bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev);
++ bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
+ bi->bi_end_io = md_end_flush;
+ bi->bi_private = rdev;
+ bi->bi_bdev = rdev->bdev;
+diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
+index 51c7121..b7d1e3e 100644
+--- a/drivers/media/dvb/siano/smsusb.c
++++ b/drivers/media/dvb/siano/smsusb.c
+@@ -542,6 +542,8 @@ static const struct usb_device_id smsusb_id_table[] __devinitconst = {
+ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+ { USB_DEVICE(0x2040, 0xc090),
+ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
++ { USB_DEVICE(0x2040, 0xc0a0),
++ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+ { } /* Terminating entry */
+ };
+
+diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
+index cf7788f..1030479 100644
+--- a/drivers/media/video/uvc/uvc_v4l2.c
++++ b/drivers/media/video/uvc/uvc_v4l2.c
+@@ -689,7 +689,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
+ break;
+ }
+ pin = iterm->id;
+- } else if (pin < selector->bNrInPins) {
++ } else if (index < selector->bNrInPins) {
+ pin = selector->baSourceID[index];
+ list_for_each_entry(iterm, &chain->entities, chain) {
+ if (!UVC_ENTITY_IS_ITERM(iterm))
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index 3ab565e..558a495 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -663,7 +663,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
+ }
+
+ if (!err && host->sdio_irqs)
+- mmc_signal_sdio_irq(host);
++ wake_up_process(host->sdio_irq_thread);
+ mmc_release_host(host);
+
+ /*
+diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
+index 68f81b9..9dd0462 100644
+--- a/drivers/mmc/core/sdio_irq.c
++++ b/drivers/mmc/core/sdio_irq.c
+@@ -28,18 +28,20 @@
+
+ #include "sdio_ops.h"
+
+-static int process_sdio_pending_irqs(struct mmc_card *card)
++static int process_sdio_pending_irqs(struct mmc_host *host)
+ {
++ struct mmc_card *card = host->card;
+ int i, ret, count;
+ unsigned char pending;
+ struct sdio_func *func;
+
+ /*
+ * Optimization, if there is only 1 function interrupt registered
+- * call irq handler directly
++ * and we know an IRQ was signaled then call irq handler directly.
++ * Otherwise do the full probe.
+ */
+ func = card->sdio_single_irq;
+- if (func) {
++ if (func && host->sdio_irq_pending) {
+ func->irq_handler(func);
+ return 1;
+ }
+@@ -116,7 +118,8 @@ static int sdio_irq_thread(void *_host)
+ ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
+ if (ret)
+ break;
+- ret = process_sdio_pending_irqs(host->card);
++ ret = process_sdio_pending_irqs(host);
++ host->sdio_irq_pending = false;
+ mmc_release_host(host);
+
+ /*
+diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
+index fddb714..a9ff89ff 100644
+--- a/drivers/mtd/sm_ftl.c
++++ b/drivers/mtd/sm_ftl.c
+@@ -1256,7 +1256,7 @@ static void sm_remove_dev(struct mtd_blktrans_dev *dev)
+
+ static struct mtd_blktrans_ops sm_ftl_ops = {
+ .name = "smblk",
+- .major = -1,
++ .major = 0,
+ .part_bits = SM_FTL_PARTN_BITS,
+ .blksize = SM_SECTOR_SIZE,
+ .getgeo = sm_getgeo,
+diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
+index b2dc2c8..2e09edb 100644
+--- a/drivers/net/ethernet/dlink/dl2k.c
++++ b/drivers/net/ethernet/dlink/dl2k.c
+@@ -1259,55 +1259,21 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+ {
+ int phy_addr;
+ struct netdev_private *np = netdev_priv(dev);
+- struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru;
+-
+- struct netdev_desc *desc;
+- int i;
++ struct mii_ioctl_data *miidata = if_mii(rq);
+
+ phy_addr = np->phy_addr;
+ switch (cmd) {
+- case SIOCDEVPRIVATE:
+- break;
+-
+- case SIOCDEVPRIVATE + 1:
+- miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
++ case SIOCGMIIPHY:
++ miidata->phy_id = phy_addr;
+ break;
+- case SIOCDEVPRIVATE + 2:
+- mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
++ case SIOCGMIIREG:
++ miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
+ break;
+- case SIOCDEVPRIVATE + 3:
+- break;
+- case SIOCDEVPRIVATE + 4:
+- break;
+- case SIOCDEVPRIVATE + 5:
+- netif_stop_queue (dev);
++ case SIOCSMIIREG:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
+ break;
+- case SIOCDEVPRIVATE + 6:
+- netif_wake_queue (dev);
+- break;
+- case SIOCDEVPRIVATE + 7:
+- printk
+- ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
+- netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
+- np->old_rx);
+- break;
+- case SIOCDEVPRIVATE + 8:
+- printk("TX ring:\n");
+- for (i = 0; i < TX_RING_SIZE; i++) {
+- desc = &np->tx_ring[i];
+- printk
+- ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
+- i,
+- (u32) (np->tx_ring_dma + i * sizeof (*desc)),
+- (u32)le64_to_cpu(desc->next_desc),
+- (u32)le64_to_cpu(desc->status),
+- (u32)(le64_to_cpu(desc->fraginfo) >> 32),
+- (u32)le64_to_cpu(desc->fraginfo));
+- printk ("\n");
+- }
+- printk ("\n");
+- break;
+-
+ default:
+ return -EOPNOTSUPP;
+ }
+diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
+index ba0adca..30c2da3 100644
+--- a/drivers/net/ethernet/dlink/dl2k.h
++++ b/drivers/net/ethernet/dlink/dl2k.h
+@@ -365,13 +365,6 @@ struct ioctl_data {
+ char *data;
+ };
+
+-struct mii_data {
+- __u16 reserved;
+- __u16 reg_num;
+- __u16 in_value;
+- __u16 out_value;
+-};
+-
+ /* The Rx and Tx buffer descriptors. */
+ struct netdev_desc {
+ __le64 next_desc;
+diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
+index bf8153e..7570c1a 100644
+--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
++++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
+@@ -649,12 +649,8 @@ static int
+ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
+ {
+ struct be_adapter *adapter = netdev_priv(netdev);
+- char file_name[ETHTOOL_FLASH_MAX_FILENAME];
+
+- file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
+- strcpy(file_name, efl->data);
+-
+- return be_load_fw(adapter, file_name);
++ return be_load_fw(adapter, efl->data);
+ }
+
+ static int
+diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
+index 20f0243..c5ce163 100644
+--- a/drivers/net/wireless/b43legacy/main.c
++++ b/drivers/net/wireless/b43legacy/main.c
+@@ -1564,8 +1564,6 @@ static int b43legacy_request_firmware(struct b43legacy_wldev *dev)
+ const char *filename;
+ int err;
+
+- /* do dummy read */
+- ssb_read32(dev->dev, SSB_TMSHIGH);
+ if (!fw->ucode) {
+ if (rev == 2)
+ filename = "ucode2";
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index 2b7bcc8..db34db6 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -113,46 +113,38 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
+ return status;
+ }
+
+-static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len)
++static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
+ {
++ struct device *dev = rtlpriv->io.dev;
++ struct usb_device *udev = to_usb_device(dev);
+ u8 request;
+ u16 wvalue;
+ u16 index;
+- u32 *data;
+- u32 ret;
++ __le32 *data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
+
+- data = kmalloc(sizeof(u32), GFP_KERNEL);
+- if (!data)
+- return -ENOMEM;
+ request = REALTEK_USB_VENQT_CMD_REQ;
+ index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
+
+ wvalue = (u16)addr;
+ _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
+- ret = *data;
+- kfree(data);
+- return ret;
++ if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
++ rtlpriv->usb_data_index = 0;
++ return le32_to_cpu(*data);
+ }
+
+ static u8 _usb_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
+ {
+- struct device *dev = rtlpriv->io.dev;
+-
+- return (u8)_usb_read_sync(to_usb_device(dev), addr, 1);
++ return (u8)_usb_read_sync(rtlpriv, addr, 1);
+ }
+
+ static u16 _usb_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
+ {
+- struct device *dev = rtlpriv->io.dev;
+-
+- return (u16)_usb_read_sync(to_usb_device(dev), addr, 2);
++ return (u16)_usb_read_sync(rtlpriv, addr, 2);
+ }
+
+ static u32 _usb_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
+ {
+- struct device *dev = rtlpriv->io.dev;
+-
+- return _usb_read_sync(to_usb_device(dev), addr, 4);
++ return _usb_read_sync(rtlpriv, addr, 4);
+ }
+
+ static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
+@@ -913,6 +905,11 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
+ return -ENOMEM;
+ }
+ rtlpriv = hw->priv;
++ rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32),
++ GFP_KERNEL);
++ if (!rtlpriv->usb_data)
++ return -ENOMEM;
++ rtlpriv->usb_data_index = 0;
+ SET_IEEE80211_DEV(hw, &intf->dev);
+ udev = interface_to_usbdev(intf);
+ usb_get_dev(udev);
+@@ -990,6 +987,7 @@ void rtl_usb_disconnect(struct usb_interface *intf)
+ /* rtl_deinit_rfkill(hw); */
+ rtl_usb_deinit(hw);
+ rtl_deinit_core(hw);
++ kfree(rtlpriv->usb_data);
+ rtlpriv->cfg->ops->deinit_sw_leds(hw);
+ rtlpriv->cfg->ops->deinit_sw_vars(hw);
+ _rtl_usb_io_handler_release(hw);
+diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
+index 713c7dd..b1e9deb 100644
+--- a/drivers/net/wireless/rtlwifi/wifi.h
++++ b/drivers/net/wireless/rtlwifi/wifi.h
+@@ -63,7 +63,7 @@
+ #define AC_MAX 4
+ #define QOS_QUEUE_NUM 4
+ #define RTL_MAC80211_NUM_QUEUE 5
+-
++#define RTL_USB_MAX_RX_COUNT 100
+ #define QBSS_LOAD_SIZE 5
+ #define MAX_WMMELE_LENGTH 64
+
+@@ -1621,6 +1621,10 @@ struct rtl_priv {
+ interface or hardware */
+ unsigned long status;
+
++ /* data buffer pointer for USB reads */
++ __le32 *usb_data;
++ int usb_data_index;
++
+ /*This must be the last item so
+ that it points to the data allocated
+ beyond this structure like:
+diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
+index c006dee..40c4705 100644
+--- a/drivers/platform/x86/sony-laptop.c
++++ b/drivers/platform/x86/sony-laptop.c
+@@ -127,7 +127,7 @@ MODULE_PARM_DESC(minor,
+ "default is -1 (automatic)");
+ #endif
+
+-static int kbd_backlight; /* = 1 */
++static int kbd_backlight = 1;
+ module_param(kbd_backlight, int, 0444);
+ MODULE_PARM_DESC(kbd_backlight,
+ "set this to 0 to disable keyboard backlight, "
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 938398f..6ec610c 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -2765,6 +2765,8 @@ unset_supplies:
+ unset_regulator_supplies(rdev);
+
+ scrub:
++ if (rdev->supply)
++ regulator_put(rdev->supply);
+ kfree(rdev->constraints);
+ device_unregister(&rdev->dev);
+ /* device core frees rdev */
+diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
+index e86edfc..1e80a48 100644
+--- a/drivers/rtc/rtc-pl031.c
++++ b/drivers/rtc/rtc-pl031.c
+@@ -312,6 +312,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
+ int ret;
+ struct pl031_local *ldata;
+ struct rtc_class_ops *ops = id->data;
++ unsigned long time;
+
+ ret = amba_request_regions(adev, NULL);
+ if (ret)
+@@ -343,6 +344,23 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
+ writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
+ ldata->base + RTC_CR);
+
++ /*
++ * On ST PL031 variants, the RTC reset value does not provide correct
++ * weekday for 2000-01-01. Correct the erroneous sunday to saturday.
++ */
++ if (ldata->hw_designer == AMBA_VENDOR_ST) {
++ if (readl(ldata->base + RTC_YDR) == 0x2000) {
++ time = readl(ldata->base + RTC_DR);
++ if ((time &
++ (RTC_MON_MASK | RTC_MDAY_MASK | RTC_WDAY_MASK))
++ == 0x02120000) {
++ time = time | (0x7 << RTC_WDAY_SHIFT);
++ writel(0x2000, ldata->base + RTC_YLR);
++ writel(time, ldata->base + RTC_LR);
++ }
++ }
++ }
++
+ ldata->rtc = rtc_device_register("pl031", &adev->dev, ops,
+ THIS_MODULE);
+ if (IS_ERR(ldata->rtc)) {
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 865d452..b4d2c86 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -1674,30 +1674,26 @@ static void figure_bus_target_lun(struct ctlr_info *h,
+
+ if (is_logical_dev_addr_mode(lunaddrbytes)) {
+ /* logical device */
+- if (unlikely(is_scsi_rev_5(h))) {
+- /* p1210m, logical drives lun assignments
+- * match SCSI REPORT LUNS data.
++ lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
++ if (is_msa2xxx(h, device)) {
++ /* msa2xxx way, put logicals on bus 1
++ * and match target/lun numbers box
++ * reports.
+ */
+- lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
+- *bus = 0;
+- *target = 0;
+- *lun = (lunid & 0x3fff) + 1;
++ *bus = 1;
++ *target = (lunid >> 16) & 0x3fff;
++ *lun = lunid & 0x00ff;
+ } else {
+- /* not p1210m... */
+- lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
+- if (is_msa2xxx(h, device)) {
+- /* msa2xxx way, put logicals on bus 1
+- * and match target/lun numbers box
+- * reports.
+- */
+- *bus = 1;
+- *target = (lunid >> 16) & 0x3fff;
+- *lun = lunid & 0x00ff;
++ if (likely(is_scsi_rev_5(h))) {
++ /* All current smart arrays (circa 2011) */
++ *bus = 0;
++ *target = 0;
++ *lun = (lunid & 0x3fff) + 1;
+ } else {
+- /* Traditional smart array way. */
++ /* Traditional old smart array way. */
+ *bus = 0;
+- *lun = 0;
+ *target = lunid & 0x3fff;
++ *lun = 0;
+ }
+ }
+ } else {
+@@ -4072,10 +4068,10 @@ static int hpsa_request_irq(struct ctlr_info *h,
+
+ if (h->msix_vector || h->msi_vector)
+ rc = request_irq(h->intr[h->intr_mode], msixhandler,
+- IRQF_DISABLED, h->devname, h);
++ 0, h->devname, h);
+ else
+ rc = request_irq(h->intr[h->intr_mode], intxhandler,
+- IRQF_DISABLED, h->devname, h);
++ IRQF_SHARED, h->devname, h);
+ if (rc) {
+ dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
+ h->intr[h->intr_mode], h->devname);
+diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
+index a97edab..83d08b6 100644
+--- a/drivers/scsi/isci/init.c
++++ b/drivers/scsi/isci/init.c
+@@ -465,7 +465,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
+ if (!orom)
+ orom = isci_request_oprom(pdev);
+
+- for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
++ for (i = 0; orom && i < num_controllers(pdev); i++) {
+ if (sci_oem_parameters_validate(&orom->ctrl[i])) {
+ dev_warn(&pdev->dev,
+ "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
+index b1ddfef..ac336e1 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -3274,7 +3274,7 @@ _base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
+ }
+
+ pfacts = &ioc->pfacts[port];
+- memset(pfacts, 0, sizeof(Mpi2PortFactsReply_t));
++ memset(pfacts, 0, sizeof(struct mpt2sas_port_facts));
+ pfacts->PortNumber = mpi_reply.PortNumber;
+ pfacts->VP_ID = mpi_reply.VP_ID;
+ pfacts->VF_ID = mpi_reply.VF_ID;
+@@ -3316,7 +3316,7 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+ }
+
+ facts = &ioc->facts;
+- memset(facts, 0, sizeof(Mpi2IOCFactsReply_t));
++ memset(facts, 0, sizeof(struct mpt2sas_facts));
+ facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
+ facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
+ facts->VP_ID = mpi_reply.VP_ID;
+@@ -4193,7 +4193,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
+ goto out_free_resources;
+
+ ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
+- sizeof(Mpi2PortFactsReply_t), GFP_KERNEL);
++ sizeof(struct mpt2sas_port_facts), GFP_KERNEL);
+ if (!ioc->pfacts) {
+ r = -ENOMEM;
+ goto out_free_resources;
+diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
+index 5f748c0..6a62934 100644
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -933,7 +933,7 @@ err:
+
+ static void fsl_spi_cs_control(struct spi_device *spi, bool on)
+ {
+- struct device *dev = spi->dev.parent;
++ struct device *dev = spi->dev.parent->parent;
+ struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data);
+ u16 cs = spi->chip_select;
+ int gpio = pinfo->gpios[cs];
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index 5e78c77..4ad2c0e 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -280,7 +280,7 @@ static int do_devconfig_ioctl(struct comedi_device *dev,
+ if (ret == 0) {
+ if (!try_module_get(dev->driver->module)) {
+ comedi_device_detach(dev);
+- return -ENOSYS;
++ ret = -ENOSYS;
+ }
+ }
+
+diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
+index eeadf1b..70585b6 100644
+--- a/drivers/tty/serial/8250.c
++++ b/drivers/tty/serial/8250.c
+@@ -2327,10 +2327,11 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+ quot++;
+
+ if (up->capabilities & UART_CAP_FIFO && up->port.fifosize > 1) {
+- if (baud < 2400)
+- fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1;
+- else
+- fcr = uart_config[up->port.type].fcr;
++ fcr = uart_config[up->port.type].fcr;
++ if (baud < 2400) {
++ fcr &= ~UART_FCR_TRIGGER_MASK;
++ fcr |= UART_FCR_TRIGGER_1;
++ }
+ }
+
+ /*
+diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
+index 825937a..482d51e 100644
+--- a/drivers/tty/serial/8250_pci.c
++++ b/drivers/tty/serial/8250_pci.c
+@@ -1590,54 +1590,72 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8811,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8812,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8813,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8814,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x8027,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x8028,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x8029,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x800C,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x800D,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
+ .init = pci_eg20t_init,
+ .setup = pci_default_setup,
+ },
+diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
+index 7e02c9c..5b3d063 100644
+--- a/drivers/tty/serial/mxs-auart.c
++++ b/drivers/tty/serial/mxs-auart.c
+@@ -368,6 +368,8 @@ static void mxs_auart_settermios(struct uart_port *u,
+
+ writel(ctrl, u->membase + AUART_LINECTRL);
+ writel(ctrl2, u->membase + AUART_CTRL2);
++
++ uart_update_timeout(u, termios->c_cflag, baud);
+ }
+
+ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 0406d7f..af5ffb9 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -2305,6 +2305,7 @@ void uart_unregister_driver(struct uart_driver *drv)
+ tty_unregister_driver(p);
+ put_tty_driver(p);
+ kfree(drv->state);
++ drv->state = NULL;
+ drv->tty_driver = NULL;
+ }
+
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 2db0327..554ac90 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -496,11 +496,13 @@ static int wdm_flush(struct file *file, fl_owner_t id)
+ struct wdm_device *desc = file->private_data;
+
+ wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags));
+- if (desc->werr < 0)
++
++ /* cannot dereference desc->intf if WDM_DISCONNECTING */
++ if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags))
+ dev_err(&desc->intf->dev, "Error in flush path: %d\n",
+ desc->werr);
+
+- return desc->werr;
++ return usb_translate_errors(desc->werr);
+ }
+
+ static unsigned int wdm_poll(struct file *file, struct poll_table_struct *wait)
+@@ -511,7 +513,7 @@ static unsigned int wdm_poll(struct file *file, struct poll_table_struct *wait)
+
+ spin_lock_irqsave(&desc->iuspin, flags);
+ if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
+- mask = POLLERR;
++ mask = POLLHUP | POLLERR;
+ spin_unlock_irqrestore(&desc->iuspin, flags);
+ goto desc_out;
+ }
+@@ -586,10 +588,15 @@ static int wdm_release(struct inode *inode, struct file *file)
+ mutex_unlock(&desc->wlock);
+
+ if (!desc->count) {
+- dev_dbg(&desc->intf->dev, "wdm_release: cleanup");
+- kill_urbs(desc);
+- if (!test_bit(WDM_DISCONNECTING, &desc->flags))
++ if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
++ dev_dbg(&desc->intf->dev, "wdm_release: cleanup");
++ kill_urbs(desc);
+ desc->intf->needs_remote_wakeup = 0;
++ } else {
++ /* must avoid dev_printk here as desc->intf is invalid */
++ pr_debug(KBUILD_MODNAME " %s: device gone - cleaning up\n", __func__);
++ cleanup(desc);
++ }
+ }
+ mutex_unlock(&wdm_mutex);
+ return 0;
+@@ -805,6 +812,8 @@ static void wdm_disconnect(struct usb_interface *intf)
+ mutex_unlock(&desc->rlock);
+ if (!desc->count)
+ cleanup(desc);
++ else
++ dev_dbg(&intf->dev, "%s: %d open files - postponing cleanup\n", __func__, desc->count);
+ mutex_unlock(&wdm_mutex);
+ }
+
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 7abf060..f6ff837 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -295,17 +295,14 @@ static struct async *async_getcompleted(struct dev_state *ps)
+ static struct async *async_getpending(struct dev_state *ps,
+ void __user *userurb)
+ {
+- unsigned long flags;
+ struct async *as;
+
+- spin_lock_irqsave(&ps->lock, flags);
+ list_for_each_entry(as, &ps->async_pending, asynclist)
+ if (as->userurb == userurb) {
+ list_del_init(&as->asynclist);
+- spin_unlock_irqrestore(&ps->lock, flags);
+ return as;
+ }
+- spin_unlock_irqrestore(&ps->lock, flags);
++
+ return NULL;
+ }
+
+@@ -360,6 +357,7 @@ static void cancel_bulk_urbs(struct dev_state *ps, unsigned bulk_addr)
+ __releases(ps->lock)
+ __acquires(ps->lock)
+ {
++ struct urb *urb;
+ struct async *as;
+
+ /* Mark all the pending URBs that match bulk_addr, up to but not
+@@ -382,8 +380,11 @@ __acquires(ps->lock)
+ list_for_each_entry(as, &ps->async_pending, asynclist) {
+ if (as->bulk_status == AS_UNLINK) {
+ as->bulk_status = 0; /* Only once */
++ urb = as->urb;
++ usb_get_urb(urb);
+ spin_unlock(&ps->lock); /* Allow completions */
+- usb_unlink_urb(as->urb);
++ usb_unlink_urb(urb);
++ usb_put_urb(urb);
+ spin_lock(&ps->lock);
+ goto rescan;
+ }
+@@ -434,6 +435,7 @@ static void async_completed(struct urb *urb)
+
+ static void destroy_async(struct dev_state *ps, struct list_head *list)
+ {
++ struct urb *urb;
+ struct async *as;
+ unsigned long flags;
+
+@@ -441,10 +443,13 @@ static void destroy_async(struct dev_state *ps, struct list_head *list)
+ while (!list_empty(list)) {
+ as = list_entry(list->next, struct async, asynclist);
+ list_del_init(&as->asynclist);
++ urb = as->urb;
++ usb_get_urb(urb);
+
+ /* drop the spinlock so the completion handler can run */
+ spin_unlock_irqrestore(&ps->lock, flags);
+- usb_kill_urb(as->urb);
++ usb_kill_urb(urb);
++ usb_put_urb(urb);
+ spin_lock_irqsave(&ps->lock, flags);
+ }
+ spin_unlock_irqrestore(&ps->lock, flags);
+@@ -1350,12 +1355,24 @@ static int proc_submiturb(struct dev_state *ps, void __user *arg)
+
+ static int proc_unlinkurb(struct dev_state *ps, void __user *arg)
+ {
++ struct urb *urb;
+ struct async *as;
++ unsigned long flags;
+
++ spin_lock_irqsave(&ps->lock, flags);
+ as = async_getpending(ps, arg);
+- if (!as)
++ if (!as) {
++ spin_unlock_irqrestore(&ps->lock, flags);
+ return -EINVAL;
+- usb_kill_urb(as->urb);
++ }
++
++ urb = as->urb;
++ usb_get_urb(urb);
++ spin_unlock_irqrestore(&ps->lock, flags);
++
++ usb_kill_urb(urb);
++ usb_put_urb(urb);
++
+ return 0;
+ }
+
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 2b0a341..52d27ed 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2420,6 +2420,10 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
+ NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+
++ /* Try to enable USB2 hardware LPM again */
++ if (udev->usb2_hw_lpm_capable == 1)
++ usb_set_usb2_hardware_lpm(udev, 1);
++
+ /* System sleep transitions should never fail */
+ if (!PMSG_IS_AUTO(msg))
+ status = 0;
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 4c65eb6..32d3adc 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -123,6 +123,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Guillemot Webcam Hercules Dualpix Exchange*/
+ { USB_DEVICE(0x06f8, 0x3005), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* Midiman M-Audio Keystation 88es */
++ { USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ /* M-Systems Flash Disk Pioneers */
+ { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
+
+diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
+index 3888778..45e8479 100644
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -132,20 +132,6 @@ static inline int is_usb_device_driver(struct device_driver *drv)
+ for_devices;
+ }
+
+-/* translate USB error codes to codes user space understands */
+-static inline int usb_translate_errors(int error_code)
+-{
+- switch (error_code) {
+- case 0:
+- case -ENOMEM:
+- case -ENODEV:
+- return error_code;
+- default:
+- return -EIO;
+- }
+-}
+-
+-
+ /* for labeling diagnostics */
+ extern const char *usbcore_name;
+
+diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
+index 8e3e509..01de16e 100644
+--- a/drivers/usb/gadget/fsl_udc_core.c
++++ b/drivers/usb/gadget/fsl_udc_core.c
+@@ -736,6 +736,8 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
+ lastreq = list_entry(ep->queue.prev, struct fsl_req, queue);
+ lastreq->tail->next_td_ptr =
+ cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
++ /* Ensure dTD's next dtd pointer to be updated */
++ wmb();
+ /* Read prime bit, if 1 goto done */
+ if (fsl_readl(&dr_regs->endpointprime) & bitmask)
+ return;
+diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
+index 971d312..ee85e81 100644
+--- a/drivers/usb/host/ehci-pci.c
++++ b/drivers/usb/host/ehci-pci.c
+@@ -365,7 +365,9 @@ static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev)
+ {
+ return pdev->class == PCI_CLASS_SERIAL_USB_EHCI &&
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+- pdev->device == 0x1E26;
++ (pdev->device == 0x1E26 ||
++ pdev->device == 0x8C2D ||
++ pdev->device == 0x8C26);
+ }
+
+ static void ehci_enable_xhci_companion(void)
+diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
+index 95a9fec..a60c7aa 100644
+--- a/drivers/usb/host/ohci-at91.c
++++ b/drivers/usb/host/ohci-at91.c
+@@ -199,7 +199,7 @@ static void usb_hcd_at91_remove(struct usb_hcd *hcd,
+ /*-------------------------------------------------------------------------*/
+
+ static int __devinit
+-ohci_at91_start (struct usb_hcd *hcd)
++ohci_at91_reset (struct usb_hcd *hcd)
+ {
+ struct at91_usbh_data *board = hcd->self.controller->platform_data;
+ struct ohci_hcd *ohci = hcd_to_ohci (hcd);
+@@ -209,6 +209,14 @@ ohci_at91_start (struct usb_hcd *hcd)
+ return ret;
+
+ ohci->num_ports = board->ports;
++ return 0;
++}
++
++static int __devinit
++ohci_at91_start (struct usb_hcd *hcd)
++{
++ struct ohci_hcd *ohci = hcd_to_ohci (hcd);
++ int ret;
+
+ if ((ret = ohci_run(ohci)) < 0) {
+ err("can't start %s", hcd->self.bus_name);
+@@ -390,6 +398,7 @@ static const struct hc_driver ohci_at91_hc_driver = {
+ /*
+ * basic lifecycle operations
+ */
++ .reset = ohci_at91_reset,
+ .start = ohci_at91_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 2afff88..833b3c6 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -9,6 +9,7 @@
+ */
+
+ #include <linux/types.h>
++#include <linux/kconfig.h>
+ #include <linux/kernel.h>
+ #include <linux/pci.h>
+ #include <linux/init.h>
+@@ -712,12 +713,28 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done,
+ return -ETIMEDOUT;
+ }
+
+-bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
++#define PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI 0x8C31
++
++bool usb_is_intel_ppt_switchable_xhci(struct pci_dev *pdev)
+ {
+ return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI;
+ }
++
++/* The Intel Lynx Point chipset also has switchable ports. */
++bool usb_is_intel_lpt_switchable_xhci(struct pci_dev *pdev)
++{
++ return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
++ pdev->vendor == PCI_VENDOR_ID_INTEL &&
++ pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI;
++}
++
++bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
++{
++ return usb_is_intel_ppt_switchable_xhci(pdev) ||
++ usb_is_intel_lpt_switchable_xhci(pdev);
++}
+ EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci);
+
+ /*
+@@ -742,6 +759,19 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+ {
+ u32 ports_available;
+
++ /* Don't switchover the ports if the user hasn't compiled the xHCI
++ * driver. Otherwise they will see "dead" USB ports that don't power
++ * the devices.
++ */
++ if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) {
++ dev_warn(&xhci_pdev->dev,
++ "CONFIG_USB_XHCI_HCD is turned off, "
++ "defaulting to EHCI.\n");
++ dev_warn(&xhci_pdev->dev,
++ "USB 3.0 devices will work at USB 2.0 speeds.\n");
++ return;
++ }
++
+ ports_available = 0xffffffff;
+ /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
+ * Register, to turn on SuperSpeed terminations for all
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 01c3800..4232e868 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1699,6 +1699,14 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ {
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct dev_info *dev_info, *next;
++ struct list_head *tt_list_head;
++ struct list_head *tt;
++ struct list_head *endpoints;
++ struct list_head *ep, *q;
++ struct xhci_tt_bw_info *tt_info;
++ struct xhci_interval_bw_table *bwt;
++ struct xhci_virt_ep *virt_ep;
++
+ unsigned long flags;
+ int size;
+ int i;
+@@ -1715,6 +1723,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ xhci->event_ring = NULL;
+ xhci_dbg(xhci, "Freed event ring\n");
+
++ xhci->cmd_ring_reserved_trbs = 0;
+ if (xhci->cmd_ring)
+ xhci_ring_free(xhci, xhci->cmd_ring);
+ xhci->cmd_ring = NULL;
+@@ -1757,8 +1766,26 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
++ bwt = &xhci->rh_bw->bw_table;
++ for (i = 0; i < XHCI_MAX_INTERVAL; i++) {
++ endpoints = &bwt->interval_bw[i].endpoints;
++ list_for_each_safe(ep, q, endpoints) {
++ virt_ep = list_entry(ep, struct xhci_virt_ep, bw_endpoint_list);
++ list_del(&virt_ep->bw_endpoint_list);
++ kfree(virt_ep);
++ }
++ }
++
++ tt_list_head = &xhci->rh_bw->tts;
++ list_for_each_safe(tt, q, tt_list_head) {
++ tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
++ list_del(tt);
++ kfree(tt_info);
++ }
++
+ xhci->num_usb2_ports = 0;
+ xhci->num_usb3_ports = 0;
++ xhci->num_active_eps = 0;
+ kfree(xhci->usb2_ports);
+ kfree(xhci->usb3_ports);
+ kfree(xhci->port_array);
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 211296a..daf5754 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -72,6 +72,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci_dbg(xhci, "QUIRK: Fresco Logic revision %u "
+ "has broken MSI implementation\n",
+ pdev->revision);
++ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ }
+
+ if (pdev->vendor == PCI_VENDOR_ID_NEC)
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 43b3447..fb0981e 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1735,8 +1735,12 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ /* handle completion code */
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+- frame->status = 0;
+- break;
++ if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
++ frame->status = 0;
++ break;
++ }
++ if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
++ trb_comp_code = COMP_SHORT_TX;
+ case COMP_SHORT_TX:
+ frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
+ -EREMOTEIO : 0;
+@@ -1752,6 +1756,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ break;
+ case COMP_DEV_ERR:
+ case COMP_STALL:
++ case COMP_TX_ERR:
+ frame->status = -EPROTO;
+ skip_td = true;
+ break;
+@@ -1832,13 +1837,16 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+ /* Double check that the HW transferred everything. */
+- if (event_trb != td->last_trb) {
++ if (event_trb != td->last_trb ||
++ TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+ xhci_warn(xhci, "WARN Successful completion "
+ "on short TX\n");
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+ *status = 0;
++ if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
++ trb_comp_code = COMP_SHORT_TX;
+ } else {
+ *status = 0;
+ }
+@@ -1977,6 +1985,13 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ * transfer type
+ */
+ case COMP_SUCCESS:
++ if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
++ break;
++ if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
++ trb_comp_code = COMP_SHORT_TX;
++ else
++ xhci_warn(xhci, "WARN Successful completion on short TX: "
++ "needs XHCI_TRUST_TX_LENGTH quirk?\n");
+ case COMP_SHORT_TX:
+ break;
+ case COMP_STOP:
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 4850c4d..363b141 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1466,6 +1466,7 @@ struct xhci_hcd {
+ #define XHCI_RESET_ON_RESUME (1 << 7)
+ #define XHCI_SW_BW_CHECKING (1 << 8)
+ #define XHCI_AMD_0x96_HOST (1 << 9)
++#define XHCI_TRUST_TX_LENGTH (1 << 10)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+ /* There are two roothubs to keep track of bus suspend info for */
+diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
+index bd6d008..b9ac9a3 100644
+--- a/drivers/usb/misc/usbtest.c
++++ b/drivers/usb/misc/usbtest.c
+@@ -1025,7 +1025,10 @@ test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
+ case 13: /* short read, resembling case 10 */
+ req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
+ /* last data packet "should" be DATA1, not DATA0 */
+- len = 1024 - udev->descriptor.bMaxPacketSize0;
++ if (udev->speed == USB_SPEED_SUPER)
++ len = 1024 - 512;
++ else
++ len = 1024 - udev->descriptor.bMaxPacketSize0;
+ expected = -EREMOTEIO;
+ break;
+ case 14: /* short read; try to fill the last packet */
+@@ -1384,11 +1387,15 @@ static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
+
+ static int halt_simple(struct usbtest_dev *dev)
+ {
+- int ep;
+- int retval = 0;
+- struct urb *urb;
++ int ep;
++ int retval = 0;
++ struct urb *urb;
++ struct usb_device *udev = testdev_to_usbdev(dev);
+
+- urb = simple_alloc_urb(testdev_to_usbdev(dev), 0, 512);
++ if (udev->speed == USB_SPEED_SUPER)
++ urb = simple_alloc_urb(udev, 0, 1024);
++ else
++ urb = simple_alloc_urb(udev, 0, 512);
+ if (urb == NULL)
+ return -ENOMEM;
+
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index c4cf3f3..450bdfe 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -809,6 +809,7 @@ static struct usb_device_id id_table_combined [] = {
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
+ { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
++ { USB_DEVICE(PI_VID, PI_E861_PID) },
+ { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
+ { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index c6dd18e..219b199 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -785,6 +785,14 @@
+ #define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */
+ #define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */
+
++
++/*
++ * Physik Instrumente
++ * http://www.physikinstrumente.com/en/products/
++ */
++#define PI_VID 0x1a72 /* Vendor ID */
++#define PI_E861_PID 0x1008 /* E-861 piezo controller USB connection */
++
+ /*
+ * Bayer Ascensia Contour blood glucose meter USB-converter cable.
+ * http://winglucofacts.com/cables/
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index 21c82b0..2856474 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -165,7 +165,7 @@ static unsigned int product_5052_count;
+ /* the array dimension is the number of default entries plus */
+ /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */
+ /* null entry */
+-static struct usb_device_id ti_id_table_3410[14+TI_EXTRA_VID_PID_COUNT+1] = {
++static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = {
+ { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
+ { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
+ { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
+@@ -180,6 +180,7 @@ static struct usb_device_id ti_id_table_3410[14+TI_EXTRA_VID_PID_COUNT+1] = {
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
+ { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
++ { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
+ };
+
+ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
+@@ -189,7 +190,7 @@ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
+ { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
+ };
+
+-static struct usb_device_id ti_id_table_combined[18+2*TI_EXTRA_VID_PID_COUNT+1] = {
++static struct usb_device_id ti_id_table_combined[19+2*TI_EXTRA_VID_PID_COUNT+1] = {
+ { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
+ { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
+ { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
+@@ -208,6 +209,7 @@ static struct usb_device_id ti_id_table_combined[18+2*TI_EXTRA_VID_PID_COUNT+1]
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
+ { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
++ { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
+ { }
+ };
+
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
+index f140f1b..b353e7e 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.h
++++ b/drivers/usb/serial/ti_usb_3410_5052.h
+@@ -37,6 +37,7 @@
+ #define TI_5152_BOOT_PRODUCT_ID 0x5152 /* no EEPROM, no firmware */
+ #define TI_5052_EEPROM_PRODUCT_ID 0x505A /* EEPROM, no firmware */
+ #define TI_5052_FIRMWARE_PRODUCT_ID 0x505F /* firmware is running */
++#define FRI2_PRODUCT_ID 0x5053 /* Fish River Island II */
+
+ /* Multi-Tech vendor and product ids */
+ #define MTS_VENDOR_ID 0x06E0
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 24caba7..591f57f 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1885,6 +1885,13 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
++/* Reported by Jesse Feddema <jdfeddema@gmail.com> */
++UNUSUAL_DEV( 0x177f, 0x0400, 0x0000, 0x0000,
++ "Yarvik",
++ "PMP400",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
++
+ /* Reported by Hans de Goede <hdegoede@redhat.com>
+ * These Appotech controllers are found in Picture Frames, they provide a
+ * (buggy) emulation of a cdrom drive which contains the windows software
+diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
+index 7533458..7152b53 100644
+--- a/drivers/video/omap2/dss/venc.c
++++ b/drivers/video/omap2/dss/venc.c
+@@ -679,6 +679,11 @@ void venc_dump_regs(struct seq_file *s)
+ {
+ #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
+
++ if (cpu_is_omap44xx()) {
++ seq_printf(s, "VENC currently disabled on OMAP44xx\n");
++ return;
++ }
++
+ if (venc_runtime_get())
+ return;
+
+diff --git a/drivers/xen/events.c b/drivers/xen/events.c
+index 6e075cd..fec1204 100644
+--- a/drivers/xen/events.c
++++ b/drivers/xen/events.c
+@@ -600,7 +600,7 @@ static void disable_pirq(struct irq_data *data)
+ disable_dynirq(data);
+ }
+
+-static int find_irq_by_gsi(unsigned gsi)
++int xen_irq_from_gsi(unsigned gsi)
+ {
+ struct irq_info *info;
+
+@@ -614,6 +614,7 @@ static int find_irq_by_gsi(unsigned gsi)
+
+ return -1;
+ }
++EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
+
+ /*
+ * Do not make any assumptions regarding the relationship between the
+@@ -633,7 +634,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
+
+ mutex_lock(&irq_mapping_update_lock);
+
+- irq = find_irq_by_gsi(gsi);
++ irq = xen_irq_from_gsi(gsi);
+ if (irq != -1) {
+ printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
+ irq, gsi);
+diff --git a/fs/aio.c b/fs/aio.c
+index b9d64d8..3b65ee7 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1477,6 +1477,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
+ if (ret < 0)
+ goto out;
+
++ ret = rw_verify_area(type, kiocb->ki_filp, &kiocb->ki_pos, ret);
++ if (ret < 0)
++ goto out;
++
+ kiocb->ki_nr_segs = kiocb->ki_nbytes;
+ kiocb->ki_cur_seg = 0;
+ /* ki_nbytes/left now reflect bytes instead of segs */
+@@ -1488,11 +1492,17 @@ out:
+ return ret;
+ }
+
+-static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
++static ssize_t aio_setup_single_vector(int type, struct file * file, struct kiocb *kiocb)
+ {
++ int bytes;
++
++ bytes = rw_verify_area(type, file, &kiocb->ki_pos, kiocb->ki_left);
++ if (bytes < 0)
++ return bytes;
++
+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
+ kiocb->ki_iovec->iov_base = kiocb->ki_buf;
+- kiocb->ki_iovec->iov_len = kiocb->ki_left;
++ kiocb->ki_iovec->iov_len = bytes;
+ kiocb->ki_nr_segs = 1;
+ kiocb->ki_cur_seg = 0;
+ return 0;
+@@ -1517,10 +1527,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
+ if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
+ kiocb->ki_left)))
+ break;
+- ret = security_file_permission(file, MAY_READ);
+- if (unlikely(ret))
+- break;
+- ret = aio_setup_single_vector(kiocb);
++ ret = aio_setup_single_vector(READ, file, kiocb);
+ if (ret)
+ break;
+ ret = -EINVAL;
+@@ -1535,10 +1542,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
+ if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
+ kiocb->ki_left)))
+ break;
+- ret = security_file_permission(file, MAY_WRITE);
+- if (unlikely(ret))
+- break;
+- ret = aio_setup_single_vector(kiocb);
++ ret = aio_setup_single_vector(WRITE, file, kiocb);
+ if (ret)
+ break;
+ ret = -EINVAL;
+@@ -1549,9 +1553,6 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
+ ret = -EBADF;
+ if (unlikely(!(file->f_mode & FMODE_READ)))
+ break;
+- ret = security_file_permission(file, MAY_READ);
+- if (unlikely(ret))
+- break;
+ ret = aio_setup_vectored_rw(READ, kiocb, compat);
+ if (ret)
+ break;
+@@ -1563,9 +1564,6 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
+ ret = -EBADF;
+ if (unlikely(!(file->f_mode & FMODE_WRITE)))
+ break;
+- ret = security_file_permission(file, MAY_WRITE);
+- if (unlikely(ret))
+- break;
+ ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
+ if (ret)
+ break;
+diff --git a/fs/bio.c b/fs/bio.c
+index b1fe82c..4fc4dbb 100644
+--- a/fs/bio.c
++++ b/fs/bio.c
+@@ -507,11 +507,12 @@ int bio_get_nr_vecs(struct block_device *bdev)
+ struct request_queue *q = bdev_get_queue(bdev);
+ int nr_pages;
+
+- nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+- if (nr_pages > queue_max_segments(q))
+- nr_pages = queue_max_segments(q);
++ nr_pages = min_t(unsigned,
++ queue_max_segments(q),
++ queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
++
++ return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
+
+- return nr_pages;
+ }
+ EXPORT_SYMBOL(bio_get_nr_vecs);
+
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index abe9b48..9b98987 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -68,7 +68,7 @@ static void bdev_inode_switch_bdi(struct inode *inode,
+ spin_unlock(&dst->wb.list_lock);
+ }
+
+-static sector_t max_block(struct block_device *bdev)
++sector_t blkdev_max_block(struct block_device *bdev)
+ {
+ sector_t retval = ~((sector_t)0);
+ loff_t sz = i_size_read(bdev->bd_inode);
+@@ -139,7 +139,7 @@ static int
+ blkdev_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh, int create)
+ {
+- if (iblock >= max_block(I_BDEV(inode))) {
++ if (iblock >= blkdev_max_block(I_BDEV(inode))) {
+ if (create)
+ return -EIO;
+
+@@ -161,7 +161,7 @@ static int
+ blkdev_get_blocks(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh, int create)
+ {
+- sector_t end_block = max_block(I_BDEV(inode));
++ sector_t end_block = blkdev_max_block(I_BDEV(inode));
+ unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
+
+ if ((iblock + max_blocks) > end_block) {
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 19d8eb7..c807931 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -971,6 +971,7 @@ init_page_buffers(struct page *page, struct block_device *bdev,
+ struct buffer_head *head = page_buffers(page);
+ struct buffer_head *bh = head;
+ int uptodate = PageUptodate(page);
++ sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode));
+
+ do {
+ if (!buffer_mapped(bh)) {
+@@ -979,7 +980,8 @@ init_page_buffers(struct page *page, struct block_device *bdev,
+ bh->b_blocknr = block;
+ if (uptodate)
+ set_buffer_uptodate(bh);
+- set_buffer_mapped(bh);
++ if (block < end_block)
++ set_buffer_mapped(bh);
+ }
+ block++;
+ bh = bh->b_this_page;
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index b4c2c99..b1451af 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -703,7 +703,7 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
+ * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
+ * the cached file length
+ */
+- if (origin != SEEK_SET || origin != SEEK_CUR) {
++ if (origin != SEEK_SET && origin != SEEK_CUR) {
+ int rc;
+ struct inode *inode = file->f_path.dentry->d_inode;
+
+diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
+index 5c866e0..adae962 100644
+--- a/fs/ext3/ialloc.c
++++ b/fs/ext3/ialloc.c
+@@ -525,8 +525,12 @@ got:
+ if (IS_DIRSYNC(inode))
+ handle->h_sync = 1;
+ if (insert_inode_locked(inode) < 0) {
+- err = -EINVAL;
+- goto fail_drop;
++ /*
++ * Likely a bitmap corruption causing inode to be allocated
++ * twice.
++ */
++ err = -EIO;
++ goto fail;
+ }
+ spin_lock(&sbi->s_next_gen_lock);
+ inode->i_generation = sbi->s_next_generation++;
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 00beb4f..8fb6844 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -885,8 +885,12 @@ got:
+ if (IS_DIRSYNC(inode))
+ ext4_handle_sync(handle);
+ if (insert_inode_locked(inode) < 0) {
+- err = -EINVAL;
+- goto fail_drop;
++ /*
++ * Likely a bitmap corruption causing inode to be allocated
++ * twice.
++ */
++ err = -EIO;
++ goto fail;
+ }
+ spin_lock(&sbi->s_next_gen_lock);
+ inode->i_generation = sbi->s_next_generation++;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index bab7c58..03d9b90 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3526,16 +3526,16 @@ out:
+ return ret;
+ }
+
+-static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len)
++static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
+ {
+ struct nfs4_cached_acl *acl;
+
+- if (buf && acl_len <= PAGE_SIZE) {
++ if (pages && acl_len <= PAGE_SIZE) {
+ acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
+ if (acl == NULL)
+ goto out;
+ acl->cached = 1;
+- memcpy(acl->data, buf, acl_len);
++ _copy_from_pages(acl->data, pages, pgbase, acl_len);
+ } else {
+ acl = kmalloc(sizeof(*acl), GFP_KERNEL);
+ if (acl == NULL)
+@@ -3568,7 +3568,6 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
+ struct nfs_getaclres res = {
+ .acl_len = buflen,
+ };
+- void *resp_buf;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
+ .rpc_argp = &args,
+@@ -3582,26 +3581,29 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
+ if (npages == 0)
+ npages = 1;
+
++ /* Add an extra page to handle the bitmap returned */
++ npages++;
++
+ for (i = 0; i < npages; i++) {
+ pages[i] = alloc_page(GFP_KERNEL);
+ if (!pages[i])
+ goto out_free;
+ }
+- if (npages > 1) {
+- /* for decoding across pages */
+- res.acl_scratch = alloc_page(GFP_KERNEL);
+- if (!res.acl_scratch)
+- goto out_free;
+- }
++
++ /* for decoding across pages */
++ res.acl_scratch = alloc_page(GFP_KERNEL);
++ if (!res.acl_scratch)
++ goto out_free;
++
+ args.acl_len = npages * PAGE_SIZE;
+ args.acl_pgbase = 0;
++
+ /* Let decode_getfacl know not to fail if the ACL data is larger than
+ * the page we send as a guess */
+ if (buf == NULL)
+ res.acl_flags |= NFS4_ACL_LEN_REQUEST;
+- resp_buf = page_address(pages[0]);
+
+- dprintk("%s buf %p buflen %ld npages %d args.acl_len %ld\n",
++ dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
+ __func__, buf, buflen, npages, args.acl_len);
+ ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
+ &msg, &args.seq_args, &res.seq_res, 0);
+@@ -3610,9 +3612,9 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
+
+ acl_len = res.acl_len - res.acl_data_offset;
+ if (acl_len > args.acl_len)
+- nfs4_write_cached_acl(inode, NULL, acl_len);
++ nfs4_write_cached_acl(inode, NULL, 0, acl_len);
+ else
+- nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
++ nfs4_write_cached_acl(inode, pages, res.acl_data_offset,
+ acl_len);
+ if (buf) {
+ ret = -ERANGE;
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index 68adab4..bdd5bdc 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -4965,11 +4965,19 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
+ bitmap[3] = {0};
+ struct kvec *iov = req->rq_rcv_buf.head;
+ int status;
++ size_t page_len = xdr->buf->page_len;
+
+ res->acl_len = 0;
+ if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
+ goto out;
++
+ bm_p = xdr->p;
++ res->acl_data_offset = be32_to_cpup(bm_p) + 2;
++ res->acl_data_offset <<= 2;
++ /* Check if the acl data starts beyond the allocated buffer */
++ if (res->acl_data_offset > page_len)
++ return -ERANGE;
++
+ if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
+ goto out;
+ if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
+@@ -4979,28 +4987,24 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_ACL)) {
+ size_t hdrlen;
+- u32 recvd;
+
+ /* The bitmap (xdr len + bitmaps) and the attr xdr len words
+ * are stored with the acl data to handle the problem of
+ * variable length bitmaps.*/
+ xdr->p = bm_p;
+- res->acl_data_offset = be32_to_cpup(bm_p) + 2;
+- res->acl_data_offset <<= 2;
+
+ /* We ignore &savep and don't do consistency checks on
+ * the attr length. Let userspace figure it out.... */
+ hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
+ attrlen += res->acl_data_offset;
+- recvd = req->rq_rcv_buf.len - hdrlen;
+- if (attrlen > recvd) {
++ if (attrlen > page_len) {
+ if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
+ /* getxattr interface called with a NULL buf */
+ res->acl_len = attrlen;
+ goto out;
+ }
+- dprintk("NFS: acl reply: attrlen %u > recvd %u\n",
+- attrlen, recvd);
++ dprintk("NFS: acl reply: attrlen %u > page_len %zu\n",
++ attrlen, page_len);
+ return -EINVAL;
+ }
+ xdr_read_pages(xdr, attrlen);
+diff --git a/fs/super.c b/fs/super.c
+index afd0f1a..2a698f6 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -1166,6 +1166,8 @@ int freeze_super(struct super_block *sb)
+ printk(KERN_ERR
+ "VFS:Filesystem freeze failed\n");
+ sb->s_frozen = SB_UNFROZEN;
++ smp_wmb();
++ wake_up(&sb->s_wait_unfrozen);
+ deactivate_locked_super(sb);
+ return ret;
+ }
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 11f1951..43d36b7 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2094,6 +2094,7 @@ extern void unregister_blkdev(unsigned int, const char *);
+ extern struct block_device *bdget(dev_t);
+ extern struct block_device *bdgrab(struct block_device *bdev);
+ extern void bd_set_size(struct block_device *, loff_t size);
++extern sector_t blkdev_max_block(struct block_device *bdev);
+ extern void bd_forget(struct inode *inode);
+ extern void bdput(struct block_device *);
+ extern void invalidate_bdev(struct block_device *);
+diff --git a/include/linux/genhd.h b/include/linux/genhd.h
+index c6f7f6a..4eec461 100644
+--- a/include/linux/genhd.h
++++ b/include/linux/genhd.h
+@@ -222,12 +222,6 @@ static inline void part_pack_uuid(const u8 *uuid_str, u8 *to)
+ }
+ }
+
+-static inline char *part_unpack_uuid(const u8 *uuid, char *out)
+-{
+- sprintf(out, "%pU", uuid);
+- return out;
+-}
+-
+ static inline int disk_max_parts(struct gendisk *disk)
+ {
+ if (disk->flags & GENHD_FL_EXT_DEVT)
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 35410ef..6136821 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -744,6 +744,13 @@ static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+ {
+ return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
+ }
++
++bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
++
++#else
++
++static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
++
+ #endif
+
+ #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+index a3ac9c4..deb6282 100644
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -303,6 +303,7 @@ struct mmc_host {
+
+ unsigned int sdio_irqs;
+ struct task_struct *sdio_irq_thread;
++ bool sdio_irq_pending;
+ atomic_t sdio_irq_thread_abort;
+
+ mmc_pm_flag_t pm_flags; /* requested pm features */
+@@ -356,6 +357,7 @@ extern int mmc_cache_ctrl(struct mmc_host *, u8);
+ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
+ {
+ host->ops->enable_sdio_irq(host, 0);
++ host->sdio_irq_pending = true;
+ wake_up_process(host->sdio_irq_thread);
+ }
+
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 7503352..4269c3f 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -1599,6 +1599,19 @@ usb_maxpacket(struct usb_device *udev, int pipe, int is_out)
+
+ /* ----------------------------------------------------------------------- */
+
++/* translate USB error codes to codes user space understands */
++static inline int usb_translate_errors(int error_code)
++{
++ switch (error_code) {
++ case 0:
++ case -ENOMEM:
++ case -ENODEV:
++ return error_code;
++ default:
++ return -EIO;
++ }
++}
++
+ /* Events from the usb core */
+ #define USB_DEVICE_ADD 0x0001
+ #define USB_DEVICE_REMOVE 0x0002
+diff --git a/include/xen/events.h b/include/xen/events.h
+index d287997..8f3d622 100644
+--- a/include/xen/events.h
++++ b/include/xen/events.h
+@@ -96,6 +96,9 @@ int xen_irq_from_pirq(unsigned pirq);
+ /* Return the pirq allocated to the irq. */
+ int xen_pirq_from_irq(unsigned irq);
+
++/* Return the irq allocated to the gsi */
++int xen_irq_from_gsi(unsigned gsi);
++
+ /* Determine whether to ignore this IRQ if it is passed to a guest. */
+ int xen_test_irq_shared(int irq);
+
+diff --git a/init/do_mounts.c b/init/do_mounts.c
+index db6e5ee..d6c229f 100644
+--- a/init/do_mounts.c
++++ b/init/do_mounts.c
+@@ -470,7 +470,7 @@ void __init change_floppy(char *fmt, ...)
+ void __init mount_root(void)
+ {
+ #ifdef CONFIG_ROOT_NFS
+- if (MAJOR(ROOT_DEV) == UNNAMED_MAJOR) {
++ if (ROOT_DEV == Root_NFS) {
+ if (mount_nfs_root())
+ return;
+
+diff --git a/init/main.c b/init/main.c
+index 217ed23..cb08fea2 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -563,9 +563,6 @@ asmlinkage void __init start_kernel(void)
+ early_boot_irqs_disabled = false;
+ local_irq_enable();
+
+- /* Interrupts are enabled now so all GFP allocations are safe. */
+- gfp_allowed_mask = __GFP_BITS_MASK;
+-
+ kmem_cache_init_late();
+
+ /*
+@@ -798,6 +795,10 @@ static int __init kernel_init(void * unused)
+ * Wait until kthreadd is all set-up.
+ */
+ wait_for_completion(&kthreadd_done);
++
++ /* Now the scheduler is fully set up and can do blocking allocations */
++ gfp_allowed_mask = __GFP_BITS_MASK;
++
+ /*
+ * init can allocate pages on any node
+ */
+diff --git a/kernel/compat.c b/kernel/compat.c
+index f346ced..a6d0649 100644
+--- a/kernel/compat.c
++++ b/kernel/compat.c
+@@ -320,25 +320,54 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
+
+ #ifdef __ARCH_WANT_SYS_SIGPROCMASK
+
+-asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
+- compat_old_sigset_t __user *oset)
++/*
++ * sys_sigprocmask SIG_SETMASK sets the first (compat) word of the
++ * blocked set of signals to the supplied signal set
++ */
++static inline void compat_sig_setmask(sigset_t *blocked, compat_sigset_word set)
+ {
+- old_sigset_t s;
+- long ret;
+- mm_segment_t old_fs;
++ memcpy(blocked->sig, &set, sizeof(set));
++}
+
+- if (set && get_user(s, set))
+- return -EFAULT;
+- old_fs = get_fs();
+- set_fs(KERNEL_DS);
+- ret = sys_sigprocmask(how,
+- set ? (old_sigset_t __user *) &s : NULL,
+- oset ? (old_sigset_t __user *) &s : NULL);
+- set_fs(old_fs);
+- if (ret == 0)
+- if (oset)
+- ret = put_user(s, oset);
+- return ret;
++asmlinkage long compat_sys_sigprocmask(int how,
++ compat_old_sigset_t __user *nset,
++ compat_old_sigset_t __user *oset)
++{
++ old_sigset_t old_set, new_set;
++ sigset_t new_blocked;
++
++ old_set = current->blocked.sig[0];
++
++ if (nset) {
++ if (get_user(new_set, nset))
++ return -EFAULT;
++ new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
++
++ new_blocked = current->blocked;
++
++ switch (how) {
++ case SIG_BLOCK:
++ sigaddsetmask(&new_blocked, new_set);
++ break;
++ case SIG_UNBLOCK:
++ sigdelsetmask(&new_blocked, new_set);
++ break;
++ case SIG_SETMASK:
++ compat_sig_setmask(&new_blocked, new_set);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ set_current_blocked(&new_blocked);
++ }
++
++ if (oset) {
++ if (put_user(old_set, oset))
++ return -EFAULT;
++ }
++
++ return 0;
+ }
+
+ #endif
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index bb425b1..7947e16 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1215,8 +1215,13 @@ static void worker_enter_idle(struct worker *worker)
+ } else
+ wake_up_all(&gcwq->trustee_wait);
+
+- /* sanity check nr_running */
+- WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
++ /*
++ * Sanity check nr_running. Because trustee releases gcwq->lock
++ * between setting %WORKER_ROGUE and zapping nr_running, the
++ * warning may trigger spuriously. Check iff trustee is idle.
++ */
++ WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE &&
++ gcwq->nr_workers == gcwq->nr_idle &&
+ atomic_read(get_gcwq_nr_running(gcwq->cpu)));
+ }
+
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 778554f..c8425b1 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4549,6 +4549,12 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
+ swap_buffers:
+ /* Swap primary and spare array */
+ thresholds->spare = thresholds->primary;
++ /* If all events are unregistered, free the spare array */
++ if (!new) {
++ kfree(thresholds->spare);
++ thresholds->spare = NULL;
++ }
++
+ rcu_assign_pointer(thresholds->primary, new);
+
+ /* To be sure that nobody uses thresholds */
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 2015a1e..d3955f2 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2102,7 +2102,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
+ p->flags |= SWP_SOLIDSTATE;
+ p->cluster_next = 1 + (random32() % p->highest_bit);
+ }
+- if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
++ if ((swap_flags & SWAP_FLAG_DISCARD) && discard_swap(p) == 0)
+ p->flags |= SWP_DISCARDABLE;
+ }
+
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index f444817..2b587ec 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -1549,6 +1549,8 @@ static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
+ if (!dev->ethtool_ops->flash_device)
+ return -EOPNOTSUPP;
+
++ efl.data[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
++
+ return dev->ethtool_ops->flash_device(dev, &efl);
+ }
+
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 3302c56..c1c99dd 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -379,7 +379,15 @@ static void reg_regdb_query(const char *alpha2)
+
+ schedule_work(&reg_regdb_work);
+ }
++
++/* Feel free to add any other sanity checks here */
++static void reg_regdb_size_check(void)
++{
++ /* We should ideally BUILD_BUG_ON() but then random builds would fail */
++ WARN_ONCE(!reg_regdb_size, "db.txt is empty, you should update it...");
++}
+ #else
++static inline void reg_regdb_size_check(void) {}
+ static inline void reg_regdb_query(const char *alpha2) {}
+ #endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+@@ -2236,6 +2244,8 @@ int __init regulatory_init(void)
+ spin_lock_init(&reg_requests_lock);
+ spin_lock_init(&reg_pending_beacons_lock);
+
++ reg_regdb_size_check();
++
+ cfg80211_regdomain = cfg80211_world_regdom;
+
+ user_alpha2[0] = '9';
+diff --git a/scripts/Makefile b/scripts/Makefile
+index df7678f..3626666 100644
+--- a/scripts/Makefile
++++ b/scripts/Makefile
+@@ -8,6 +8,8 @@
+ # conmakehash: Create arrays for initializing the kernel console tables
+ # docproc: Used in Documentation/DocBook
+
++HOST_EXTRACFLAGS += -I$(srctree)/tools/include
++
+ hostprogs-$(CONFIG_KALLSYMS) += kallsyms
+ hostprogs-$(CONFIG_LOGO) += pnmtologo
+ hostprogs-$(CONFIG_VT) += conmakehash
+diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
+index f466587..b4f802d 100644
+--- a/security/selinux/selinuxfs.c
++++ b/security/selinux/selinuxfs.c
+@@ -1238,6 +1238,7 @@ static int sel_make_bools(void)
+ kfree(bool_pending_names[i]);
+ kfree(bool_pending_names);
+ kfree(bool_pending_values);
++ bool_num = 0;
+ bool_pending_names = NULL;
+ bool_pending_values = NULL;
+
+diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
+index b9c7986..f17dfee 100644
+--- a/tools/usb/ffs-test.c
++++ b/tools/usb/ffs-test.c
+@@ -324,7 +324,7 @@ static void *start_thread_helper(void *arg)
+
+ ret = t->in(t, t->buf, t->buf_size);
+ if (ret > 0) {
+- ret = t->out(t, t->buf, t->buf_size);
++ ret = t->out(t, t->buf, ret);
+ name = out_name;
+ op = "write";
+ } else {
+diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
+index fd817a2..533db33 100644
+--- a/virt/kvm/iommu.c
++++ b/virt/kvm/iommu.c
+@@ -239,9 +239,13 @@ int kvm_iommu_map_guest(struct kvm *kvm)
+ return -ENODEV;
+ }
+
++ mutex_lock(&kvm->slots_lock);
++
+ kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
+- if (!kvm->arch.iommu_domain)
+- return -ENOMEM;
++ if (!kvm->arch.iommu_domain) {
++ r = -ENOMEM;
++ goto out_unlock;
++ }
+
+ if (!allow_unsafe_assigned_interrupts &&
+ !iommu_domain_has_cap(kvm->arch.iommu_domain,
+@@ -252,17 +256,16 @@ int kvm_iommu_map_guest(struct kvm *kvm)
+ " module option.\n", __func__);
+ iommu_domain_free(kvm->arch.iommu_domain);
+ kvm->arch.iommu_domain = NULL;
+- return -EPERM;
++ r = -EPERM;
++ goto out_unlock;
+ }
+
+ r = kvm_iommu_map_memslots(kvm);
+ if (r)
+- goto out_unmap;
+-
+- return 0;
++ kvm_iommu_unmap_memslots(kvm);
+
+-out_unmap:
+- kvm_iommu_unmap_memslots(kvm);
++out_unlock:
++ mutex_unlock(&kvm->slots_lock);
+ return r;
+ }
+
+@@ -338,7 +341,11 @@ int kvm_iommu_unmap_guest(struct kvm *kvm)
+ if (!domain)
+ return 0;
+
++ mutex_lock(&kvm->slots_lock);
+ kvm_iommu_unmap_memslots(kvm);
++ kvm->arch.iommu_domain = NULL;
++ mutex_unlock(&kvm->slots_lock);
++
+ iommu_domain_free(domain);
+ return 0;
+ }
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index e401c1b..ec747dc 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -289,15 +289,15 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
+ */
+ idx = srcu_read_lock(&kvm->srcu);
+ spin_lock(&kvm->mmu_lock);
++
+ kvm->mmu_notifier_seq++;
+ need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
+- spin_unlock(&kvm->mmu_lock);
+- srcu_read_unlock(&kvm->srcu, idx);
+-
+ /* we've to flush the tlb before the pages can be freed */
+ if (need_tlb_flush)
+ kvm_flush_remote_tlbs(kvm);
+
++ spin_unlock(&kvm->mmu_lock);
++ srcu_read_unlock(&kvm->srcu, idx);
+ }
+
+ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
+@@ -335,12 +335,12 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ for (; start < end; start += PAGE_SIZE)
+ need_tlb_flush |= kvm_unmap_hva(kvm, start);
+ need_tlb_flush |= kvm->tlbs_dirty;
+- spin_unlock(&kvm->mmu_lock);
+- srcu_read_unlock(&kvm->srcu, idx);
+-
+ /* we've to flush the tlb before the pages can be freed */
+ if (need_tlb_flush)
+ kvm_flush_remote_tlbs(kvm);
++
++ spin_unlock(&kvm->mmu_lock);
++ srcu_read_unlock(&kvm->srcu, idx);
+ }
+
+ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
+@@ -378,13 +378,14 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
+
+ idx = srcu_read_lock(&kvm->srcu);
+ spin_lock(&kvm->mmu_lock);
+- young = kvm_age_hva(kvm, address);
+- spin_unlock(&kvm->mmu_lock);
+- srcu_read_unlock(&kvm->srcu, idx);
+
++ young = kvm_age_hva(kvm, address);
+ if (young)
+ kvm_flush_remote_tlbs(kvm);
+
++ spin_unlock(&kvm->mmu_lock);
++ srcu_read_unlock(&kvm->srcu, idx);
++
+ return young;
+ }
+
+@@ -1666,6 +1667,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
+ goto vcpu_destroy;
+
+ mutex_lock(&kvm->lock);
++ if (!kvm_vcpu_compatible(vcpu)) {
++ r = -EINVAL;
++ goto unlock_vcpu_destroy;
++ }
+ if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
+ r = -EINVAL;
+ goto unlock_vcpu_destroy;
diff --git a/1019_linux-3.2.20.patch b/1019_linux-3.2.20.patch
new file mode 100644
index 00000000..5350ff69
--- /dev/null
+++ b/1019_linux-3.2.20.patch
@@ -0,0 +1,2829 @@
+diff --git a/Makefile b/Makefile
+index c291184..c7e9cc4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 19
++SUBLEVEL = 20
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index ef642a0..987c72d 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -520,7 +520,7 @@ config ARCH_IXP4XX
+ depends on MMU
+ select CLKSRC_MMIO
+ select CPU_XSCALE
+- select GENERIC_GPIO
++ select ARCH_REQUIRE_GPIOLIB
+ select GENERIC_CLOCKEVENTS
+ select HAVE_SCHED_CLOCK
+ select MIGHT_HAVE_PCI
+diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
+index b86a005..caf28fc 100644
+--- a/arch/arm/mach-ixp4xx/common.c
++++ b/arch/arm/mach-ixp4xx/common.c
+@@ -29,6 +29,7 @@
+ #include <linux/clockchips.h>
+ #include <linux/io.h>
+ #include <linux/export.h>
++#include <linux/gpio.h>
+
+ #include <mach/udc.h>
+ #include <mach/hardware.h>
+@@ -106,7 +107,7 @@ static signed char irq2gpio[32] = {
+ 7, 8, 9, 10, 11, 12, -1, -1,
+ };
+
+-int gpio_to_irq(int gpio)
++static int ixp4xx_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+ {
+ int irq;
+
+@@ -116,7 +117,6 @@ int gpio_to_irq(int gpio)
+ }
+ return -EINVAL;
+ }
+-EXPORT_SYMBOL(gpio_to_irq);
+
+ int irq_to_gpio(unsigned int irq)
+ {
+@@ -376,12 +376,56 @@ static struct platform_device *ixp46x_devices[] __initdata = {
+ unsigned long ixp4xx_exp_bus_size;
+ EXPORT_SYMBOL(ixp4xx_exp_bus_size);
+
++static int ixp4xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
++{
++ gpio_line_config(gpio, IXP4XX_GPIO_IN);
++
++ return 0;
++}
++
++static int ixp4xx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
++ int level)
++{
++ gpio_line_set(gpio, level);
++ gpio_line_config(gpio, IXP4XX_GPIO_OUT);
++
++ return 0;
++}
++
++static int ixp4xx_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
++{
++ int value;
++
++ gpio_line_get(gpio, &value);
++
++ return value;
++}
++
++static void ixp4xx_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
++ int value)
++{
++ gpio_line_set(gpio, value);
++}
++
++static struct gpio_chip ixp4xx_gpio_chip = {
++ .label = "IXP4XX_GPIO_CHIP",
++ .direction_input = ixp4xx_gpio_direction_input,
++ .direction_output = ixp4xx_gpio_direction_output,
++ .get = ixp4xx_gpio_get_value,
++ .set = ixp4xx_gpio_set_value,
++ .to_irq = ixp4xx_gpio_to_irq,
++ .base = 0,
++ .ngpio = 16,
++};
++
+ void __init ixp4xx_sys_init(void)
+ {
+ ixp4xx_exp_bus_size = SZ_16M;
+
+ platform_add_devices(ixp4xx_devices, ARRAY_SIZE(ixp4xx_devices));
+
++ gpiochip_add(&ixp4xx_gpio_chip);
++
+ if (cpu_is_ixp46x()) {
+ int region;
+
+diff --git a/arch/arm/mach-ixp4xx/include/mach/gpio.h b/arch/arm/mach-ixp4xx/include/mach/gpio.h
+index 83d6b4e..ef37f26 100644
+--- a/arch/arm/mach-ixp4xx/include/mach/gpio.h
++++ b/arch/arm/mach-ixp4xx/include/mach/gpio.h
+@@ -1,79 +1,2 @@
+-/*
+- * arch/arm/mach-ixp4xx/include/mach/gpio.h
+- *
+- * IXP4XX GPIO wrappers for arch-neutral GPIO calls
+- *
+- * Written by Milan Svoboda <msvoboda@ra.rockwell.com>
+- * Based on PXA implementation by Philipp Zabel <philipp.zabel@gmail.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+- *
+- */
+-
+-#ifndef __ASM_ARCH_IXP4XX_GPIO_H
+-#define __ASM_ARCH_IXP4XX_GPIO_H
+-
+-#include <linux/kernel.h>
+-#include <mach/hardware.h>
+-
+-#define __ARM_GPIOLIB_COMPLEX
+-
+-static inline int gpio_request(unsigned gpio, const char *label)
+-{
+- return 0;
+-}
+-
+-static inline void gpio_free(unsigned gpio)
+-{
+- might_sleep();
+-
+- return;
+-}
+-
+-static inline int gpio_direction_input(unsigned gpio)
+-{
+- gpio_line_config(gpio, IXP4XX_GPIO_IN);
+- return 0;
+-}
+-
+-static inline int gpio_direction_output(unsigned gpio, int level)
+-{
+- gpio_line_set(gpio, level);
+- gpio_line_config(gpio, IXP4XX_GPIO_OUT);
+- return 0;
+-}
+-
+-static inline int gpio_get_value(unsigned gpio)
+-{
+- int value;
+-
+- gpio_line_get(gpio, &value);
+-
+- return value;
+-}
+-
+-static inline void gpio_set_value(unsigned gpio, int value)
+-{
+- gpio_line_set(gpio, value);
+-}
+-
+-#include <asm-generic/gpio.h> /* cansleep wrappers */
+-
+-extern int gpio_to_irq(int gpio);
+-#define gpio_to_irq gpio_to_irq
+-extern int irq_to_gpio(unsigned int irq);
+-
+-#endif
++/* empty */
+
+diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
+index e446bab..a93ed04 100644
+--- a/arch/microblaze/Kconfig
++++ b/arch/microblaze/Kconfig
+@@ -46,7 +46,7 @@ config GENERIC_CLOCKEVENTS
+ def_bool y
+
+ config GENERIC_GPIO
+- def_bool y
++ bool
+
+ config GENERIC_CSUM
+ def_bool y
+diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
+index 3999ec0..67d1ce0 100644
+--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
++++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
+@@ -2,6 +2,7 @@
+ #define BCM63XX_GPIO_H
+
+ #include <linux/init.h>
++#include <bcm63xx_cpu.h>
+
+ int __init bcm63xx_gpio_init(void);
+
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index 5350342..07ef351 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -552,7 +552,7 @@
+ * entry (identifying the physical page) and %r23 up with
+ * the from tlb entry (or nothing if only a to entry---for
+ * clear_user_page_asm) */
+- .macro do_alias spc,tmp,tmp1,va,pte,prot,fault
++ .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
+ cmpib,COND(<>),n 0,\spc,\fault
+ ldil L%(TMPALIAS_MAP_START),\tmp
+ #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
+@@ -581,11 +581,15 @@
+ */
+ cmpiclr,= 0x01,\tmp,%r0
+ ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
+-#ifdef CONFIG_64BIT
++.ifc \patype,20
+ depd,z \prot,8,7,\prot
+-#else
++.else
++.ifc \patype,11
+ depw,z \prot,8,7,\prot
+-#endif
++.else
++ .error "undefined PA type to do_alias"
++.endif
++.endif
+ /*
+ * OK, it is in the temp alias region, check whether "from" or "to".
+ * Check "subtle" note in pacache.S re: r23/r26.
+@@ -1189,7 +1193,7 @@ dtlb_miss_20w:
+ nop
+
+ dtlb_check_alias_20w:
+- do_alias spc,t0,t1,va,pte,prot,dtlb_fault
++ do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
+
+ idtlbt pte,prot
+
+@@ -1213,7 +1217,7 @@ nadtlb_miss_20w:
+ nop
+
+ nadtlb_check_alias_20w:
+- do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
++ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
+
+ idtlbt pte,prot
+
+@@ -1245,7 +1249,7 @@ dtlb_miss_11:
+ nop
+
+ dtlb_check_alias_11:
+- do_alias spc,t0,t1,va,pte,prot,dtlb_fault
++ do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11
+
+ idtlba pte,(va)
+ idtlbp prot,(va)
+@@ -1277,7 +1281,7 @@ nadtlb_miss_11:
+ nop
+
+ nadtlb_check_alias_11:
+- do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
++ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11
+
+ idtlba pte,(va)
+ idtlbp prot,(va)
+@@ -1304,7 +1308,7 @@ dtlb_miss_20:
+ nop
+
+ dtlb_check_alias_20:
+- do_alias spc,t0,t1,va,pte,prot,dtlb_fault
++ do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
+
+ idtlbt pte,prot
+
+@@ -1330,7 +1334,7 @@ nadtlb_miss_20:
+ nop
+
+ nadtlb_check_alias_20:
+- do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
++ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
+
+ idtlbt pte,prot
+
+@@ -1457,7 +1461,7 @@ naitlb_miss_20w:
+ nop
+
+ naitlb_check_alias_20w:
+- do_alias spc,t0,t1,va,pte,prot,naitlb_fault
++ do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
+
+ iitlbt pte,prot
+
+@@ -1511,7 +1515,7 @@ naitlb_miss_11:
+ nop
+
+ naitlb_check_alias_11:
+- do_alias spc,t0,t1,va,pte,prot,itlb_fault
++ do_alias spc,t0,t1,va,pte,prot,itlb_fault,11
+
+ iitlba pte,(%sr0, va)
+ iitlbp prot,(%sr0, va)
+@@ -1557,7 +1561,7 @@ naitlb_miss_20:
+ nop
+
+ naitlb_check_alias_20:
+- do_alias spc,t0,t1,va,pte,prot,naitlb_fault
++ do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
+
+ iitlbt pte,prot
+
+diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
+index fa6f2b8..64a9998 100644
+--- a/arch/parisc/kernel/vmlinux.lds.S
++++ b/arch/parisc/kernel/vmlinux.lds.S
+@@ -50,8 +50,10 @@ SECTIONS
+ . = KERNEL_BINARY_TEXT_START;
+
+ _text = .; /* Text and read-only data */
+- .text ALIGN(16) : {
++ .head ALIGN(16) : {
+ HEAD_TEXT
++ } = 0
++ .text ALIGN(16) : {
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+@@ -65,7 +67,7 @@ SECTIONS
+ *(.fixup)
+ *(.lock.text) /* out-of-line lock text */
+ *(.gnu.warning)
+- } = 0
++ }
+ /* End of text section */
+ _etext = .;
+
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index e7c920b..cca659e 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -982,7 +982,10 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
+ .wbinvd = native_wbinvd,
+
+ .read_msr = native_read_msr_safe,
++ .rdmsr_regs = native_rdmsr_safe_regs,
+ .write_msr = xen_write_msr_safe,
++ .wrmsr_regs = native_wrmsr_safe_regs,
++
+ .read_tsc = native_read_tsc,
+ .read_pmc = native_read_pmc,
+
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index 7711d94..5535477 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -643,11 +643,19 @@ static int acpi_battery_update(struct acpi_battery *battery)
+
+ static void acpi_battery_refresh(struct acpi_battery *battery)
+ {
++ int power_unit;
++
+ if (!battery->bat.dev)
+ return;
+
++ power_unit = battery->power_unit;
++
+ acpi_battery_get_info(battery);
+- /* The battery may have changed its reporting units. */
++
++ if (power_unit == battery->power_unit)
++ return;
++
++ /* The battery has changed its reporting units. */
+ sysfs_remove_battery(battery);
+ sysfs_add_battery(battery);
+ }
+diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
+index 5d1d076..d452592 100644
+--- a/drivers/atm/solos-pci.c
++++ b/drivers/atm/solos-pci.c
+@@ -984,6 +984,7 @@ static uint32_t fpga_tx(struct solos_card *card)
+ } else if (skb && card->using_dma) {
+ SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
++ card->tx_skb[port] = skb;
+ iowrite32(SKB_CB(skb)->dma_addr,
+ card->config_regs + TX_DMA_ADDR(port));
+ }
+@@ -1152,7 +1153,8 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ db_fpga_upgrade = db_firmware_upgrade = 0;
+ }
+
+- if (card->fpga_version >= DMA_SUPPORTED){
++ if (card->fpga_version >= DMA_SUPPORTED) {
++ pci_set_master(dev);
+ card->using_dma = 1;
+ } else {
+ card->using_dma = 0;
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 99fefbd..f1bd44f 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -74,10 +74,15 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x0CF3, 0x311D) },
+ { USB_DEVICE(0x13d3, 0x3375) },
+ { USB_DEVICE(0x04CA, 0x3005) },
++ { USB_DEVICE(0x13d3, 0x3362) },
++ { USB_DEVICE(0x0CF3, 0xE004) },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE02C) },
+
++ /* Atheros AR5BBU22 with sflash firmware */
++ { USB_DEVICE(0x0489, 0xE03C) },
++
+ { } /* Terminating entry */
+ };
+
+@@ -93,6 +98,11 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
++
++ /* Atheros AR5BBU22 with sflash firmware */
++ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
+
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index e56da6a..fc4bcd6 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -61,7 +61,7 @@ static struct usb_device_id btusb_table[] = {
+ { USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
+
+ /* Broadcom SoftSailing reporting vendor specific */
+- { USB_DEVICE(0x05ac, 0x21e1) },
++ { USB_DEVICE(0x0a5c, 0x21e1) },
+
+ /* Apple MacBookPro 7,1 */
+ { USB_DEVICE(0x05ac, 0x8213) },
+@@ -101,9 +101,16 @@ static struct usb_device_id btusb_table[] = {
+ { USB_DEVICE(0x0c10, 0x0000) },
+
+ /* Broadcom BCM20702A0 */
++ { USB_DEVICE(0x0489, 0xe042) },
+ { USB_DEVICE(0x0a5c, 0x21e3) },
++ { USB_DEVICE(0x0a5c, 0x21e6) },
++ { USB_DEVICE(0x0a5c, 0x21e8) },
++ { USB_DEVICE(0x0a5c, 0x21f3) },
+ { USB_DEVICE(0x413c, 0x8197) },
+
++ /* Foxconn - Hon Hai */
++ { USB_DEVICE(0x0489, 0xe033) },
++
+ { } /* Terminating entry */
+ };
+
+@@ -130,10 +137,15 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+
++ /* Atheros AR5BBU12 with sflash firmware */
++ { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
++
+ /* Broadcom BCM2035 */
+ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
+ { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index a1d53b6..06ec1e5 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -3533,7 +3533,11 @@
+ #define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
+ #define GEN6_RP_CONTROL 0xA024
+ #define GEN6_RP_MEDIA_TURBO (1<<11)
+-#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
++#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
++#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9)
++#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9)
++#define GEN6_RP_MEDIA_HW_MODE (1<<9)
++#define GEN6_RP_MEDIA_SW_MODE (0<<9)
+ #define GEN6_RP_MEDIA_IS_GFX (1<<8)
+ #define GEN6_RP_ENABLE (1<<7)
+ #define GEN6_RP_UP_IDLE_MIN (0x1<<3)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 3ff980d..4720397 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -1864,7 +1864,7 @@ static void intel_update_fbc(struct drm_device *dev)
+ if (enable_fbc < 0) {
+ DRM_DEBUG_KMS("fbc set to per-chip default\n");
+ enable_fbc = 1;
+- if (INTEL_INFO(dev)->gen <= 5)
++ if (INTEL_INFO(dev)->gen <= 6)
+ enable_fbc = 0;
+ }
+ if (!enable_fbc) {
+@@ -8005,7 +8005,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+- GEN6_RP_USE_NORMAL_FREQ |
++ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 12eb789..d4c4937 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -1149,10 +1149,10 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+
+ DRM_DEBUG_KMS("Turn eDP power off\n");
+
+- WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
++ WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
+
+ pp = ironlake_get_pp_control(dev_priv);
+- pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
++ pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
+@@ -1260,18 +1260,16 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
+ {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
++
++ /* Make sure the panel is off before trying to change the mode. But also
++ * ensure that we have vdd while we switch off the panel. */
++ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_backlight_off(intel_dp);
+ ironlake_edp_panel_off(intel_dp);
+
+- /* Wake up the sink first */
+- ironlake_edp_panel_vdd_on(intel_dp);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_link_down(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+-
+- /* Make sure the panel is off before trying to
+- * change the mode
+- */
+ }
+
+ static void intel_dp_commit(struct drm_encoder *encoder)
+@@ -1303,10 +1301,11 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
+ uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+ if (mode != DRM_MODE_DPMS_ON) {
++ /* Switching the panel off requires vdd. */
++ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_backlight_off(intel_dp);
+ ironlake_edp_panel_off(intel_dp);
+
+- ironlake_edp_panel_vdd_on(intel_dp);
+ intel_dp_sink_dpms(intel_dp, mode);
+ intel_dp_link_down(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 583c2d0..ceec71b 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -716,6 +716,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ },
+ },
+ {
++ .callback = intel_no_lvds_dmi_callback,
++ .ident = "Clientron E830",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
++ },
++ },
++ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus EeeBox PC EB1007",
+ .matches = {
+@@ -733,6 +741,30 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
++ .ident = "Hewlett-Packard HP t5740e Thin Client",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
++ },
++ },
++ {
++ .callback = intel_no_lvds_dmi_callback,
++ .ident = "Hewlett-Packard t5745",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_BOARD_NAME, "hp t5745"),
++ },
++ },
++ {
++ .callback = intel_no_lvds_dmi_callback,
++ .ident = "Hewlett-Packard st5747",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_BOARD_NAME, "hp st5747"),
++ },
++ },
++ {
++ .callback = intel_no_lvds_dmi_callback,
+ .ident = "MSI Wind Box DC500",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index 8eddcca..a8d8ee5 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -769,10 +769,12 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
+ ((v_sync_len & 0x30) >> 4);
+
+ dtd->part2.dtd_flags = 0x18;
++ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++ dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+- dtd->part2.dtd_flags |= 0x2;
++ dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+- dtd->part2.dtd_flags |= 0x4;
++ dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
+
+ dtd->part2.sdvo_flags = 0;
+ dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
+@@ -806,9 +808,11 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
+ mode->clock = dtd->part1.clock * 10;
+
+ mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+- if (dtd->part2.dtd_flags & 0x2)
++ if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
++ mode->flags |= DRM_MODE_FLAG_INTERLACE;
++ if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+- if (dtd->part2.dtd_flags & 0x4)
++ if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
+index 4aa6f34..372f33b 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
++++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
+@@ -61,6 +61,11 @@ struct intel_sdvo_caps {
+ u16 output_flags;
+ } __attribute__((packed));
+
++/* Note: SDVO detailed timing flags match EDID misc flags. */
++#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
++#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
++#define DTD_FLAG_INTERLACE (1 << 7)
++
+ /** This matches the EDID DTD structure, more or less */
+ struct intel_sdvo_dtd {
+ struct {
+diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
+index 2b1fcad..12041fa 100644
+--- a/drivers/gpu/drm/i915/intel_tv.c
++++ b/drivers/gpu/drm/i915/intel_tv.c
+@@ -1307,6 +1307,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
+
+ I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ I915_WRITE(TV_CTL, save_tv_ctl);
++ POSTING_READ(TV_CTL);
++
++ /* For unknown reasons the hw barfs if we don't do this vblank wait. */
++ intel_wait_for_vblank(intel_tv->base.base.dev,
++ to_intel_crtc(intel_tv->base.base.crtc)->pipe);
+
+ /* Restore interrupt config */
+ if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 92c9628..dac178b 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -977,6 +977,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
++ if ((rdev->family == CHIP_JUNIPER) ||
++ (rdev->family == CHIP_CYPRESS) ||
++ (rdev->family == CHIP_HEMLOCK) ||
++ (rdev->family == CHIP_BARTS))
++ WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
+ }
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+@@ -2074,9 +2079,12 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
+ if (rdev->flags & RADEON_IS_IGP)
+ rdev->config.evergreen.tile_config |= 1 << 4;
+- else
+- rdev->config.evergreen.tile_config |=
+- ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
++ else {
++ if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
++ rdev->config.evergreen.tile_config |= 1 << 4;
++ else
++ rdev->config.evergreen.tile_config |= 0 << 4;
++ }
+ rdev->config.evergreen.tile_config |=
+ ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
+ rdev->config.evergreen.tile_config |=
+@@ -2108,9 +2116,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
+ WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
+ WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
+- }
++ }
+
+- grbm_gfx_index |= SE_BROADCAST_WRITES;
++ grbm_gfx_index = INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+ WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
+ WREG32(RLC_GFX_INDEX, grbm_gfx_index);
+
+diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
+index e00039e..0128445 100644
+--- a/drivers/gpu/drm/radeon/evergreend.h
++++ b/drivers/gpu/drm/radeon/evergreend.h
+@@ -230,6 +230,7 @@
+ #define MC_VM_MD_L1_TLB0_CNTL 0x2654
+ #define MC_VM_MD_L1_TLB1_CNTL 0x2658
+ #define MC_VM_MD_L1_TLB2_CNTL 0x265C
++#define MC_VM_MD_L1_TLB3_CNTL 0x2698
+
+ #define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C
+ #define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 0e57998..9e50814 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -804,8 +804,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
+ rdev->config.cayman.tile_config |= (3 << 0);
+ break;
+ }
+- rdev->config.cayman.tile_config |=
+- ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
++ if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
++ rdev->config.cayman.tile_config |= 1 << 4;
++ else
++ rdev->config.cayman.tile_config |= 0 << 4;
+ rdev->config.cayman.tile_config |=
+ ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
+ rdev->config.cayman.tile_config |=
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 8e1532f..9d2c369 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -438,7 +438,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ */
+ if ((dev->pdev->device == 0x9498) &&
+ (dev->pdev->subsystem_vendor == 0x1682) &&
+- (dev->pdev->subsystem_device == 0x2452)) {
++ (dev->pdev->subsystem_device == 0x2452) &&
++ (i2c_bus->valid == false) &&
++ !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
+ struct radeon_device *rdev = dev->dev_private;
+ *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
+ }
+diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
+index 23ae1c6..e36ba7f 100644
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -151,6 +151,8 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
++ if (rdev->family == CHIP_RV740)
++ WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+@@ -689,8 +691,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
+
+ if (rdev->family == CHIP_RV770)
+ gb_tiling_config |= BANK_TILING(1);
+- else
+- gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
++ else {
++ if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
++ gb_tiling_config |= BANK_TILING(1);
++ else
++ gb_tiling_config |= BANK_TILING(0);
++ }
+ rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
+ gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
+ if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
+diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
+index 79fa588..7538092 100644
+--- a/drivers/gpu/drm/radeon/rv770d.h
++++ b/drivers/gpu/drm/radeon/rv770d.h
+@@ -174,6 +174,7 @@
+ #define MC_VM_MD_L1_TLB0_CNTL 0x2654
+ #define MC_VM_MD_L1_TLB1_CNTL 0x2658
+ #define MC_VM_MD_L1_TLB2_CNTL 0x265C
++#define MC_VM_MD_L1_TLB3_CNTL 0x2698
+ #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
+ #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
+ #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 0bb0f5f..0d27bff 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -1816,6 +1816,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+ spin_unlock(&glob->lru_lock);
+ (void) ttm_bo_cleanup_refs(bo, false, false, false);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
++ spin_lock(&glob->lru_lock);
+ continue;
+ }
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+index f4e7763..c41226a 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+@@ -66,7 +66,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
+ cmd += sizeof(remap_cmd) / sizeof(uint32);
+
+ for (i = 0; i < num_pages; ++i) {
+- if (VMW_PPN_SIZE > 4)
++ if (VMW_PPN_SIZE <= 4)
+ *cmd = page_to_pfn(*pages++);
+ else
+ *((uint64_t *)cmd) = page_to_pfn(*pages++);
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 966a6e7..f1d5408 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -381,12 +381,27 @@ static void dump_command(unsigned long phys_addr)
+
+ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
+ {
+- u32 *event = __evt;
+- int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
+- int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+- int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
+- int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+- u64 address = (u64)(((u64)event[3]) << 32) | event[2];
++ int type, devid, domid, flags;
++ volatile u32 *event = __evt;
++ int count = 0;
++ u64 address;
++
++retry:
++ type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
++ devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
++ domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
++ flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
++ address = (u64)(((u64)event[3]) << 32) | event[2];
++
++ if (type == 0) {
++ /* Did we hit the erratum? */
++ if (++count == LOOP_TIMEOUT) {
++ pr_err("AMD-Vi: No event written to event log\n");
++ return;
++ }
++ udelay(1);
++ goto retry;
++ }
+
+ printk(KERN_ERR "AMD-Vi: Event logged [");
+
+@@ -439,6 +454,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
+ default:
+ printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
+ }
++
++ memset(__evt, 0, 4 * sizeof(u32));
+ }
+
+ static void iommu_poll_events(struct amd_iommu *iommu)
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 20d5852..6269eb0 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -943,6 +943,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
+ if (!iommu->dev)
+ return 1;
+
++ iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
++ PCI_DEVFN(0, 0));
++
+ iommu->cap_ptr = h->cap_ptr;
+ iommu->pci_seg = h->pci_seg;
+ iommu->mmio_phys = h->mmio_phys;
+@@ -1225,20 +1228,16 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
+ {
+ int i, j;
+ u32 ioc_feature_control;
+- struct pci_dev *pdev = NULL;
++ struct pci_dev *pdev = iommu->root_pdev;
+
+ /* RD890 BIOSes may not have completely reconfigured the iommu */
+- if (!is_rd890_iommu(iommu->dev))
++ if (!is_rd890_iommu(iommu->dev) || !pdev)
+ return;
+
+ /*
+ * First, we need to ensure that the iommu is enabled. This is
+ * controlled by a register in the northbridge
+ */
+- pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
+-
+- if (!pdev)
+- return;
+
+ /* Select Northbridge indirect register 0x75 and enable writing */
+ pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
+@@ -1248,8 +1247,6 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
+ if (!(ioc_feature_control & 0x1))
+ pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
+
+- pci_dev_put(pdev);
+-
+ /* Restore the iommu BAR */
+ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
+ iommu->stored_addr_lo);
+diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
+index 5b9c507..40ab83b 100644
+--- a/drivers/iommu/amd_iommu_types.h
++++ b/drivers/iommu/amd_iommu_types.h
+@@ -385,6 +385,9 @@ struct amd_iommu {
+ /* Pointer to PCI device of this IOMMU */
+ struct pci_dev *dev;
+
++ /* Cache pdev to root device for resume quirks */
++ struct pci_dev *root_pdev;
++
+ /* physical address of MMIO space */
+ u64 mmio_phys;
+ /* virtual address of MMIO space */
+diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
+index 318a869..4035b6d 100644
+--- a/drivers/mtd/Kconfig
++++ b/drivers/mtd/Kconfig
+@@ -128,7 +128,7 @@ config MTD_AFS_PARTS
+
+ config MTD_OF_PARTS
+ tristate "OpenFirmware partitioning information support"
+- default Y
++ default y
+ depends on OF
+ help
+ This provides a partition parsing function which derives
+diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
+index 69148ae..f024375 100644
+--- a/drivers/mtd/nand/nand_bbt.c
++++ b/drivers/mtd/nand/nand_bbt.c
+@@ -324,6 +324,7 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+
+ buf += mtd->oobsize + mtd->writesize;
+ len -= mtd->writesize;
++ offs += mtd->writesize;
+ }
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
+index 30745b5..1e52736 100644
+--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
++++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
+@@ -437,7 +437,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
+ length = status & BCOM_FEC_RX_BD_LEN_MASK;
+ skb_put(rskb, length - 4); /* length without CRC32 */
+ rskb->protocol = eth_type_trans(rskb, dev);
+- if (!skb_defer_rx_timestamp(skb))
++ if (!skb_defer_rx_timestamp(rskb))
+ netif_rx(rskb);
+
+ spin_lock(&priv->lock);
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 697cae3..cc2565c 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -62,8 +62,12 @@
+ #define R8169_MSG_DEFAULT \
+ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
+
+-#define TX_BUFFS_AVAIL(tp) \
+- (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
++#define TX_SLOTS_AVAIL(tp) \
++ (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
++
++/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
++#define TX_FRAGS_READY_FOR(tp,nr_frags) \
++ (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
+
+ /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ The RTL chips use a 64 element hash table based on the Ethernet CRC. */
+@@ -5512,7 +5516,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ u32 opts[2];
+ int frags;
+
+- if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
++ if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
+ netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
+ goto err_stop_0;
+ }
+@@ -5560,10 +5564,21 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+
+ RTL_W8(TxPoll, NPQ);
+
+- if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
++ if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
++ /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
++ * not miss a ring update when it notices a stopped queue.
++ */
++ smp_wmb();
+ netif_stop_queue(dev);
+- smp_rmb();
+- if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
++ /* Sync with rtl_tx:
++ * - publish queue status and cur_tx ring index (write barrier)
++ * - refresh dirty_tx ring index (read barrier).
++ * May the current thread have a pessimistic view of the ring
++ * status and forget to wake up queue, a racing rtl_tx thread
++ * can't.
++ */
++ smp_mb();
++ if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
+ netif_wake_queue(dev);
+ }
+
+@@ -5663,9 +5678,16 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
+
+ if (tp->dirty_tx != dirty_tx) {
+ tp->dirty_tx = dirty_tx;
+- smp_wmb();
++ /* Sync with rtl8169_start_xmit:
++ * - publish dirty_tx ring index (write barrier)
++ * - refresh cur_tx ring index and queue status (read barrier)
++ * May the current thread miss the stopped queue condition,
++ * a racing xmit thread can only have a right view of the
++ * ring status.
++ */
++ smp_mb();
+ if (netif_queue_stopped(dev) &&
+- (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
++ TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
+ netif_wake_queue(dev);
+ }
+ /*
+@@ -5674,7 +5696,6 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
+ * of start_xmit activity is detected (if it is not detected,
+ * it is slow enough). -- FR
+ */
+- smp_rmb();
+ if (tp->cur_tx != dirty_tx)
+ RTL_W8(TxPoll, NPQ);
+ }
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 959d448..97f342e 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -258,7 +258,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ xmit_world:
+ skb->ip_summed = ip_summed;
+- skb_set_dev(skb, vlan->lowerdev);
++ skb->dev = vlan->lowerdev;
+ return dev_queue_xmit(skb);
+ }
+
+diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
+index a9abee8..fc147a5 100644
+--- a/drivers/net/usb/asix.c
++++ b/drivers/net/usb/asix.c
+@@ -35,6 +35,7 @@
+ #include <linux/crc32.h>
+ #include <linux/usb/usbnet.h>
+ #include <linux/slab.h>
++#include <linux/if_vlan.h>
+
+ #define DRIVER_VERSION "08-Nov-2011"
+ #define DRIVER_NAME "asix"
+@@ -348,7 +349,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ return 2;
+ }
+
+- if (size > dev->net->mtu + ETH_HLEN) {
++ if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
+ netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
+ size);
+ return 0;
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 03b0a65..76fd277 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -64,7 +64,8 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
+ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ struct ath_txq *txq,
+ struct ath_atx_tid *tid,
+- struct sk_buff *skb);
++ struct sk_buff *skb,
++ bool dequeue);
+
+ enum {
+ MCS_HT20,
+@@ -761,7 +762,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
+ fi = get_frame_info(skb);
+ bf = fi->bf;
+ if (!fi->bf)
+- bf = ath_tx_setup_buffer(sc, txq, tid, skb);
++ bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);
+
+ if (!bf)
+ continue;
+@@ -1669,7 +1670,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
+ return;
+ }
+
+- bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
++ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
+ if (!bf)
+ return;
+
+@@ -1696,7 +1697,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
+
+ bf = fi->bf;
+ if (!bf)
+- bf = ath_tx_setup_buffer(sc, txq, tid, skb);
++ bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
+
+ if (!bf)
+ return;
+@@ -1761,7 +1762,8 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
+ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ struct ath_txq *txq,
+ struct ath_atx_tid *tid,
+- struct sk_buff *skb)
++ struct sk_buff *skb,
++ bool dequeue)
+ {
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_frame_info *fi = get_frame_info(skb);
+@@ -1802,6 +1804,8 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ return bf;
+
+ error:
++ if (dequeue)
++ __skb_unlink(skb, &tid->buf_q);
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+@@ -1833,7 +1837,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
+ */
+ ath_tx_send_ampdu(sc, tid, skb, txctl);
+ } else {
+- bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
++ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
+ if (!bf)
+ goto out;
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
+index 9823e41..a97a52a 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
+@@ -211,7 +211,7 @@ static struct iwl_base_params iwl2000_base_params = {
+ .chain_noise_scale = 1000,
+ .wd_timeout = IWL_DEF_WD_TIMEOUT,
+ .max_event_log_size = 512,
+- .shadow_reg_enable = true,
++ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .hd_v2 = true,
+ };
+
+@@ -230,7 +230,7 @@ static struct iwl_base_params iwl2030_base_params = {
+ .chain_noise_scale = 1000,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+- .shadow_reg_enable = true,
++ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ .hd_v2 = true,
+ };
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
+index b4f809c..0b9f797 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
+@@ -308,7 +308,7 @@ static struct iwl_base_params iwl6000_base_params = {
+ .chain_noise_scale = 1000,
+ .wd_timeout = IWL_DEF_WD_TIMEOUT,
+ .max_event_log_size = 512,
+- .shadow_reg_enable = true,
++ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ };
+
+ static struct iwl_base_params iwl6050_base_params = {
+@@ -325,7 +325,7 @@ static struct iwl_base_params iwl6050_base_params = {
+ .chain_noise_scale = 1500,
+ .wd_timeout = IWL_DEF_WD_TIMEOUT,
+ .max_event_log_size = 1024,
+- .shadow_reg_enable = true,
++ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ };
+ static struct iwl_base_params iwl6000_g2_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+@@ -341,7 +341,7 @@ static struct iwl_base_params iwl6000_g2_base_params = {
+ .chain_noise_scale = 1000,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+- .shadow_reg_enable = true,
++ .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+ };
+
+ static struct iwl_ht_params iwl6000_ht_params = {
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+index 66118ce..9ba2c1b 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+@@ -886,6 +886,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+ if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
+ (priv->bt_full_concurrent != full_concurrent)) {
+ priv->bt_full_concurrent = full_concurrent;
++ priv->last_bt_traffic_load = priv->bt_traffic_load;
+
+ /* Update uCode's rate table. */
+ tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c
+index 1b851f6..e2750a1 100644
+--- a/drivers/net/wireless/wl1251/sdio.c
++++ b/drivers/net/wireless/wl1251/sdio.c
+@@ -260,6 +260,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
+ }
+
+ if (wl->irq) {
++ irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
+ ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
+ if (ret < 0) {
+ wl1251_error("request_irq() failed: %d", ret);
+@@ -267,7 +268,6 @@ static int wl1251_sdio_probe(struct sdio_func *func,
+ }
+
+ irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+- disable_irq(wl->irq);
+
+ wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
+ wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
+diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c
+index eaa5f95..134ae9c 100644
+--- a/drivers/net/wireless/wl1251/spi.c
++++ b/drivers/net/wireless/wl1251/spi.c
+@@ -281,6 +281,7 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
+
+ wl->use_eeprom = pdata->use_eeprom;
+
++ irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
+ ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
+ if (ret < 0) {
+ wl1251_error("request_irq() failed: %d", ret);
+@@ -289,8 +290,6 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
+
+ irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
+
+- disable_irq(wl->irq);
+-
+ ret = wl1251_init_ieee80211(wl);
+ if (ret)
+ goto out_irq;
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index f85cfa6..f0ab58e 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1382,16 +1382,19 @@ static int scsi_lld_busy(struct request_queue *q)
+ {
+ struct scsi_device *sdev = q->queuedata;
+ struct Scsi_Host *shost;
+- struct scsi_target *starget;
+
+ if (!sdev)
+ return 0;
+
+ shost = sdev->host;
+- starget = scsi_target(sdev);
+
+- if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
+- scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
++ /*
++ * Ignore host/starget busy state.
++ * Since block layer does not have a concept of fairness across
++ * multiple queues, congestion of host/starget needs to be handled
++ * in SCSI layer.
++ */
++ if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
+ return 1;
+
+ return 0;
+diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
+index 74708fc..ae78148 100644
+--- a/drivers/scsi/scsi_wait_scan.c
++++ b/drivers/scsi/scsi_wait_scan.c
+@@ -12,7 +12,7 @@
+
+ #include <linux/module.h>
+ #include <linux/device.h>
+-#include <scsi/scsi_scan.h>
++#include "scsi_priv.h"
+
+ static int __init wait_scan_init(void)
+ {
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index cad8b92..455a251 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -134,21 +134,11 @@ static struct se_device *fd_create_virtdevice(
+ ret = PTR_ERR(dev_p);
+ goto fail;
+ }
+-#if 0
+- if (di->no_create_file)
+- flags = O_RDWR | O_LARGEFILE;
+- else
+- flags = O_RDWR | O_CREAT | O_LARGEFILE;
+-#else
+- flags = O_RDWR | O_CREAT | O_LARGEFILE;
+-#endif
+-/* flags |= O_DIRECT; */
+ /*
+- * If fd_buffered_io=1 has not been set explicitly (the default),
+- * use O_SYNC to force FILEIO writes to disk.
++ * Use O_DSYNC by default instead of O_SYNC to forgo syncing
++ * of pure timestamp updates.
+ */
+- if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
+- flags |= O_SYNC;
++ flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
+
+ file = filp_open(dev_p, flags, 0600);
+ if (IS_ERR(file)) {
+@@ -400,26 +390,6 @@ static void fd_emulate_sync_cache(struct se_task *task)
+ transport_complete_sync_cache(cmd, ret == 0);
+ }
+
+-/*
+- * WRITE Force Unit Access (FUA) emulation on a per struct se_task
+- * LBA range basis..
+- */
+-static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
+-{
+- struct se_device *dev = cmd->se_dev;
+- struct fd_dev *fd_dev = dev->dev_ptr;
+- loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+- loff_t end = start + task->task_size;
+- int ret;
+-
+- pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
+- task->task_lba, task->task_size);
+-
+- ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+- if (ret != 0)
+- pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
+-}
+-
+ static int fd_do_task(struct se_task *task)
+ {
+ struct se_cmd *cmd = task->task_se_cmd;
+@@ -434,19 +404,21 @@ static int fd_do_task(struct se_task *task)
+ ret = fd_do_readv(task);
+ } else {
+ ret = fd_do_writev(task);
+-
++ /*
++ * Perform implict vfs_fsync_range() for fd_do_writev() ops
++ * for SCSI WRITEs with Forced Unit Access (FUA) set.
++ * Allow this to happen independent of WCE=0 setting.
++ */
+ if (ret > 0 &&
+- dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
+ dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+ (cmd->se_cmd_flags & SCF_FUA)) {
+- /*
+- * We might need to be a bit smarter here
+- * and return some sense data to let the initiator
+- * know the FUA WRITE cache sync failed..?
+- */
+- fd_emulate_write_fua(cmd, task);
+- }
++ struct fd_dev *fd_dev = dev->dev_ptr;
++ loff_t start = task->task_lba *
++ dev->se_sub_dev->se_dev_attrib.block_size;
++ loff_t end = start + task->task_size;
+
++ vfs_fsync_range(fd_dev->fd_file, start, end, 1);
++ }
+ }
+
+ if (ret < 0) {
+@@ -478,7 +450,6 @@ enum {
+ static match_table_t tokens = {
+ {Opt_fd_dev_name, "fd_dev_name=%s"},
+ {Opt_fd_dev_size, "fd_dev_size=%s"},
+- {Opt_fd_buffered_io, "fd_buffered_io=%d"},
+ {Opt_err, NULL}
+ };
+
+@@ -490,7 +461,7 @@ static ssize_t fd_set_configfs_dev_params(
+ struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+ char *orig, *ptr, *arg_p, *opts;
+ substring_t args[MAX_OPT_ARGS];
+- int ret = 0, arg, token;
++ int ret = 0, token;
+
+ opts = kstrdup(page, GFP_KERNEL);
+ if (!opts)
+@@ -534,19 +505,6 @@ static ssize_t fd_set_configfs_dev_params(
+ " bytes\n", fd_dev->fd_dev_size);
+ fd_dev->fbd_flags |= FBDF_HAS_SIZE;
+ break;
+- case Opt_fd_buffered_io:
+- match_int(args, &arg);
+- if (arg != 1) {
+- pr_err("bogus fd_buffered_io=%d value\n", arg);
+- ret = -EINVAL;
+- goto out;
+- }
+-
+- pr_debug("FILEIO: Using buffered I/O"
+- " operations for struct fd_dev\n");
+-
+- fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
+- break;
+ default:
+ break;
+ }
+@@ -578,10 +536,8 @@ static ssize_t fd_show_configfs_dev_params(
+ ssize_t bl = 0;
+
+ bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
+- bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
+- fd_dev->fd_dev_name, fd_dev->fd_dev_size,
+- (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
+- "Buffered" : "Synchronous");
++ bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n",
++ fd_dev->fd_dev_name, fd_dev->fd_dev_size);
+ return bl;
+ }
+
+diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
+index 59e6e73..53ece69 100644
+--- a/drivers/target/target_core_file.h
++++ b/drivers/target/target_core_file.h
+@@ -18,7 +18,6 @@ struct fd_request {
+
+ #define FBDF_HAS_PATH 0x01
+ #define FBDF_HAS_SIZE 0x02
+-#define FDBD_USE_BUFFERED_IO 0x04
+
+ struct fd_dev {
+ u32 fbd_flags;
+diff --git a/fs/attr.c b/fs/attr.c
+index 7ee7ba4..b8f55c4 100644
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -176,6 +176,11 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
+ return -EPERM;
+ }
+
++ if ((ia_valid & ATTR_SIZE) && IS_I_VERSION(inode)) {
++ if (attr->ia_size != inode->i_size)
++ inode_inc_iversion(inode);
++ }
++
+ if ((ia_valid & ATTR_MODE)) {
+ mode_t amode = attr->ia_mode;
+ /* Flag setting protected by i_mutex */
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index c467ac8..2f3ff59 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -43,6 +43,7 @@
+
+ #define CIFS_MIN_RCV_POOL 4
+
++#define MAX_REOPEN_ATT 5 /* these many maximum attempts to reopen a file */
+ /*
+ * default attribute cache timeout (jiffies)
+ */
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index 6f4e243..26917d3 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -184,11 +184,13 @@ extern int CIFSTCon(unsigned int xid, struct cifs_ses *ses,
+
+ extern int CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
+ const char *searchName, const struct nls_table *nls_codepage,
+- __u16 *searchHandle, struct cifs_search_info *psrch_inf,
++ __u16 *searchHandle, __u16 search_flags,
++ struct cifs_search_info *psrch_inf,
+ int map, const char dirsep);
+
+ extern int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
+- __u16 searchHandle, struct cifs_search_info *psrch_inf);
++ __u16 searchHandle, __u16 search_flags,
++ struct cifs_search_info *psrch_inf);
+
+ extern int CIFSFindClose(const int, struct cifs_tcon *tcon,
+ const __u16 search_handle);
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index e89803b..6aa7457 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -4327,7 +4327,7 @@ int
+ CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
+ const char *searchName,
+ const struct nls_table *nls_codepage,
+- __u16 *pnetfid,
++ __u16 *pnetfid, __u16 search_flags,
+ struct cifs_search_info *psrch_inf, int remap, const char dirsep)
+ {
+ /* level 257 SMB_ */
+@@ -4399,8 +4399,7 @@ findFirstRetry:
+ cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
+ ATTR_DIRECTORY);
+ pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO));
+- pSMB->SearchFlags = cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END |
+- CIFS_SEARCH_RETURN_RESUME);
++ pSMB->SearchFlags = cpu_to_le16(search_flags);
+ pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
+
+ /* BB what should we set StorageType to? Does it matter? BB */
+@@ -4470,8 +4469,8 @@ findFirstRetry:
+ return rc;
+ }
+
+-int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
+- __u16 searchHandle, struct cifs_search_info *psrch_inf)
++int CIFSFindNext(const int xid, struct cifs_tcon *tcon, __u16 searchHandle,
++ __u16 search_flags, struct cifs_search_info *psrch_inf)
+ {
+ TRANSACTION2_FNEXT_REQ *pSMB = NULL;
+ TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
+@@ -4514,8 +4513,7 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
+ cpu_to_le16(CIFSMaxBufSize / sizeof(FILE_UNIX_INFO));
+ pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
+ pSMB->ResumeKey = psrch_inf->resume_key;
+- pSMB->SearchFlags =
+- cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME);
++ pSMB->SearchFlags = cpu_to_le16(search_flags);
+
+ name_len = psrch_inf->resume_name_len;
+ params += name_len;
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 0f7dc22..0bb785f 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -1534,10 +1534,11 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
+ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
+ bool fsuid_only)
+ {
+- struct cifsFileInfo *open_file;
++ struct cifsFileInfo *open_file, *inv_file = NULL;
+ struct cifs_sb_info *cifs_sb;
+ bool any_available = false;
+ int rc;
++ unsigned int refind = 0;
+
+ /* Having a null inode here (because mapping->host was set to zero by
+ the VFS or MM) should not happen but we had reports of on oops (due to
+@@ -1557,40 +1558,25 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
+
+ spin_lock(&cifs_file_list_lock);
+ refind_writable:
++ if (refind > MAX_REOPEN_ATT) {
++ spin_unlock(&cifs_file_list_lock);
++ return NULL;
++ }
+ list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
+ if (!any_available && open_file->pid != current->tgid)
+ continue;
+ if (fsuid_only && open_file->uid != current_fsuid())
+ continue;
+ if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
+- cifsFileInfo_get(open_file);
+-
+ if (!open_file->invalidHandle) {
+ /* found a good writable file */
++ cifsFileInfo_get(open_file);
+ spin_unlock(&cifs_file_list_lock);
+ return open_file;
++ } else {
++ if (!inv_file)
++ inv_file = open_file;
+ }
+-
+- spin_unlock(&cifs_file_list_lock);
+-
+- /* Had to unlock since following call can block */
+- rc = cifs_reopen_file(open_file, false);
+- if (!rc)
+- return open_file;
+-
+- /* if it fails, try another handle if possible */
+- cFYI(1, "wp failed on reopen file");
+- cifsFileInfo_put(open_file);
+-
+- spin_lock(&cifs_file_list_lock);
+-
+- /* else we simply continue to the next entry. Thus
+- we do not loop on reopen errors. If we
+- can not reopen the file, for example if we
+- reconnected to a server with another client
+- racing to delete or lock the file we would not
+- make progress if we restarted before the beginning
+- of the loop here. */
+ }
+ }
+ /* couldn't find useable FH with same pid, try any available */
+@@ -1598,7 +1584,30 @@ refind_writable:
+ any_available = true;
+ goto refind_writable;
+ }
++
++ if (inv_file) {
++ any_available = false;
++ cifsFileInfo_get(inv_file);
++ }
++
+ spin_unlock(&cifs_file_list_lock);
++
++ if (inv_file) {
++ rc = cifs_reopen_file(inv_file, false);
++ if (!rc)
++ return inv_file;
++ else {
++ spin_lock(&cifs_file_list_lock);
++ list_move_tail(&inv_file->flist,
++ &cifs_inode->openFileList);
++ spin_unlock(&cifs_file_list_lock);
++ cifsFileInfo_put(inv_file);
++ spin_lock(&cifs_file_list_lock);
++ ++refind;
++ goto refind_writable;
++ }
++ }
++
+ return NULL;
+ }
+
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index a090bbe..db4a138 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -219,6 +219,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
+
+ static int initiate_cifs_search(const int xid, struct file *file)
+ {
++ __u16 search_flags;
+ int rc = 0;
+ char *full_path = NULL;
+ struct cifsFileInfo *cifsFile;
+@@ -270,8 +271,12 @@ ffirst_retry:
+ cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
+ }
+
++ search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
++ if (backup_cred(cifs_sb))
++ search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
++
+ rc = CIFSFindFirst(xid, pTcon, full_path, cifs_sb->local_nls,
+- &cifsFile->netfid, &cifsFile->srch_inf,
++ &cifsFile->netfid, search_flags, &cifsFile->srch_inf,
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR, CIFS_DIR_SEP(cifs_sb));
+ if (rc == 0)
+@@ -502,11 +507,13 @@ static int cifs_save_resume_key(const char *current_entry,
+ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
+ struct file *file, char **ppCurrentEntry, int *num_to_ret)
+ {
++ __u16 search_flags;
+ int rc = 0;
+ int pos_in_buf = 0;
+ loff_t first_entry_in_buffer;
+ loff_t index_to_find = file->f_pos;
+ struct cifsFileInfo *cifsFile = file->private_data;
++ struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+ /* check if index in the buffer */
+
+ if ((cifsFile == NULL) || (ppCurrentEntry == NULL) ||
+@@ -560,10 +567,14 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
+ cifsFile);
+ }
+
++ search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
++ if (backup_cred(cifs_sb))
++ search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
++
+ while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
+ (rc == 0) && !cifsFile->srch_inf.endOfSearch) {
+ cFYI(1, "calling findnext2");
+- rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
++ rc = CIFSFindNext(xid, pTcon, cifsFile->netfid, search_flags,
+ &cifsFile->srch_inf);
+ /* FindFirst/Next set last_entry to NULL on malformed reply */
+ if (cifsFile->srch_inf.last_entry)
+diff --git a/fs/exofs/super.c b/fs/exofs/super.c
+index e6085ec..7ed5000 100644
+--- a/fs/exofs/super.c
++++ b/fs/exofs/super.c
+@@ -745,7 +745,6 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
+ sbi->one_comp.obj.partition = opts->pid;
+ sbi->one_comp.obj.id = 0;
+ exofs_make_credential(sbi->one_comp.cred, &sbi->one_comp.obj);
+- sbi->oc.numdevs = 1;
+ sbi->oc.single_comp = EC_SINGLE_COMP;
+ sbi->oc.comps = &sbi->one_comp;
+
+@@ -803,6 +802,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
+ goto free_sbi;
+
+ ore_comp_set_dev(&sbi->oc, 0, od);
++ sbi->oc.numdevs = 1;
+ }
+
+ __sbi_read_stats(sbi);
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 12ccacd..914bf9e 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -88,8 +88,8 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb,
+ * unusual file system layouts.
+ */
+ if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
+- block_cluster = EXT4_B2C(sbi, (start -
+- ext4_block_bitmap(sb, gdp)));
++ block_cluster = EXT4_B2C(sbi,
++ ext4_block_bitmap(sb, gdp) - start);
+ if (block_cluster < num_clusters)
+ block_cluster = -1;
+ else if (block_cluster == num_clusters) {
+@@ -100,7 +100,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb,
+
+ if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
+ inode_cluster = EXT4_B2C(sbi,
+- start - ext4_inode_bitmap(sb, gdp));
++ ext4_inode_bitmap(sb, gdp) - start);
+ if (inode_cluster < num_clusters)
+ inode_cluster = -1;
+ else if (inode_cluster == num_clusters) {
+@@ -112,7 +112,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb,
+ itbl_blk = ext4_inode_table(sb, gdp);
+ for (i = 0; i < sbi->s_itb_per_group; i++) {
+ if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
+- c = EXT4_B2C(sbi, start - itbl_blk + i);
++ c = EXT4_B2C(sbi, itbl_blk + i - start);
+ if ((c < num_clusters) || (c == inode_cluster) ||
+ (c == block_cluster) || (c == itbl_cluster))
+ continue;
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index ab25f57..76a6e3b 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -36,7 +36,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ handle_t *handle = NULL;
+ int err, migrate = 0;
+ struct ext4_iloc iloc;
+- unsigned int oldflags;
++ unsigned int oldflags, mask, i;
+ unsigned int jflag;
+
+ if (!inode_owner_or_capable(inode))
+@@ -113,9 +113,14 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ if (err)
+ goto flags_err;
+
+- flags = flags & EXT4_FL_USER_MODIFIABLE;
+- flags |= oldflags & ~EXT4_FL_USER_MODIFIABLE;
+- ei->i_flags = flags;
++ for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
++ if (!(mask & EXT4_FL_USER_MODIFIABLE))
++ continue;
++ if (mask & flags)
++ ext4_set_inode_flag(inode, i);
++ else
++ ext4_clear_inode_flag(inode, i);
++ }
+
+ ext4_set_inode_flags(inode);
+ inode->i_ctime = ext4_current_time(inode);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index e2d8be8..1d07c12 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2567,6 +2567,9 @@ int ext4_mb_release(struct super_block *sb)
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
+
++ if (sbi->s_proc)
++ remove_proc_entry("mb_groups", sbi->s_proc);
++
+ if (sbi->s_group_info) {
+ for (i = 0; i < ngroups; i++) {
+ grinfo = ext4_get_group_info(sb, i);
+@@ -2614,8 +2617,6 @@ int ext4_mb_release(struct super_block *sb)
+ }
+
+ free_percpu(sbi->s_locality_groups);
+- if (sbi->s_proc)
+- remove_proc_entry("mb_groups", sbi->s_proc);
+
+ return 0;
+ }
+@@ -4693,6 +4694,7 @@ do_more:
+ */
+ new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+ if (!new_entry) {
++ ext4_mb_unload_buddy(&e4b);
+ err = -ENOMEM;
+ goto error_return;
+ }
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index aa4c782..4dd0890 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1037,6 +1037,12 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
+ EXT4_ERROR_INODE(dir, "bad inode number: %u", ino);
+ return ERR_PTR(-EIO);
+ }
++ if (unlikely(ino == dir->i_ino)) {
++ EXT4_ERROR_INODE(dir, "'%.*s' linked to parent dir",
++ dentry->d_name.len,
++ dentry->d_name.name);
++ return ERR_PTR(-EIO);
++ }
+ inode = ext4_iget(dir->i_sb, ino);
+ if (inode == ERR_PTR(-ESTALE)) {
+ EXT4_ERROR_INODE(dir,
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 961059b..ab7aa3f 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -480,6 +480,7 @@ void __ext4_error(struct super_block *sb, const char *function,
+ printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
+ sb->s_id, function, line, current->comm, &vaf);
+ va_end(args);
++ save_error_info(sb, function, line);
+
+ ext4_handle_error(sb);
+ }
+@@ -3727,7 +3728,8 @@ no_journal:
+ goto failed_mount4;
+ }
+
+- ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY);
++ if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
++ sb->s_flags |= MS_RDONLY;
+
+ /* determine the minimum size of new large inodes, if present */
+ if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
+diff --git a/fs/namespace.c b/fs/namespace.c
+index cfc6d44..ca4913a 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1244,8 +1244,9 @@ void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
+ list_del_init(&p->mnt_expire);
+ list_del_init(&p->mnt_list);
+ __touch_mnt_namespace(p->mnt_ns);
++ if (p->mnt_ns)
++ __mnt_make_shortterm(p);
+ p->mnt_ns = NULL;
+- __mnt_make_shortterm(p);
+ list_del_init(&p->mnt_child);
+ if (p->mnt_parent != p) {
+ p->mnt_parent->mnt_ghosts++;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 03d9b90..a3cae5d 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -96,6 +96,8 @@ static int nfs4_map_errors(int err)
+ case -NFS4ERR_BADOWNER:
+ case -NFS4ERR_BADNAME:
+ return -EINVAL;
++ case -NFS4ERR_SHARE_DENIED:
++ return -EACCES;
+ default:
+ dprintk("%s could not handle NFSv4 error %d\n",
+ __func__, -err);
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index 14b6cd0..def807c 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -181,6 +181,7 @@
+ {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
++ {0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+@@ -198,6 +199,7 @@
+ {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
++ {0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+@@ -493,6 +495,7 @@
+ {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+ {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
++ {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+ {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+@@ -512,6 +515,7 @@
+ {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++ {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0, 0, 0}
+
+ #define r128_PCI_IDS \
+diff --git a/include/linux/Kbuild b/include/linux/Kbuild
+index 619b565..bd21ecd 100644
+--- a/include/linux/Kbuild
++++ b/include/linux/Kbuild
+@@ -224,6 +224,7 @@ header-y += kd.h
+ header-y += kdev_t.h
+ header-y += kernel.h
+ header-y += kernelcapi.h
++header-y += kernel-page-flags.h
+ header-y += keyboard.h
+ header-y += keyctl.h
+ header-y += l2tp.h
+diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
+index bd92a89..096b05d 100644
+--- a/include/linux/kernel-page-flags.h
++++ b/include/linux/kernel-page-flags.h
+@@ -31,6 +31,8 @@
+
+ #define KPF_KSM 21
+
++#ifdef __KERNEL__
++
+ /* kernel hacking assistances
+ * WARNING: subject to change, never rely on them!
+ */
+@@ -43,4 +45,6 @@
+ #define KPF_ARCH 38
+ #define KPF_UNCACHED 39
+
++#endif /* __KERNEL__ */
++
+ #endif /* LINUX_KERNEL_PAGE_FLAGS_H */
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index cbeb586..cb52340 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1420,15 +1420,6 @@ static inline bool netdev_uses_dsa_tags(struct net_device *dev)
+ return 0;
+ }
+
+-#ifndef CONFIG_NET_NS
+-static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
+-{
+- skb->dev = dev;
+-}
+-#else /* CONFIG_NET_NS */
+-void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
+-#endif
+-
+ static inline bool netdev_uses_trailer_tags(struct net_device *dev)
+ {
+ #ifdef CONFIG_NET_DSA_TAG_TRAILER
+diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
+index 0ddd161..31d2844 100644
+--- a/include/linux/netfilter_bridge.h
++++ b/include/linux/netfilter_bridge.h
+@@ -104,9 +104,18 @@ struct bridge_skb_cb {
+ } daddr;
+ };
+
++static inline void br_drop_fake_rtable(struct sk_buff *skb)
++{
++ struct dst_entry *dst = skb_dst(skb);
++
++ if (dst && (dst->flags & DST_FAKE_RTABLE))
++ skb_dst_drop(skb);
++}
++
+ #else
+ #define nf_bridge_maybe_copy_header(skb) (0)
+ #define nf_bridge_pad(skb) (0)
++#define br_drop_fake_rtable(skb) do { } while (0)
+ #endif /* CONFIG_BRIDGE_NETFILTER */
+
+ #endif /* __KERNEL__ */
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index e689b47..bdb4590 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1866,8 +1866,6 @@ static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
+ {
+ int delta = 0;
+
+- if (headroom < NET_SKB_PAD)
+- headroom = NET_SKB_PAD;
+ if (headroom > skb_headroom(skb))
+ delta = headroom - skb_headroom(skb);
+
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 75766b4..16010d1 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -54,6 +54,8 @@ struct dst_entry {
+ #define DST_NOCACHE 0x0010
+ #define DST_NOCOUNT 0x0020
+ #define DST_NOPEER 0x0040
++#define DST_FAKE_RTABLE 0x0080
++#define DST_XFRM_TUNNEL 0x0100
+
+ short error;
+ short obsolete;
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index 6a72a58..ad03988 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -703,4 +703,17 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr)
+ addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
+ }
+
++/* The cookie is always 0 since this is how it's used in the
++ * pmtu code.
++ */
++static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
++{
++ if (t->dst && !dst_check(t->dst, 0)) {
++ dst_release(t->dst);
++ t->dst = NULL;
++ }
++
++ return t->dst;
++}
++
+ #endif /* __net_sctp_h__ */
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 26f1ab0..79ee71f 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -352,7 +352,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+ }
+ charge = 0;
+ if (mpnt->vm_flags & VM_ACCOUNT) {
+- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
++ unsigned long len;
++ len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
+ if (security_vm_enough_memory(len))
+ goto fail_nomem;
+ charge = len;
+diff --git a/lib/btree.c b/lib/btree.c
+index 2a34392..297124d 100644
+--- a/lib/btree.c
++++ b/lib/btree.c
+@@ -319,8 +319,8 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
+
+ if (head->height == 0)
+ return NULL;
+-retry:
+ longcpy(key, __key, geo->keylen);
++retry:
+ dec_key(geo, key);
+
+ node = head->node;
+@@ -351,7 +351,7 @@ retry:
+ }
+ miss:
+ if (retry_key) {
+- __key = retry_key;
++ longcpy(key, retry_key, geo->keylen);
+ retry_key = NULL;
+ goto retry;
+ }
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 7120c2e..5f5c545 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2068,6 +2068,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
+ kref_get(&reservations->refs);
+ }
+
++static void resv_map_put(struct vm_area_struct *vma)
++{
++ struct resv_map *reservations = vma_resv_map(vma);
++
++ if (!reservations)
++ return;
++ kref_put(&reservations->refs, resv_map_release);
++}
++
+ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
+ {
+ struct hstate *h = hstate_vma(vma);
+@@ -2083,7 +2092,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
+ reserve = (end - start) -
+ region_count(&reservations->regions, start, end);
+
+- kref_put(&reservations->refs, resv_map_release);
++ resv_map_put(vma);
+
+ if (reserve) {
+ hugetlb_acct_memory(h, -reserve);
+@@ -2884,12 +2893,16 @@ int hugetlb_reserve_pages(struct inode *inode,
+ set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
+ }
+
+- if (chg < 0)
+- return chg;
++ if (chg < 0) {
++ ret = chg;
++ goto out_err;
++ }
+
+ /* There must be enough filesystem quota for the mapping */
+- if (hugetlb_get_quota(inode->i_mapping, chg))
+- return -ENOSPC;
++ if (hugetlb_get_quota(inode->i_mapping, chg)) {
++ ret = -ENOSPC;
++ goto out_err;
++ }
+
+ /*
+ * Check enough hugepages are available for the reservation.
+@@ -2898,7 +2911,7 @@ int hugetlb_reserve_pages(struct inode *inode,
+ ret = hugetlb_acct_memory(h, chg);
+ if (ret < 0) {
+ hugetlb_put_quota(inode->i_mapping, chg);
+- return ret;
++ goto out_err;
+ }
+
+ /*
+@@ -2915,6 +2928,10 @@ int hugetlb_reserve_pages(struct inode *inode,
+ if (!vma || vma->vm_flags & VM_MAYSHARE)
+ region_add(&inode->i_mapping->private_list, from, to);
+ return 0;
++out_err:
++ if (vma)
++ resv_map_put(vma);
++ return ret;
+ }
+
+ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
+diff --git a/mm/slub.c b/mm/slub.c
+index a99c785..af47188 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1506,15 +1506,19 @@ static inline void *acquire_slab(struct kmem_cache *s,
+ freelist = page->freelist;
+ counters = page->counters;
+ new.counters = counters;
+- if (mode)
++ if (mode) {
+ new.inuse = page->objects;
++ new.freelist = NULL;
++ } else {
++ new.freelist = freelist;
++ }
+
+ VM_BUG_ON(new.frozen);
+ new.frozen = 1;
+
+ } while (!__cmpxchg_double_slab(s, page,
+ freelist, counters,
+- NULL, new.counters,
++ new.freelist, new.counters,
+ "lock and freeze"));
+
+ remove_partial(n, page);
+@@ -1556,7 +1560,6 @@ static void *get_partial_node(struct kmem_cache *s,
+ object = t;
+ available = page->objects - page->inuse;
+ } else {
+- page->freelist = t;
+ available = put_cpu_partial(s, page, 0);
+ }
+ if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 27be2f0..eeba3bb 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -256,7 +256,7 @@ struct vmap_area {
+ struct rb_node rb_node; /* address sorted rbtree */
+ struct list_head list; /* address sorted list */
+ struct list_head purge_list; /* "lazy purge" list */
+- void *private;
++ struct vm_struct *vm;
+ struct rcu_head rcu_head;
+ };
+
+@@ -1160,9 +1160,10 @@ void __init vmalloc_init(void)
+ /* Import existing vmlist entries. */
+ for (tmp = vmlist; tmp; tmp = tmp->next) {
+ va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
+- va->flags = tmp->flags | VM_VM_AREA;
++ va->flags = VM_VM_AREA;
+ va->va_start = (unsigned long)tmp->addr;
+ va->va_end = va->va_start + tmp->size;
++ va->vm = tmp;
+ __insert_vmap_area(va);
+ }
+
+@@ -1260,7 +1261,7 @@ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
+ vm->addr = (void *)va->va_start;
+ vm->size = va->va_end - va->va_start;
+ vm->caller = caller;
+- va->private = vm;
++ va->vm = vm;
+ va->flags |= VM_VM_AREA;
+ }
+
+@@ -1383,7 +1384,7 @@ static struct vm_struct *find_vm_area(const void *addr)
+
+ va = find_vmap_area((unsigned long)addr);
+ if (va && va->flags & VM_VM_AREA)
+- return va->private;
++ return va->vm;
+
+ return NULL;
+ }
+@@ -1402,7 +1403,7 @@ struct vm_struct *remove_vm_area(const void *addr)
+
+ va = find_vmap_area((unsigned long)addr);
+ if (va && va->flags & VM_VM_AREA) {
+- struct vm_struct *vm = va->private;
++ struct vm_struct *vm = va->vm;
+
+ if (!(vm->flags & VM_UNLIST)) {
+ struct vm_struct *tmp, **p;
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index cb33d9c..fbe2d2c 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -697,7 +697,7 @@ static enum page_references page_check_references(struct page *page,
+ return PAGEREF_RECLAIM;
+
+ if (referenced_ptes) {
+- if (PageAnon(page))
++ if (PageSwapBacked(page))
+ return PAGEREF_ACTIVATE;
+ /*
+ * All mapped pages start out with page table
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index bc25286..0cccca8 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -156,7 +156,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
+ skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
+ }
+
+- skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
++ skb->dev = vlan_dev_info(dev)->real_dev;
+ len = skb->len;
+ ret = dev_queue_xmit(skb);
+
+diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
+index ee64287..e221f88 100644
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -47,6 +47,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
+ kfree_skb(skb);
+ } else {
+ skb_push(skb, ETH_HLEN);
++ br_drop_fake_rtable(skb);
+ dev_queue_xmit(skb);
+ }
+
+diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
+index fa8b8f7..577ea5d 100644
+--- a/net/bridge/br_netfilter.c
++++ b/net/bridge/br_netfilter.c
+@@ -147,7 +147,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
+ rt->dst.dev = br->dev;
+ rt->dst.path = &rt->dst;
+ dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
+- rt->dst.flags = DST_NOXFRM | DST_NOPEER;
++ rt->dst.flags = DST_NOXFRM | DST_NOPEER | DST_FAKE_RTABLE;
+ rt->dst.ops = &fake_dst_ops;
+ }
+
+@@ -687,11 +687,7 @@ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+ {
+- struct rtable *rt = skb_rtable(skb);
+-
+- if (rt && rt == bridge_parent_rtable(in))
+- skb_dst_drop(skb);
+-
++ br_drop_fake_rtable(skb);
+ return NF_ACCEPT;
+ }
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 61a7baa..1cbddc9 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1607,10 +1607,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+- skb_set_dev(skb, dev);
++ skb->dev = dev;
++ skb_dst_drop(skb);
+ skb->tstamp.tv64 = 0;
+ skb->pkt_type = PACKET_HOST;
+ skb->protocol = eth_type_trans(skb, dev);
++ skb->mark = 0;
++ secpath_reset(skb);
++ nf_reset(skb);
+ return netif_rx(skb);
+ }
+ EXPORT_SYMBOL_GPL(dev_forward_skb);
+@@ -1865,36 +1869,6 @@ void netif_device_attach(struct net_device *dev)
+ }
+ EXPORT_SYMBOL(netif_device_attach);
+
+-/**
+- * skb_dev_set -- assign a new device to a buffer
+- * @skb: buffer for the new device
+- * @dev: network device
+- *
+- * If an skb is owned by a device already, we have to reset
+- * all data private to the namespace a device belongs to
+- * before assigning it a new device.
+- */
+-#ifdef CONFIG_NET_NS
+-void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
+-{
+- skb_dst_drop(skb);
+- if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
+- secpath_reset(skb);
+- nf_reset(skb);
+- skb_init_secmark(skb);
+- skb->mark = 0;
+- skb->priority = 0;
+- skb->nf_trace = 0;
+- skb->ipvs_property = 0;
+-#ifdef CONFIG_NET_SCHED
+- skb->tc_index = 0;
+-#endif
+- }
+- skb->dev = dev;
+-}
+-EXPORT_SYMBOL(skb_set_dev);
+-#endif /* CONFIG_NET_NS */
+-
+ /*
+ * Invalidate hardware checksum when packet is to be mangled, and
+ * complete checksum manually on outgoing path.
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index a5b4134..530787b 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -457,28 +457,22 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
+ struct esp_data *esp = x->data;
+ u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
+ u32 align = max_t(u32, blksize, esp->padlen);
+- u32 rem;
+-
+- mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
+- rem = mtu & (align - 1);
+- mtu &= ~(align - 1);
++ unsigned int net_adj;
+
+ switch (x->props.mode) {
+- case XFRM_MODE_TUNNEL:
+- break;
+- default:
+ case XFRM_MODE_TRANSPORT:
+- /* The worst case */
+- mtu -= blksize - 4;
+- mtu += min_t(u32, blksize - 4, rem);
+- break;
+ case XFRM_MODE_BEET:
+- /* The worst case. */
+- mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
++ net_adj = sizeof(struct iphdr);
+ break;
++ case XFRM_MODE_TUNNEL:
++ net_adj = 0;
++ break;
++ default:
++ BUG();
+ }
+
+- return mtu - 2;
++ return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
++ net_adj) & ~(align - 1)) + (net_adj - 2);
+ }
+
+ static void esp4_err(struct sk_buff *skb, u32 info)
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 80106d8..d01f9c6 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -146,6 +146,12 @@ static void free_fib_info_rcu(struct rcu_head *head)
+ {
+ struct fib_info *fi = container_of(head, struct fib_info, rcu);
+
++ change_nexthops(fi) {
++ if (nexthop_nh->nh_dev)
++ dev_put(nexthop_nh->nh_dev);
++ } endfor_nexthops(fi);
++
++ release_net(fi->fib_net);
+ if (fi->fib_metrics != (u32 *) dst_default_metrics)
+ kfree(fi->fib_metrics);
+ kfree(fi);
+@@ -157,13 +163,7 @@ void free_fib_info(struct fib_info *fi)
+ pr_warning("Freeing alive fib_info %p\n", fi);
+ return;
+ }
+- change_nexthops(fi) {
+- if (nexthop_nh->nh_dev)
+- dev_put(nexthop_nh->nh_dev);
+- nexthop_nh->nh_dev = NULL;
+- } endfor_nexthops(fi);
+ fib_info_cnt--;
+- release_net(fi->fib_net);
+ call_rcu(&fi->rcu, free_fib_info_rcu);
+ }
+
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index 3ce23f9..cd2d639 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -1372,6 +1372,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
+
+ if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
+ continue;
++ if (fi->fib_dead)
++ continue;
+ if (fa->fa_info->fib_scope < flp->flowi4_scope)
+ continue;
+ fib_alias_accessed(fa);
+diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
+index 1ac7938..65dd543 100644
+--- a/net/ipv6/esp6.c
++++ b/net/ipv6/esp6.c
+@@ -411,19 +411,15 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
+ struct esp_data *esp = x->data;
+ u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
+ u32 align = max_t(u32, blksize, esp->padlen);
+- u32 rem;
++ unsigned int net_adj;
+
+- mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
+- rem = mtu & (align - 1);
+- mtu &= ~(align - 1);
+-
+- if (x->props.mode != XFRM_MODE_TUNNEL) {
+- u32 padsize = ((blksize - 1) & 7) + 1;
+- mtu -= blksize - padsize;
+- mtu += min_t(u32, blksize - padsize, rem);
+- }
++ if (x->props.mode != XFRM_MODE_TUNNEL)
++ net_adj = sizeof(struct ipv6hdr);
++ else
++ net_adj = 0;
+
+- return mtu - 2;
++ return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
++ net_adj) & ~(align - 1)) + (net_adj - 2);
+ }
+
+ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index f7f07e2..ae98e09 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1178,6 +1178,29 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
+ return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
+ }
+
++static void ip6_append_data_mtu(int *mtu,
++ int *maxfraglen,
++ unsigned int fragheaderlen,
++ struct sk_buff *skb,
++ struct rt6_info *rt)
++{
++ if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
++ if (skb == NULL) {
++ /* first fragment, reserve header_len */
++ *mtu = *mtu - rt->dst.header_len;
++
++ } else {
++ /*
++ * this fragment is not first, the headers
++ * space is regarded as data space.
++ */
++ *mtu = dst_mtu(rt->dst.path);
++ }
++ *maxfraglen = ((*mtu - fragheaderlen) & ~7)
++ + fragheaderlen - sizeof(struct frag_hdr);
++ }
++}
++
+ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ int offset, int len, int odd, struct sk_buff *skb),
+ void *from, int length, int transhdrlen,
+@@ -1187,7 +1210,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ struct inet_sock *inet = inet_sk(sk);
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct inet_cork *cork;
+- struct sk_buff *skb;
++ struct sk_buff *skb, *skb_prev = NULL;
+ unsigned int maxfraglen, fragheaderlen;
+ int exthdrlen;
+ int dst_exthdrlen;
+@@ -1245,8 +1268,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ inet->cork.fl.u.ip6 = *fl6;
+ np->cork.hop_limit = hlimit;
+ np->cork.tclass = tclass;
+- mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
+- rt->dst.dev->mtu : dst_mtu(&rt->dst);
++ if (rt->dst.flags & DST_XFRM_TUNNEL)
++ mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
++ rt->dst.dev->mtu : dst_mtu(&rt->dst);
++ else
++ mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
++ rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+ if (np->frag_size < mtu) {
+ if (np->frag_size)
+ mtu = np->frag_size;
+@@ -1342,25 +1369,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ unsigned int fraglen;
+ unsigned int fraggap;
+ unsigned int alloclen;
+- struct sk_buff *skb_prev;
+ alloc_new_skb:
+- skb_prev = skb;
+-
+ /* There's no room in the current skb */
+- if (skb_prev)
+- fraggap = skb_prev->len - maxfraglen;
++ if (skb)
++ fraggap = skb->len - maxfraglen;
+ else
+ fraggap = 0;
++ /* update mtu and maxfraglen if necessary */
++ if (skb == NULL || skb_prev == NULL)
++ ip6_append_data_mtu(&mtu, &maxfraglen,
++ fragheaderlen, skb, rt);
++
++ skb_prev = skb;
+
+ /*
+ * If remaining data exceeds the mtu,
+ * we know we need more fragment(s).
+ */
+ datalen = length + fraggap;
+- if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
+- datalen = maxfraglen - fragheaderlen;
+
+- fraglen = datalen + fragheaderlen;
++ if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
++ datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
+ if ((flags & MSG_MORE) &&
+ !(rt->dst.dev->features&NETIF_F_SG))
+ alloclen = mtu;
+@@ -1369,13 +1398,16 @@ alloc_new_skb:
+
+ alloclen += dst_exthdrlen;
+
+- /*
+- * The last fragment gets additional space at tail.
+- * Note: we overallocate on fragments with MSG_MODE
+- * because we have no idea if we're the last one.
+- */
+- if (datalen == length + fraggap)
+- alloclen += rt->dst.trailer_len;
++ if (datalen != length + fraggap) {
++ /*
++ * this is not the last fragment, the trailer
++ * space is regarded as data space.
++ */
++ datalen += rt->dst.trailer_len;
++ }
++
++ alloclen += rt->dst.trailer_len;
++ fraglen = datalen + fragheaderlen;
+
+ /*
+ * We just reserve space for fragment header.
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 2a2a3e7..2fbbe1f 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -251,9 +251,16 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ {
+ struct inet_sock *inet = inet_sk(sk);
+ struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
+- int ret = -EINVAL;
++ int ret;
+ int chk_addr_ret;
+
++ if (!sock_flag(sk, SOCK_ZAPPED))
++ return -EINVAL;
++ if (addr_len < sizeof(struct sockaddr_l2tpip))
++ return -EINVAL;
++ if (addr->l2tp_family != AF_INET)
++ return -EINVAL;
++
+ ret = -EADDRINUSE;
+ read_lock_bh(&l2tp_ip_lock);
+ if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
+@@ -283,6 +290,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ sk_del_node_init(sk);
+ write_unlock_bh(&l2tp_ip_lock);
+ ret = 0;
++ sock_reset_flag(sk, SOCK_ZAPPED);
++
+ out:
+ release_sock(sk);
+
+@@ -303,13 +312,14 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
+ __be32 saddr;
+ int oif, rc;
+
+- rc = -EINVAL;
++ if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
++ return -EINVAL;
++
+ if (addr_len < sizeof(*lsa))
+- goto out;
++ return -EINVAL;
+
+- rc = -EAFNOSUPPORT;
+ if (lsa->l2tp_family != AF_INET)
+- goto out;
++ return -EAFNOSUPPORT;
+
+ lock_sock(sk);
+
+@@ -363,6 +373,14 @@ out:
+ return rc;
+ }
+
++static int l2tp_ip_disconnect(struct sock *sk, int flags)
++{
++ if (sock_flag(sk, SOCK_ZAPPED))
++ return 0;
++
++ return udp_disconnect(sk, flags);
++}
++
+ static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *uaddr_len, int peer)
+ {
+@@ -598,7 +616,7 @@ static struct proto l2tp_ip_prot = {
+ .close = l2tp_ip_close,
+ .bind = l2tp_ip_bind,
+ .connect = l2tp_ip_connect,
+- .disconnect = udp_disconnect,
++ .disconnect = l2tp_ip_disconnect,
+ .ioctl = udp_ioctl,
+ .destroy = l2tp_ip_destroy_sock,
+ .setsockopt = ip_setsockopt,
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index d5230ec..7095ae5 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -1111,6 +1111,12 @@ int ieee80211_reconfig(struct ieee80211_local *local)
+ }
+ }
+
++ /* add back keys */
++ list_for_each_entry(sdata, &local->interfaces, list)
++ if (ieee80211_sdata_running(sdata))
++ ieee80211_enable_keys(sdata);
++
++ wake_up:
+ /*
+ * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
+ * sessions can be established after a resume.
+@@ -1132,12 +1138,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
+ mutex_unlock(&local->sta_mtx);
+ }
+
+- /* add back keys */
+- list_for_each_entry(sdata, &local->interfaces, list)
+- if (ieee80211_sdata_running(sdata))
+- ieee80211_enable_keys(sdata);
+-
+- wake_up:
+ ieee80211_wake_queues_by_reason(hw,
+ IEEE80211_QUEUE_STOP_REASON_SUSPEND);
+
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index 817174e..8fc4dcd 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -377,9 +377,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
+ */
+ skb_set_owner_w(nskb, sk);
+
+- /* The 'obsolete' field of dst is set to 2 when a dst is freed. */
+- if (!dst || (dst->obsolete > 1)) {
+- dst_release(dst);
++ if (!sctp_transport_dst_check(tp)) {
+ sctp_transport_route(tp, NULL, sctp_sk(sk));
+ if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
+ sctp_assoc_sync_pmtu(asoc);
+diff --git a/net/sctp/transport.c b/net/sctp/transport.c
+index 394c57c..8da4481 100644
+--- a/net/sctp/transport.c
++++ b/net/sctp/transport.c
+@@ -226,23 +226,6 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
+ transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
+ }
+
+-/* this is a complete rip-off from __sk_dst_check
+- * the cookie is always 0 since this is how it's used in the
+- * pmtu code
+- */
+-static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
+-{
+- struct dst_entry *dst = t->dst;
+-
+- if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
+- dst_release(t->dst);
+- t->dst = NULL;
+- return NULL;
+- }
+-
+- return dst;
+-}
+-
+ void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
+ {
+ struct dst_entry *dst;
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index f0268ea..b2250da 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -959,6 +959,8 @@ call_reserveresult(struct rpc_task *task)
+ }
+
+ switch (status) {
++ case -ENOMEM:
++ rpc_delay(task, HZ >> 2);
+ case -EAGAIN: /* woken up; retry */
+ task->tk_action = call_reserve;
+ return;
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index c64c0ef..3ac9789 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -977,15 +977,16 @@ static void xprt_alloc_slot(struct rpc_task *task)
+ goto out_init_req;
+ switch (PTR_ERR(req)) {
+ case -ENOMEM:
+- rpc_delay(task, HZ >> 2);
+ dprintk("RPC: dynamic allocation of request slot "
+ "failed! Retrying\n");
++ task->tk_status = -ENOMEM;
+ break;
+ case -EAGAIN:
+ rpc_sleep_on(&xprt->backlog, task, NULL);
+ dprintk("RPC: waiting for request slot\n");
++ default:
++ task->tk_status = -EAGAIN;
+ }
+- task->tk_status = -EAGAIN;
+ return;
+ out_init_req:
+ task->tk_status = 0;
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 9049a5c..0174034 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -1919,6 +1919,9 @@ no_transform:
+ }
+ ok:
+ xfrm_pols_put(pols, drop_pols);
++ if (dst && dst->xfrm &&
++ dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
++ dst->flags |= DST_XFRM_TUNNEL;
+ return dst;
+
+ nopol:
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 0220b0f..839165f 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -698,6 +698,9 @@ static int snd_usb_pcm_check_knot(struct snd_pcm_runtime *runtime,
+ int count = 0, needs_knot = 0;
+ int err;
+
++ kfree(subs->rate_list.list);
++ subs->rate_list.list = NULL;
++
+ list_for_each_entry(fp, &subs->fmt_list, list) {
+ if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS)
+ return 0;
diff --git a/1020_linux-3.2.21.patch b/1020_linux-3.2.21.patch
new file mode 100644
index 00000000..6824be2e
--- /dev/null
+++ b/1020_linux-3.2.21.patch
@@ -0,0 +1,2499 @@
+diff --git a/Makefile b/Makefile
+index c7e9cc4..7eb465e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 20
++SUBLEVEL = 21
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
+index 89493ab..20ed2d5 100644
+--- a/arch/arm/mach-imx/hotplug.c
++++ b/arch/arm/mach-imx/hotplug.c
+@@ -12,6 +12,7 @@
+
+ #include <linux/errno.h>
+ #include <asm/cacheflush.h>
++#include <asm/cp15.h>
+ #include <mach/common.h>
+
+ int platform_cpu_kill(unsigned int cpu)
+@@ -19,6 +20,44 @@ int platform_cpu_kill(unsigned int cpu)
+ return 1;
+ }
+
++static inline void cpu_enter_lowpower(void)
++{
++ unsigned int v;
++
++ flush_cache_all();
++ asm volatile(
++ "mcr p15, 0, %1, c7, c5, 0\n"
++ " mcr p15, 0, %1, c7, c10, 4\n"
++ /*
++ * Turn off coherency
++ */
++ " mrc p15, 0, %0, c1, c0, 1\n"
++ " bic %0, %0, %3\n"
++ " mcr p15, 0, %0, c1, c0, 1\n"
++ " mrc p15, 0, %0, c1, c0, 0\n"
++ " bic %0, %0, %2\n"
++ " mcr p15, 0, %0, c1, c0, 0\n"
++ : "=&r" (v)
++ : "r" (0), "Ir" (CR_C), "Ir" (0x40)
++ : "cc");
++}
++
++static inline void cpu_leave_lowpower(void)
++{
++ unsigned int v;
++
++ asm volatile(
++ "mrc p15, 0, %0, c1, c0, 0\n"
++ " orr %0, %0, %1\n"
++ " mcr p15, 0, %0, c1, c0, 0\n"
++ " mrc p15, 0, %0, c1, c0, 1\n"
++ " orr %0, %0, %2\n"
++ " mcr p15, 0, %0, c1, c0, 1\n"
++ : "=&r" (v)
++ : "Ir" (CR_C), "Ir" (0x40)
++ : "cc");
++}
++
+ /*
+ * platform-specific code to shutdown a CPU
+ *
+@@ -26,9 +65,10 @@ int platform_cpu_kill(unsigned int cpu)
+ */
+ void platform_cpu_die(unsigned int cpu)
+ {
+- flush_cache_all();
++ cpu_enter_lowpower();
+ imx_enable_cpu(cpu, false);
+ cpu_do_idle();
++ cpu_leave_lowpower();
+
+ /* We should never return from idle */
+ panic("cpu %d unexpectedly exit from shutdown\n", cpu);
+diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c
+index 25f8402..ad994b3 100644
+--- a/arch/arm/mach-imx/mach-mx21ads.c
++++ b/arch/arm/mach-imx/mach-mx21ads.c
+@@ -32,7 +32,7 @@
+ * Memory-mapped I/O on MX21ADS base board
+ */
+ #define MX21ADS_MMIO_BASE_ADDR 0xf5000000
+-#define MX21ADS_MMIO_SIZE SZ_16M
++#define MX21ADS_MMIO_SIZE 0xc00000
+
+ #define MX21ADS_REG_ADDR(offset) (void __force __iomem *) \
+ (MX21ADS_MMIO_BASE_ADDR + (offset))
+diff --git a/arch/arm/mach-mx5/crm_regs.h b/arch/arm/mach-mx5/crm_regs.h
+index 5e11ba7..5e3f1f0 100644
+--- a/arch/arm/mach-mx5/crm_regs.h
++++ b/arch/arm/mach-mx5/crm_regs.h
+@@ -23,7 +23,7 @@
+ #define MX53_DPLL1_BASE MX53_IO_ADDRESS(MX53_PLL1_BASE_ADDR)
+ #define MX53_DPLL2_BASE MX53_IO_ADDRESS(MX53_PLL2_BASE_ADDR)
+ #define MX53_DPLL3_BASE MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR)
+-#define MX53_DPLL4_BASE MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR)
++#define MX53_DPLL4_BASE MX53_IO_ADDRESS(MX53_PLL4_BASE_ADDR)
+
+ /* PLL Register Offsets */
+ #define MXC_PLL_DP_CTL 0x00
+diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
+index 0b6d796..2e3200c 100644
+--- a/arch/powerpc/kernel/module_32.c
++++ b/arch/powerpc/kernel/module_32.c
+@@ -176,8 +176,8 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
+
+ static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
+ {
+- if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
+- && entry->jump[1] == 0x396b0000 + (val & 0xffff))
++ if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16)
++ && entry->jump[1] == 0x398c0000 + (val & 0xffff))
+ return 1;
+ return 0;
+ }
+@@ -204,10 +204,9 @@ static uint32_t do_plt_call(void *location,
+ entry++;
+ }
+
+- /* Stolen from Paul Mackerras as well... */
+- entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */
+- entry->jump[1] = 0x396b0000 + (val&0xffff); /* addi r11,r11,sym@l*/
+- entry->jump[2] = 0x7d6903a6; /* mtctr r11 */
++ entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */
++ entry->jump[1] = 0x398c0000 + (val&0xffff); /* addi r12,r12,sym@l*/
++ entry->jump[2] = 0x7d8903a6; /* mtctr r12 */
+ entry->jump[3] = 0x4e800420; /* bctr */
+
+ DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
+diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
+index be6d9e3..3470624 100644
+--- a/arch/x86/crypto/aesni-intel_asm.S
++++ b/arch/x86/crypto/aesni-intel_asm.S
+@@ -2460,10 +2460,12 @@ ENTRY(aesni_cbc_dec)
+ pxor IN3, STATE4
+ movaps IN4, IV
+ #else
+- pxor (INP), STATE2
+- pxor 0x10(INP), STATE3
+ pxor IN1, STATE4
+ movaps IN2, IV
++ movups (INP), IN1
++ pxor IN1, STATE2
++ movups 0x10(INP), IN2
++ pxor IN2, STATE3
+ #endif
+ movups STATE1, (OUTP)
+ movups STATE2, 0x10(OUTP)
+diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
+index 1b82f7e..679229f 100644
+--- a/arch/x86/include/asm/uv/uv_bau.h
++++ b/arch/x86/include/asm/uv/uv_bau.h
+@@ -149,7 +149,6 @@
+ /* 4 bits of software ack period */
+ #define UV2_ACK_MASK 0x7UL
+ #define UV2_ACK_UNITS_SHFT 3
+-#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT
+ #define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
+
+ /*
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 0bab2b1..3524e1f 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -571,6 +571,24 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+ }
+ }
+
++ /* re-enable TopologyExtensions if switched off by BIOS */
++ if ((c->x86 == 0x15) &&
++ (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
++ !cpu_has(c, X86_FEATURE_TOPOEXT)) {
++ u64 val;
++
++ if (!rdmsrl_amd_safe(0xc0011005, &val)) {
++ val |= 1ULL << 54;
++ wrmsrl_amd_safe(0xc0011005, val);
++ rdmsrl(0xc0011005, val);
++ if (val & (1ULL << 54)) {
++ set_cpu_cap(c, X86_FEATURE_TOPOEXT);
++ printk(KERN_INFO FW_INFO "CPU: Re-enabling "
++ "disabled Topology Extensions Support\n");
++ }
++ }
++ }
++
+ cpu_detect_cache_sizes(c);
+
+ /* Multi core CPU? */
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+index f547421..445a61c 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+@@ -52,6 +52,7 @@ struct threshold_block {
+ unsigned int cpu;
+ u32 address;
+ u16 interrupt_enable;
++ bool interrupt_capable;
+ u16 threshold_limit;
+ struct kobject kobj;
+ struct list_head miscj;
+@@ -86,6 +87,21 @@ struct thresh_restart {
+ u16 old_limit;
+ };
+
++static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
++{
++ /*
++ * bank 4 supports APIC LVT interrupts implicitly since forever.
++ */
++ if (bank == 4)
++ return true;
++
++ /*
++ * IntP: interrupt present; if this bit is set, the thresholding
++ * bank can generate APIC LVT interrupts
++ */
++ return msr_high_bits & BIT(28);
++}
++
+ static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
+ {
+ int msr = (hi & MASK_LVTOFF_HI) >> 20;
+@@ -107,8 +123,10 @@ static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
+ return 1;
+ };
+
+-/* must be called with correct cpu affinity */
+-/* Called via smp_call_function_single() */
++/*
++ * Called via smp_call_function_single(), must be called with correct
++ * cpu affinity.
++ */
+ static void threshold_restart_bank(void *_tr)
+ {
+ struct thresh_restart *tr = _tr;
+@@ -131,6 +149,12 @@ static void threshold_restart_bank(void *_tr)
+ (new_count & THRESHOLD_MAX);
+ }
+
++ /* clear IntType */
++ hi &= ~MASK_INT_TYPE_HI;
++
++ if (!tr->b->interrupt_capable)
++ goto done;
++
+ if (tr->set_lvt_off) {
+ if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
+ /* set new lvt offset */
+@@ -139,9 +163,10 @@ static void threshold_restart_bank(void *_tr)
+ }
+ }
+
+- tr->b->interrupt_enable ?
+- (hi = (hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
+- (hi &= ~MASK_INT_TYPE_HI);
++ if (tr->b->interrupt_enable)
++ hi |= INT_TYPE_APIC;
++
++ done:
+
+ hi |= MASK_COUNT_EN_HI;
+ wrmsr(tr->b->address, lo, hi);
+@@ -206,14 +231,18 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
+ if (shared_bank[bank] && c->cpu_core_id)
+ break;
+ #endif
+- offset = setup_APIC_mce(offset,
+- (high & MASK_LVTOFF_HI) >> 20);
+
+ memset(&b, 0, sizeof(b));
+- b.cpu = cpu;
+- b.bank = bank;
+- b.block = block;
+- b.address = address;
++ b.cpu = cpu;
++ b.bank = bank;
++ b.block = block;
++ b.address = address;
++ b.interrupt_capable = lvt_interrupt_supported(bank, high);
++
++ if (b.interrupt_capable) {
++ int new = (high & MASK_LVTOFF_HI) >> 20;
++ offset = setup_APIC_mce(offset, new);
++ }
+
+ mce_threshold_block_init(&b, offset);
+ mce_threshold_vector = amd_threshold_interrupt;
+@@ -313,6 +342,9 @@ store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
+ struct thresh_restart tr;
+ unsigned long new;
+
++ if (!b->interrupt_capable)
++ return -EINVAL;
++
+ if (strict_strtoul(buf, 0, &new) < 0)
+ return -EINVAL;
+
+@@ -471,6 +503,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
+ b->cpu = cpu;
+ b->address = address;
+ b->interrupt_enable = 0;
++ b->interrupt_capable = lvt_interrupt_supported(bank, high);
+ b->threshold_limit = THRESHOLD_MAX;
+
+ INIT_LIST_HEAD(&b->miscj);
+diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
+index 81aee5a..29a69550 100644
+--- a/arch/x86/platform/uv/tlb_uv.c
++++ b/arch/x86/platform/uv/tlb_uv.c
+@@ -1304,7 +1304,6 @@ static void __init enable_timeouts(void)
+ */
+ mmr_image |= (1L << SOFTACK_MSHIFT);
+ if (is_uv2_hub()) {
+- mmr_image &= ~(1L << UV2_LEG_SHFT);
+ mmr_image |= (1L << UV2_EXT_SHFT);
+ }
+ write_mmr_misc_control(pnode, mmr_image);
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index cca659e..44d4393 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -198,6 +198,9 @@ static void __init xen_banner(void)
+ xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
+ }
+
++#define CPUID_THERM_POWER_LEAF 6
++#define APERFMPERF_PRESENT 0
++
+ static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
+ static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;
+
+@@ -218,6 +221,11 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
+ maskedx = cpuid_leaf1_edx_mask;
+ break;
+
++ case CPUID_THERM_POWER_LEAF:
++ /* Disabling APERFMPERF for kernel usage */
++ maskecx = ~(1 << APERFMPERF_PRESENT);
++ break;
++
+ case 0xb:
+ /* Suppress extended topology stuff */
+ maskebx = 0;
+diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
+index b427711..58b49d1 100644
+--- a/drivers/char/agp/intel-agp.c
++++ b/drivers/char/agp/intel-agp.c
+@@ -897,6 +897,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
+ ID(PCI_DEVICE_ID_INTEL_B43_HB),
+ ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
++ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
+diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
+index 5da67f1..6f24604 100644
+--- a/drivers/char/agp/intel-agp.h
++++ b/drivers/char/agp/intel-agp.h
+@@ -211,6 +211,7 @@
+ #define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
+ #define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
+ #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
++#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069
+ #define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
+ #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
+ #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
+diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
+index 241df2e..0477982 100644
+--- a/drivers/char/hw_random/atmel-rng.c
++++ b/drivers/char/hw_random/atmel-rng.c
+@@ -36,6 +36,13 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
+ /* data ready? */
+ if (readl(trng->base + TRNG_ODATA) & 1) {
+ *data = readl(trng->base + TRNG_ODATA);
++ /*
++ ensure data ready is only set again AFTER the next data
++ word is ready in case it got set between checking ISR
++ and reading ODATA, so we don't risk re-reading the
++ same word
++ */
++ readl(trng->base + TRNG_ISR);
+ return 4;
+ } else
+ return 0;
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index d0f8830..3a1bfd7 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -214,6 +214,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
++ .has_force_wake = 1,
+ };
+
+ static const struct intel_device_info intel_sandybridge_m_info = {
+@@ -222,6 +223,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
+ .has_fbc = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
++ .has_force_wake = 1,
+ };
+
+ static const struct intel_device_info intel_ivybridge_d_info = {
+@@ -229,6 +231,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
++ .has_force_wake = 1,
+ };
+
+ static const struct intel_device_info intel_ivybridge_m_info = {
+@@ -237,6 +240,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
+ .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */
+ .has_bsd_ring = 1,
+ .has_blt_ring = 1,
++ .has_force_wake = 1,
+ };
+
+ static const struct pci_device_id pciidlist[] = { /* aka */
+@@ -939,7 +943,7 @@ MODULE_LICENSE("GPL and additional rights");
+
+ /* We give fast paths for the really cool registers */
+ #define NEEDS_FORCE_WAKE(dev_priv, reg) \
+- (((dev_priv)->info->gen >= 6) && \
++ ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE) && \
+ ((reg) != ECOBUS))
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index ae294a0..d62c731 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -244,6 +244,7 @@ struct intel_device_info {
+ u8 is_broadwater:1;
+ u8 is_crestline:1;
+ u8 is_ivybridge:1;
++ u8 has_force_wake:1;
+ u8 has_fbc:1;
+ u8 has_pipe_cxsr:1;
+ u8 has_hotplug:1;
+@@ -1001,6 +1002,8 @@ struct drm_i915_file_private {
+ #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+ #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+
++#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
++
+ #include "i915_trace.h"
+
+ extern struct drm_ioctl_desc i915_ioctls[];
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 06ec1e5..fd53122 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -164,6 +164,14 @@
+ #define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
+ #define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
+ #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
++/* IVB has funny definitions for which plane to flip. */
++#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19)
++#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19)
++#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
++#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
++#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
++#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
++
+ #define MI_SET_CONTEXT MI_INSTR(0x18, 0)
+ #define MI_MM_SPACE_GTT (1<<8)
+ #define MI_MM_SPACE_PHYSICAL (0<<8)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 4720397..5c1cdb8 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -7009,14 +7009,14 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+- goto out;
++ goto err;
+
+ /* Offset into the new buffer for cases of shared fbs between CRTCs */
+ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+- goto out;
++ goto err_unpin;
+
+ /* Can't queue multiple flips, so wait for the previous
+ * one to finish before executing the next.
+@@ -7033,7 +7033,11 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
+ OUT_RING(obj->gtt_offset + offset);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+-out:
++ return 0;
++
++err_unpin:
++ i915_gem_object_unpin(obj);
++err:
+ return ret;
+ }
+
+@@ -7050,14 +7054,14 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+- goto out;
++ goto err;
+
+ /* Offset into the new buffer for cases of shared fbs between CRTCs */
+ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+- goto out;
++ goto err_unpin;
+
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+@@ -7072,7 +7076,11 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
+ OUT_RING(MI_NOOP);
+
+ ADVANCE_LP_RING();
+-out:
++ return 0;
++
++err_unpin:
++ i915_gem_object_unpin(obj);
++err:
+ return ret;
+ }
+
+@@ -7088,11 +7096,11 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+- goto out;
++ goto err;
+
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+- goto out;
++ goto err_unpin;
+
+ /* i965+ uses the linear or tiled offsets from the
+ * Display Registers (which do not change across a page-flip)
+@@ -7111,7 +7119,11 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ ADVANCE_LP_RING();
+-out:
++ return 0;
++
++err_unpin:
++ i915_gem_object_unpin(obj);
++err:
+ return ret;
+ }
+
+@@ -7127,11 +7139,11 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+- goto out;
++ goto err;
+
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+- goto out;
++ goto err_unpin;
+
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+@@ -7142,7 +7154,11 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ ADVANCE_LP_RING();
+-out:
++ return 0;
++
++err_unpin:
++ i915_gem_object_unpin(obj);
++err:
+ return ret;
+ }
+
+@@ -7160,22 +7176,43 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
++ uint32_t plane_bit = 0;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ if (ret)
+- goto out;
++ goto err;
++
++ switch(intel_crtc->plane) {
++ case PLANE_A:
++ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
++ break;
++ case PLANE_B:
++ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
++ break;
++ case PLANE_C:
++ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
++ break;
++ default:
++ WARN_ONCE(1, "unknown plane in flip command\n");
++ ret = -ENODEV;
++ goto err;
++ }
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+- goto out;
++ goto err_unpin;
+
+- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
++ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
+ intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+ intel_ring_emit(ring, (obj->gtt_offset));
+ intel_ring_emit(ring, (MI_NOOP));
+ intel_ring_advance(ring);
+-out:
++ return 0;
++
++err_unpin:
++ i915_gem_object_unpin(obj);
++err:
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 62f9ac5..933e66b 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -263,10 +263,15 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
+
+ static int init_ring_common(struct intel_ring_buffer *ring)
+ {
+- drm_i915_private_t *dev_priv = ring->dev->dev_private;
++ struct drm_device *dev = ring->dev;
++ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj = ring->obj;
++ int ret = 0;
+ u32 head;
+
++ if (HAS_FORCE_WAKE(dev))
++ gen6_gt_force_wake_get(dev_priv);
++
+ /* Stop the ring if it's running. */
+ I915_WRITE_CTL(ring, 0);
+ I915_WRITE_HEAD(ring, 0);
+@@ -314,7 +319,8 @@ static int init_ring_common(struct intel_ring_buffer *ring)
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+- return -EIO;
++ ret = -EIO;
++ goto out;
+ }
+
+ if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+@@ -325,7 +331,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
+ ring->space = ring_space(ring);
+ }
+
+- return 0;
++out:
++ if (HAS_FORCE_WAKE(dev))
++ gen6_gt_force_wake_put(dev_priv);
++
++ return ret;
+ }
+
+ static int
+@@ -1045,6 +1055,10 @@ int intel_init_ring_buffer(struct drm_device *dev,
+ if (ret)
+ goto err_unref;
+
++ ret = i915_gem_object_set_to_gtt_domain(obj, true);
++ if (ret)
++ goto err_unpin;
++
+ ring->map.size = ring->size;
+ ring->map.offset = dev->agp->base + obj->gtt_offset;
+ ring->map.type = 0;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index cea6696..1e72db5 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -602,6 +602,55 @@ nouveau_connector_scaler_modes_add(struct drm_connector *connector)
+ return modes;
+ }
+
++static void
++nouveau_connector_detect_depth(struct drm_connector *connector)
++{
++ struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
++ struct nouveau_connector *nv_connector = nouveau_connector(connector);
++ struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
++ struct nvbios *bios = &dev_priv->vbios;
++ struct drm_display_mode *mode = nv_connector->native_mode;
++ bool duallink;
++
++ /* if the edid is feeling nice enough to provide this info, use it */
++ if (nv_connector->edid && connector->display_info.bpc)
++ return;
++
++ /* EDID 1.4 is *supposed* to be supported on eDP, but, Apple... */
++ if (nv_connector->dcb->type == DCB_CONNECTOR_eDP) {
++ connector->display_info.bpc = 6;
++ return;
++ }
++
++ /* we're out of options unless we're LVDS, default to 8bpc */
++ if (nv_encoder->dcb->type != OUTPUT_LVDS) {
++ connector->display_info.bpc = 8;
++ return;
++ }
++
++ connector->display_info.bpc = 6;
++
++ /* LVDS: panel straps */
++ if (bios->fp_no_ddc) {
++ if (bios->fp.if_is_24bit)
++ connector->display_info.bpc = 8;
++ return;
++ }
++
++ /* LVDS: DDC panel, need to first determine the number of links to
++ * know which if_is_24bit flag to check...
++ */
++ if (nv_connector->edid &&
++ nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG)
++ duallink = ((u8 *)nv_connector->edid)[121] == 2;
++ else
++ duallink = mode->clock >= bios->fp.duallink_transition_clk;
++
++ if ((!duallink && (bios->fp.strapless_is_24bit & 1)) ||
++ ( duallink && (bios->fp.strapless_is_24bit & 2)))
++ connector->display_info.bpc = 8;
++}
++
+ static int
+ nouveau_connector_get_modes(struct drm_connector *connector)
+ {
+@@ -646,6 +695,12 @@ nouveau_connector_get_modes(struct drm_connector *connector)
+ ret = 1;
+ }
+
++ /* Attempt to determine display colour depth, this has to happen after
++ * we've determined the "native" mode for LVDS, as the VBIOS tables
++ * require us to compare against a pixel clock in some cases..
++ */
++ nouveau_connector_detect_depth(connector);
++
+ if (nv_encoder->dcb->type == OUTPUT_TV)
+ ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
+
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index dac178b..931f4df 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -2148,6 +2148,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
+ WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
++ if (rdev->family <= CHIP_SUMO2)
++ WREG32(SMX_SAR_CTL0, 0x00010000);
++
+ WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
+ POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
+ SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
+diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
+index 0128445..6ecd23f 100644
+--- a/drivers/gpu/drm/radeon/evergreend.h
++++ b/drivers/gpu/drm/radeon/evergreend.h
+@@ -270,6 +270,7 @@
+ #define SCRATCH_UMSK 0x8540
+ #define SCRATCH_ADDR 0x8544
+
++#define SMX_SAR_CTL0 0xA008
+ #define SMX_DC_CTL0 0xA020
+ #define USE_HASH_FUNCTION (1 << 0)
+ #define NUMBER_OF_SETS(x) ((x) << 1)
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 9cdda0b..bdfa82a 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -1905,6 +1905,7 @@ void r600_gpu_init(struct radeon_device *rdev)
+ WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
+ NUM_CLIP_SEQ(3)));
+ WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
++ WREG32(VC_ENHANCE, 0);
+ }
+
+
+diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
+index bfe1b5d..d4d23a8d 100644
+--- a/drivers/gpu/drm/radeon/r600d.h
++++ b/drivers/gpu/drm/radeon/r600d.h
+@@ -461,6 +461,7 @@
+ #define TC_L2_SIZE(x) ((x)<<5)
+ #define L2_DISABLE_LATE_HIT (1<<9)
+
++#define VC_ENHANCE 0x9714
+
+ #define VGT_CACHE_INVALIDATION 0x88C4
+ #define CACHE_INVALIDATION(x) ((x)<<0)
+diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
+index e36ba7f..cc79449 100644
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -782,6 +782,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
+ ACK_FLUSH_CTL(3) |
+ SYNC_FLUSH_CTL));
+
++ if (rdev->family != CHIP_RV770)
++ WREG32(SMX_SAR_CTL0, 0x00003f3f);
++
+ db_debug3 = RREG32(DB_DEBUG3);
+ db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
+ switch (rdev->family) {
+@@ -960,7 +963,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
+
+ WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
+ NUM_CLIP_SEQ(3)));
+-
++ WREG32(VC_ENHANCE, 0);
+ }
+
+ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
+index 7538092..7095a71 100644
+--- a/drivers/gpu/drm/radeon/rv770d.h
++++ b/drivers/gpu/drm/radeon/rv770d.h
+@@ -208,6 +208,7 @@
+ #define SCRATCH_UMSK 0x8540
+ #define SCRATCH_ADDR 0x8544
+
++#define SMX_SAR_CTL0 0xA008
+ #define SMX_DC_CTL0 0xA020
+ #define USE_HASH_FUNCTION (1 << 0)
+ #define CACHE_DEPTH(x) ((x) << 1)
+@@ -307,6 +308,8 @@
+ #define TCP_CNTL 0x9610
+ #define TCP_CHAN_STEER 0x9614
+
++#define VC_ENHANCE 0x9714
++
+ #define VGT_CACHE_INVALIDATION 0x88C4
+ #define CACHE_INVALIDATION(x) ((x)<<0)
+ #define VC_ONLY 0
+diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
+index 9a4c3ab..e8e18ca 100644
+--- a/drivers/hwmon/fam15h_power.c
++++ b/drivers/hwmon/fam15h_power.c
+@@ -61,14 +61,14 @@ static ssize_t show_power(struct device *dev,
+ REG_TDP_RUNNING_AVERAGE, &val);
+ running_avg_capture = (val >> 4) & 0x3fffff;
+ running_avg_capture = sign_extend32(running_avg_capture, 21);
+- running_avg_range = val & 0xf;
++ running_avg_range = (val & 0xf) + 1;
+
+ pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5),
+ REG_TDP_LIMIT3, &val);
+
+ tdp_limit = val >> 16;
+- curr_pwr_watts = tdp_limit + data->base_tdp -
+- (s32)(running_avg_capture >> (running_avg_range + 1));
++ curr_pwr_watts = (tdp_limit + data->base_tdp) << running_avg_range;
++ curr_pwr_watts -= running_avg_capture;
+ curr_pwr_watts *= data->tdp_to_watts;
+
+ /*
+@@ -78,7 +78,7 @@ static ssize_t show_power(struct device *dev,
+ * scaling factor 1/(2^16). For conversion we use
+ * (10^6)/(2^16) = 15625/(2^10)
+ */
+- curr_pwr_watts = (curr_pwr_watts * 15625) >> 10;
++ curr_pwr_watts = (curr_pwr_watts * 15625) >> (10 + running_avg_range);
+ return sprintf(buf, "%u\n", (unsigned int) curr_pwr_watts);
+ }
+ static DEVICE_ATTR(power1_input, S_IRUGO, show_power, NULL);
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 536bda0..8dc84d6 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -686,7 +686,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
+ *
+ * We iterate from priv->tx_echo to priv->tx_next and check if the
+ * packet has been transmitted, echo it back to the CAN framework.
+- * If we discover a not yet transmitted package, stop looking for more.
++ * If we discover a not yet transmitted packet, stop looking for more.
+ */
+ static void c_can_do_tx(struct net_device *dev)
+ {
+@@ -698,7 +698,7 @@ static void c_can_do_tx(struct net_device *dev)
+ for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
+ msg_obj_no = get_tx_echo_msg_obj(priv);
+ val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+- if (!(val & (1 << msg_obj_no))) {
++ if (!(val & (1 << (msg_obj_no - 1)))) {
+ can_get_echo_skb(dev,
+ msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+ stats->tx_bytes += priv->read_reg(priv,
+@@ -706,6 +706,8 @@ static void c_can_do_tx(struct net_device *dev)
+ & IF_MCONT_DLC_MASK;
+ stats->tx_packets++;
+ c_can_inval_msg_object(dev, 0, msg_obj_no);
++ } else {
++ break;
+ }
+ }
+
+@@ -950,7 +952,7 @@ static int c_can_poll(struct napi_struct *napi, int quota)
+ struct net_device *dev = napi->dev;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+- irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
++ irqstatus = priv->irqstatus;
+ if (!irqstatus)
+ goto end;
+
+@@ -1028,12 +1030,11 @@ end:
+
+ static irqreturn_t c_can_isr(int irq, void *dev_id)
+ {
+- u16 irqstatus;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+- irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+- if (!irqstatus)
++ priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
++ if (!priv->irqstatus)
+ return IRQ_NONE;
+
+ /* disable all interrupts and schedule the NAPI */
+@@ -1063,10 +1064,11 @@ static int c_can_open(struct net_device *dev)
+ goto exit_irq_fail;
+ }
+
++ napi_enable(&priv->napi);
++
+ /* start the c_can controller */
+ c_can_start(dev);
+
+- napi_enable(&priv->napi);
+ netif_start_queue(dev);
+
+ return 0;
+diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
+index 9b7fbef..5f32d34 100644
+--- a/drivers/net/can/c_can/c_can.h
++++ b/drivers/net/can/c_can/c_can.h
+@@ -76,6 +76,7 @@ struct c_can_priv {
+ unsigned int tx_next;
+ unsigned int tx_echo;
+ void *priv; /* for board-specific data */
++ u16 irqstatus;
+ };
+
+ struct net_device *alloc_c_can_dev(void);
+diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
+index ed1b432..864448b 100644
+--- a/drivers/net/usb/sierra_net.c
++++ b/drivers/net/usb/sierra_net.c
+@@ -943,7 +943,7 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ }
+
+ static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
+-static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
++static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
+ .rx_urb_size = 8 * 1024,
+ .whitelist = {
+ .infolen = ARRAY_SIZE(sierra_net_ifnum_list),
+@@ -951,7 +951,7 @@ static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
+ }
+ };
+
+-static const struct driver_info sierra_net_info_68A3 = {
++static const struct driver_info sierra_net_info_direct_ip = {
+ .description = "Sierra Wireless USB-to-WWAN Modem",
+ .flags = FLAG_WWAN | FLAG_SEND_ZLP,
+ .bind = sierra_net_bind,
+@@ -959,12 +959,18 @@ static const struct driver_info sierra_net_info_68A3 = {
+ .status = sierra_net_status,
+ .rx_fixup = sierra_net_rx_fixup,
+ .tx_fixup = sierra_net_tx_fixup,
+- .data = (unsigned long)&sierra_net_info_data_68A3,
++ .data = (unsigned long)&sierra_net_info_data_direct_ip,
+ };
+
+ static const struct usb_device_id products[] = {
+ {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
+- .driver_info = (unsigned long) &sierra_net_info_68A3},
++ .driver_info = (unsigned long) &sierra_net_info_direct_ip},
++ {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
++ .driver_info = (unsigned long) &sierra_net_info_direct_ip},
++ {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
++ .driver_info = (unsigned long) &sierra_net_info_direct_ip},
++ {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
++ .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+
+ {}, /* last item */
+ };
+diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
+index 0b9f797..9b6b010 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
+@@ -49,17 +49,20 @@
+ #define IWL6000_UCODE_API_MAX 6
+ #define IWL6050_UCODE_API_MAX 5
+ #define IWL6000G2_UCODE_API_MAX 6
++#define IWL6035_UCODE_API_MAX 6
+
+ /* Oldest version we won't warn about */
+ #define IWL6000_UCODE_API_OK 4
+ #define IWL6000G2_UCODE_API_OK 5
+ #define IWL6050_UCODE_API_OK 5
+ #define IWL6000G2B_UCODE_API_OK 6
++#define IWL6035_UCODE_API_OK 6
+
+ /* Lowest firmware API version supported */
+ #define IWL6000_UCODE_API_MIN 4
+ #define IWL6050_UCODE_API_MIN 4
+-#define IWL6000G2_UCODE_API_MIN 4
++#define IWL6000G2_UCODE_API_MIN 5
++#define IWL6035_UCODE_API_MIN 6
+
+ #define IWL6000_FW_PRE "iwlwifi-6000-"
+ #define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
+@@ -436,9 +439,24 @@ struct iwl_cfg iwl6030_2bg_cfg = {
+ IWL_DEVICE_6030,
+ };
+
++#define IWL_DEVICE_6035 \
++ .fw_name_pre = IWL6030_FW_PRE, \
++ .ucode_api_max = IWL6035_UCODE_API_MAX, \
++ .ucode_api_ok = IWL6035_UCODE_API_OK, \
++ .ucode_api_min = IWL6035_UCODE_API_MIN, \
++ .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
++ .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
++ .lib = &iwl6030_lib, \
++ .base_params = &iwl6000_g2_base_params, \
++ .bt_params = &iwl6000_bt_params, \
++ .need_dc_calib = true, \
++ .need_temp_offset_calib = true, \
++ .led_mode = IWL_LED_RF_STATE, \
++ .adv_pm = true
++
+ struct iwl_cfg iwl6035_2agn_cfg = {
+ .name = "6035 Series 2x2 AGN/BT",
+- IWL_DEVICE_6030,
++ IWL_DEVICE_6035,
+ .ht_params = &iwl6000_ht_params,
+ };
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+index 5cfb3d1..ccf1524 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+@@ -1245,7 +1245,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ sta_cmd.key.key_flags = key_flags;
+- sta_cmd.key.key_offset = WEP_INVALID_OFFSET;
++ sta_cmd.key.key_offset = keyconf->hw_key_idx;
+ sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
+index d7d2512..16cdd12 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
+@@ -1696,6 +1696,7 @@ static int iwlagn_mac_setup_register(struct iwl_priv *priv,
+ WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
+
++#ifdef CONFIG_PM_SLEEP
+ if (priv->ucode_wowlan.code.len && device_can_wakeup(bus(priv)->dev)) {
+ hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+@@ -1712,6 +1713,7 @@ static int iwlagn_mac_setup_register(struct iwl_priv *priv,
+ hw->wiphy->wowlan.pattern_max_len =
+ IWLAGN_WOWLAN_MAX_PATTERN_LEN;
+ }
++#endif
+
+ if (iwlagn_mod_params.power_save)
+ hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+@@ -1739,6 +1741,7 @@ static int iwlagn_mac_setup_register(struct iwl_priv *priv,
+ ret = ieee80211_register_hw(priv->hw);
+ if (ret) {
+ IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
++ iwl_leds_exit(priv);
+ return ret;
+ }
+ priv->mac80211_registered = 1;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
+index d9b089e..0683006 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
++++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
+@@ -228,6 +228,7 @@
+ #define SCD_TXFACT (SCD_BASE + 0x10)
+ #define SCD_ACTIVE (SCD_BASE + 0x14)
+ #define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8)
++#define SCD_CHAINEXT_EN (SCD_BASE + 0x244)
+ #define SCD_AGGR_SEL (SCD_BASE + 0x248)
+ #define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+index 5f17ab8..5815cf5 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+@@ -868,6 +868,11 @@ static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
+ iwl_write_prph(bus(trans), SCD_DRAM_BASE_ADDR,
+ trans_pcie->scd_bc_tbls.dma >> 10);
+
++ /* The chain extension of the SCD doesn't work well. This feature is
++ * enabled by default by the HW, so we need to disable it manually.
++ */
++ iwl_write_prph(bus(trans), SCD_CHAINEXT_EN, 0);
++
+ /* Enable DMA channel */
+ for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
+ iwl_write_direct32(bus(trans), FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
+index 99ff12d..c778164 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00.h
++++ b/drivers/net/wireless/rt2x00/rt2x00.h
+@@ -390,8 +390,7 @@ struct rt2x00_intf {
+ * for hardware which doesn't support hardware
+ * sequence counting.
+ */
+- spinlock_t seqlock;
+- u16 seqno;
++ atomic_t seqno;
+ };
+
+ static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
+diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
+index 373dae1..921da9a 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
++++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
+@@ -277,7 +277,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
+ else
+ rt2x00dev->intf_sta_count++;
+
+- spin_lock_init(&intf->seqlock);
+ mutex_init(&intf->beacon_skb_mutex);
+ intf->beacon = entry;
+
+diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
+index 9b1b2b7..50f92d5 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
++++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
+@@ -207,6 +207,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
++ u16 seqno;
+
+ if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
+ return;
+@@ -227,15 +228,13 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
+ * sequence counting per-frame, since those will override the
+ * sequence counter given by mac80211.
+ */
+- spin_lock(&intf->seqlock);
+-
+ if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
+- intf->seqno += 0x10;
+- hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+- hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
+-
+- spin_unlock(&intf->seqlock);
++ seqno = atomic_add_return(0x10, &intf->seqno);
++ else
++ seqno = atomic_read(&intf->seqno);
+
++ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
++ hdr->seq_ctrl |= cpu_to_le16(seqno);
+ }
+
+ static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 6d4a531..e5b75eb 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1689,6 +1689,11 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
+ if (target_state == PCI_POWER_ERROR)
+ return -EIO;
+
++ /* Some devices mustn't be in D3 during system sleep */
++ if (target_state == PCI_D3hot &&
++ (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP))
++ return 0;
++
+ pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
+
+ error = pci_set_power_state(dev, target_state);
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 78fda9c..3c56fec 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2940,6 +2940,32 @@ static void __devinit disable_igfx_irq(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
+
++/*
++ * The Intel 6 Series/C200 Series chipset's EHCI controllers on many
++ * ASUS motherboards will cause memory corruption or a system crash
++ * if they are in D3 while the system is put into S3 sleep.
++ */
++static void __devinit asus_ehci_no_d3(struct pci_dev *dev)
++{
++ const char *sys_info;
++ static const char good_Asus_board[] = "P8Z68-V";
++
++ if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)
++ return;
++ if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK)
++ return;
++ sys_info = dmi_get_system_info(DMI_BOARD_NAME);
++ if (sys_info && memcmp(sys_info, good_Asus_board,
++ sizeof(good_Asus_board) - 1) == 0)
++ return;
++
++ dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n");
++ dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP;
++ device_set_wakeup_capable(&dev->dev, false);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3);
++
+ static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
+ struct pci_fixup *end)
+ {
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
+index ac336e1..e903077 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -1739,7 +1739,7 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
+ static inline u8
+ _base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
+ {
+- return ioc->cpu_msix_table[smp_processor_id()];
++ return ioc->cpu_msix_table[raw_smp_processor_id()];
+ }
+
+ /**
+diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
+index 5b05744..0364ca2 100644
+--- a/drivers/target/target_core_alua.c
++++ b/drivers/target/target_core_alua.c
+@@ -352,9 +352,11 @@ int target_emulate_set_target_port_groups(struct se_task *task)
+
+ out:
+ transport_kunmap_data_sg(cmd);
+- task->task_scsi_status = GOOD;
+- transport_complete_task(task, 1);
+- return 0;
++ if (!rc) {
++ task->task_scsi_status = GOOD;
++ transport_complete_task(task, 1);
++ }
++ return rc;
+ }
+
+ static inline int core_alua_state_nonoptimized(
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index e61d9c4..1094469 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -498,6 +498,14 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
+
+ usb_autopm_put_interface(acm->control);
+
++ /*
++ * Unthrottle device in case the TTY was closed while throttled.
++ */
++ spin_lock_irq(&acm->read_lock);
++ acm->throttled = 0;
++ acm->throttle_req = 0;
++ spin_unlock_irq(&acm->read_lock);
++
+ if (acm_submit_read_urbs(acm, GFP_KERNEL))
+ goto bail_out;
+
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 554ac90..19fb5fa 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -31,6 +31,8 @@
+ #define DRIVER_AUTHOR "Oliver Neukum"
+ #define DRIVER_DESC "USB Abstract Control Model driver for USB WCM Device Management"
+
++#define HUAWEI_VENDOR_ID 0x12D1
++
+ static const struct usb_device_id wdm_ids[] = {
+ {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS |
+@@ -38,6 +40,20 @@ static const struct usb_device_id wdm_ids[] = {
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_DMM
+ },
++ {
++ /*
++ * Huawei E392, E398 and possibly other Qualcomm based modems
++ * embed the Qualcomm QMI protocol inside CDC on CDC ECM like
++ * control interfaces. Userspace access to this is required
++ * to configure the accompanying data interface
++ */
++ .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
++ USB_DEVICE_ID_MATCH_INT_INFO,
++ .idVendor = HUAWEI_VENDOR_ID,
++ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
++ .bInterfaceSubClass = 1,
++ .bInterfaceProtocol = 9, /* NOTE: CDC ECM control interface! */
++ },
+ { }
+ };
+
+diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
+index 5f1404a..61d08dd 100644
+--- a/drivers/usb/core/hcd-pci.c
++++ b/drivers/usb/core/hcd-pci.c
+@@ -495,15 +495,6 @@ static int hcd_pci_suspend_noirq(struct device *dev)
+
+ pci_save_state(pci_dev);
+
+- /*
+- * Some systems crash if an EHCI controller is in D3 during
+- * a sleep transition. We have to leave such controllers in D0.
+- */
+- if (hcd->broken_pci_sleep) {
+- dev_dbg(dev, "Staying in PCI D0\n");
+- return retval;
+- }
+-
+ /* If the root hub is dead rather than suspended, disallow remote
+ * wakeup. usb_hc_died() should ensure that both hosts are marked as
+ * dying, so we only need to check the primary roothub.
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index ca717da..ef116a5 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -1803,7 +1803,6 @@ free_interfaces:
+ intfc = cp->intf_cache[i];
+ intf->altsetting = intfc->altsetting;
+ intf->num_altsetting = intfc->num_altsetting;
+- intf->intf_assoc = find_iad(dev, cp, i);
+ kref_get(&intfc->ref);
+
+ alt = usb_altnum_to_altsetting(intf, 0);
+@@ -1816,6 +1815,8 @@ free_interfaces:
+ if (!alt)
+ alt = &intf->altsetting[0];
+
++ intf->intf_assoc =
++ find_iad(dev, cp, alt->desc.bInterfaceNumber);
+ intf->cur_altsetting = alt;
+ usb_enable_interface(dev, intf, true);
+ intf->dev.parent = &dev->dev;
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index da2f711..339be10 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -620,6 +620,9 @@ static int ehci_init(struct usb_hcd *hcd)
+ hw = ehci->async->hw;
+ hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
+ hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
++#if defined(CONFIG_PPC_PS3)
++ hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */
++#endif
+ hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
+ hw->hw_qtd_next = EHCI_LIST_END(ehci);
+ ehci->async->qh_state = QH_STATE_LINKED;
+diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
+index ee85e81..a79e64b 100644
+--- a/drivers/usb/host/ehci-pci.c
++++ b/drivers/usb/host/ehci-pci.c
+@@ -144,14 +144,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
+ hcd->has_tt = 1;
+ tdi_reset(ehci);
+ }
+- if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
+- /* EHCI #1 or #2 on 6 Series/C200 Series chipset */
+- if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
+- ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
+- hcd->broken_pci_sleep = 1;
+- device_set_wakeup_capable(&pdev->dev, false);
+- }
+- }
+ break;
+ case PCI_VENDOR_ID_TDI:
+ if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 4232e868..a40ab98 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -693,10 +693,9 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int slot_id)
+ {
+- struct list_head *tt;
+ struct list_head *tt_list_head;
+- struct list_head *tt_next;
+- struct xhci_tt_bw_info *tt_info;
++ struct xhci_tt_bw_info *tt_info, *next;
++ bool slot_found = false;
+
+ /* If the device never made it past the Set Address stage,
+ * it may not have the real_port set correctly.
+@@ -708,34 +707,16 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
+ }
+
+ tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
+- if (list_empty(tt_list_head))
+- return;
+-
+- list_for_each(tt, tt_list_head) {
+- tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
+- if (tt_info->slot_id == slot_id)
++ list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
++ /* Multi-TT hubs will have more than one entry */
++ if (tt_info->slot_id == slot_id) {
++ slot_found = true;
++ list_del(&tt_info->tt_list);
++ kfree(tt_info);
++ } else if (slot_found) {
+ break;
++ }
+ }
+- /* Cautionary measure in case the hub was disconnected before we
+- * stored the TT information.
+- */
+- if (tt_info->slot_id != slot_id)
+- return;
+-
+- tt_next = tt->next;
+- tt_info = list_entry(tt, struct xhci_tt_bw_info,
+- tt_list);
+- /* Multi-TT hubs will have more than one entry */
+- do {
+- list_del(tt);
+- kfree(tt_info);
+- tt = tt_next;
+- if (list_empty(tt_list_head))
+- break;
+- tt_next = tt->next;
+- tt_info = list_entry(tt, struct xhci_tt_bw_info,
+- tt_list);
+- } while (tt_info->slot_id == slot_id);
+ }
+
+ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
+@@ -1699,17 +1680,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ {
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct dev_info *dev_info, *next;
+- struct list_head *tt_list_head;
+- struct list_head *tt;
+- struct list_head *endpoints;
+- struct list_head *ep, *q;
+- struct xhci_tt_bw_info *tt_info;
+- struct xhci_interval_bw_table *bwt;
+- struct xhci_virt_ep *virt_ep;
+-
+ unsigned long flags;
+ int size;
+- int i;
++ int i, j, num_ports;
+
+ /* Free the Event Ring Segment Table and the actual Event Ring */
+ size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
+@@ -1766,21 +1739,22 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+- bwt = &xhci->rh_bw->bw_table;
+- for (i = 0; i < XHCI_MAX_INTERVAL; i++) {
+- endpoints = &bwt->interval_bw[i].endpoints;
+- list_for_each_safe(ep, q, endpoints) {
+- virt_ep = list_entry(ep, struct xhci_virt_ep, bw_endpoint_list);
+- list_del(&virt_ep->bw_endpoint_list);
+- kfree(virt_ep);
++ num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
++ for (i = 0; i < num_ports; i++) {
++ struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
++ for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
++ struct list_head *ep = &bwt->interval_bw[j].endpoints;
++ while (!list_empty(ep))
++ list_del_init(ep->next);
+ }
+ }
+
+- tt_list_head = &xhci->rh_bw->tts;
+- list_for_each_safe(tt, q, tt_list_head) {
+- tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
+- list_del(tt);
+- kfree(tt_info);
++ for (i = 0; i < num_ports; i++) {
++ struct xhci_tt_bw_info *tt, *n;
++ list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
++ list_del(&tt->tt_list);
++ kfree(tt);
++ }
+ }
+
+ xhci->num_usb2_ports = 0;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 4c00606..05f82e9 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -783,8 +783,8 @@ int xhci_suspend(struct xhci_hcd *xhci)
+ command = xhci_readl(xhci, &xhci->op_regs->command);
+ command |= CMD_CSS;
+ xhci_writel(xhci, command, &xhci->op_regs->command);
+- if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
+- xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
++ if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
++ xhci_warn(xhci, "WARN: xHC save state timeout\n");
+ spin_unlock_irq(&xhci->lock);
+ return -ETIMEDOUT;
+ }
+@@ -836,8 +836,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ command |= CMD_CRS;
+ xhci_writel(xhci, command, &xhci->op_regs->command);
+ if (handshake(xhci, &xhci->op_regs->status,
+- STS_RESTORE, 0, 10*100)) {
+- xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n");
++ STS_RESTORE, 0, 10 * 1000)) {
++ xhci_warn(xhci, "WARN: xHC restore state timeout\n");
+ spin_unlock_irq(&xhci->lock);
+ return -ETIMEDOUT;
+ }
+diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
+index c860597..9c8845a 100644
+--- a/drivers/usb/musb/musb_gadget.c
++++ b/drivers/usb/musb/musb_gadget.c
+@@ -1234,6 +1234,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
+ }
+
+ musb_ep->desc = NULL;
++ musb_ep->end_point.desc = NULL;
+
+ /* abort all pending DMA and requests */
+ nuke(musb_ep, -ESHUTDOWN);
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index f2c57e0..aa0c43f 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -82,6 +82,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
+ { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
+ { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
++ { USB_DEVICE(0x10C4, 0x80C4) }, /* Cygnal Integrated Products, Inc., Optris infrared thermometer */
+ { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
+ { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
+ { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 450bdfe..4045e39 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -737,6 +737,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
+ { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 219b199..d27d7d7 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -784,6 +784,7 @@
+ #define RTSYSTEMS_VID 0x2100 /* Vendor ID */
+ #define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */
+ #define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */
++#define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */
+
+
+ /*
+diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
+index ba0d287..42de17b 100644
+--- a/drivers/usb/serial/mct_u232.c
++++ b/drivers/usb/serial/mct_u232.c
+@@ -359,13 +359,16 @@ static int mct_u232_set_modem_ctrl(struct usb_serial *serial,
+ MCT_U232_SET_REQUEST_TYPE,
+ 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE,
+ WDR_TIMEOUT);
+- if (rc < 0)
+- dev_err(&serial->dev->dev,
+- "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
++ kfree(buf);
++
+ dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr);
+
+- kfree(buf);
+- return rc;
++ if (rc < 0) {
++ dev_err(&serial->dev->dev,
++ "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
++ return rc;
++ }
++ return 0;
+ } /* mct_u232_set_modem_ctrl */
+
+ static int mct_u232_get_modem_stat(struct usb_serial *serial,
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index cbe3451..61d6c31 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -47,6 +47,7 @@
+ /* Function prototypes */
+ static int option_probe(struct usb_serial *serial,
+ const struct usb_device_id *id);
++static void option_release(struct usb_serial *serial);
+ static int option_send_setup(struct usb_serial_port *port);
+ static void option_instat_callback(struct urb *urb);
+
+@@ -425,7 +426,7 @@ static void option_instat_callback(struct urb *urb);
+ #define SAMSUNG_VENDOR_ID 0x04e8
+ #define SAMSUNG_PRODUCT_GT_B3730 0x6889
+
+-/* YUGA products www.yuga-info.com*/
++/* YUGA products www.yuga-info.com gavin.kx@qq.com */
+ #define YUGA_VENDOR_ID 0x257A
+ #define YUGA_PRODUCT_CEM600 0x1601
+ #define YUGA_PRODUCT_CEM610 0x1602
+@@ -442,6 +443,8 @@ static void option_instat_callback(struct urb *urb);
+ #define YUGA_PRODUCT_CEU516 0x160C
+ #define YUGA_PRODUCT_CEU528 0x160D
+ #define YUGA_PRODUCT_CEU526 0x160F
++#define YUGA_PRODUCT_CEU881 0x161F
++#define YUGA_PRODUCT_CEU882 0x162F
+
+ #define YUGA_PRODUCT_CWM600 0x2601
+ #define YUGA_PRODUCT_CWM610 0x2602
+@@ -457,23 +460,26 @@ static void option_instat_callback(struct urb *urb);
+ #define YUGA_PRODUCT_CWU518 0x260B
+ #define YUGA_PRODUCT_CWU516 0x260C
+ #define YUGA_PRODUCT_CWU528 0x260D
++#define YUGA_PRODUCT_CWU581 0x260E
+ #define YUGA_PRODUCT_CWU526 0x260F
+-
+-#define YUGA_PRODUCT_CLM600 0x2601
+-#define YUGA_PRODUCT_CLM610 0x2602
+-#define YUGA_PRODUCT_CLM500 0x2603
+-#define YUGA_PRODUCT_CLM510 0x2604
+-#define YUGA_PRODUCT_CLM800 0x2605
+-#define YUGA_PRODUCT_CLM900 0x2606
+-
+-#define YUGA_PRODUCT_CLU718 0x2607
+-#define YUGA_PRODUCT_CLU716 0x2608
+-#define YUGA_PRODUCT_CLU728 0x2609
+-#define YUGA_PRODUCT_CLU726 0x260A
+-#define YUGA_PRODUCT_CLU518 0x260B
+-#define YUGA_PRODUCT_CLU516 0x260C
+-#define YUGA_PRODUCT_CLU528 0x260D
+-#define YUGA_PRODUCT_CLU526 0x260F
++#define YUGA_PRODUCT_CWU582 0x261F
++#define YUGA_PRODUCT_CWU583 0x262F
++
++#define YUGA_PRODUCT_CLM600 0x3601
++#define YUGA_PRODUCT_CLM610 0x3602
++#define YUGA_PRODUCT_CLM500 0x3603
++#define YUGA_PRODUCT_CLM510 0x3604
++#define YUGA_PRODUCT_CLM800 0x3605
++#define YUGA_PRODUCT_CLM900 0x3606
++
++#define YUGA_PRODUCT_CLU718 0x3607
++#define YUGA_PRODUCT_CLU716 0x3608
++#define YUGA_PRODUCT_CLU728 0x3609
++#define YUGA_PRODUCT_CLU726 0x360A
++#define YUGA_PRODUCT_CLU518 0x360B
++#define YUGA_PRODUCT_CLU516 0x360C
++#define YUGA_PRODUCT_CLU528 0x360D
++#define YUGA_PRODUCT_CLU526 0x360F
+
+ /* Viettel products */
+ #define VIETTEL_VENDOR_ID 0x2262
+@@ -666,6 +672,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
+@@ -1207,6 +1215,11 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
++ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU881) },
++ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU882) },
++ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU581) },
++ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU582) },
++ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU583) },
+ { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */
+@@ -1257,7 +1270,7 @@ static struct usb_serial_driver option_1port_device = {
+ .ioctl = usb_wwan_ioctl,
+ .attach = usb_wwan_startup,
+ .disconnect = usb_wwan_disconnect,
+- .release = usb_wwan_release,
++ .release = option_release,
+ .read_int_callback = option_instat_callback,
+ #ifdef CONFIG_PM
+ .suspend = usb_wwan_suspend,
+@@ -1267,35 +1280,6 @@ static struct usb_serial_driver option_1port_device = {
+
+ static int debug;
+
+-/* per port private data */
+-
+-#define N_IN_URB 4
+-#define N_OUT_URB 4
+-#define IN_BUFLEN 4096
+-#define OUT_BUFLEN 4096
+-
+-struct option_port_private {
+- /* Input endpoints and buffer for this port */
+- struct urb *in_urbs[N_IN_URB];
+- u8 *in_buffer[N_IN_URB];
+- /* Output endpoints and buffer for this port */
+- struct urb *out_urbs[N_OUT_URB];
+- u8 *out_buffer[N_OUT_URB];
+- unsigned long out_busy; /* Bit vector of URBs in use */
+- int opened;
+- struct usb_anchor delayed;
+-
+- /* Settings for the port */
+- int rts_state; /* Handshaking pins (outputs) */
+- int dtr_state;
+- int cts_state; /* Handshaking pins (inputs) */
+- int dsr_state;
+- int dcd_state;
+- int ri_state;
+-
+- unsigned long tx_start_time[N_OUT_URB];
+-};
+-
+ /* Functions used by new usb-serial code. */
+ static int __init option_init(void)
+ {
+@@ -1393,12 +1377,22 @@ static int option_probe(struct usb_serial *serial,
+ return 0;
+ }
+
++static void option_release(struct usb_serial *serial)
++{
++ struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
++
++ usb_wwan_release(serial);
++
++ kfree(priv);
++}
++
+ static void option_instat_callback(struct urb *urb)
+ {
+ int err;
+ int status = urb->status;
+ struct usb_serial_port *port = urb->context;
+- struct option_port_private *portdata = usb_get_serial_port_data(port);
++ struct usb_wwan_port_private *portdata =
++ usb_get_serial_port_data(port);
+
+ dbg("%s", __func__);
+ dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata);
+@@ -1459,7 +1453,7 @@ static int option_send_setup(struct usb_serial_port *port)
+ struct usb_serial *serial = port->serial;
+ struct usb_wwan_intf_private *intfdata =
+ (struct usb_wwan_intf_private *) serial->private;
+- struct option_port_private *portdata;
++ struct usb_wwan_port_private *portdata;
+ int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
+ int val = 0;
+ dbg("%s", __func__);
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 3187d8b..6634477 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -105,7 +105,13 @@ static const struct usb_device_id id_table[] = {
+ {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
+ {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
+ {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
++ {USB_DEVICE(0x1199, 0x9010)}, /* Sierra Wireless Gobi 3000 QDL */
++ {USB_DEVICE(0x1199, 0x9012)}, /* Sierra Wireless Gobi 3000 QDL */
+ {USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
++ {USB_DEVICE(0x1199, 0x9014)}, /* Sierra Wireless Gobi 3000 QDL */
++ {USB_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
++ {USB_DEVICE(0x1199, 0x9018)}, /* Sierra Wireless Gobi 3000 QDL */
++ {USB_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
+ {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */
+ {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */
+ { } /* Terminating entry */
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index e093585..535d087 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -304,6 +304,10 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ },
++ /* AT&T Direct IP LTE modems */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
++ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
++ },
+ { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ },
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index 38d7ebd..8bea45c 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -669,12 +669,14 @@ exit:
+ static struct usb_serial_driver *search_serial_device(
+ struct usb_interface *iface)
+ {
+- const struct usb_device_id *id;
++ const struct usb_device_id *id = NULL;
+ struct usb_serial_driver *drv;
++ struct usb_driver *driver = to_usb_driver(iface->dev.driver);
+
+ /* Check if the usb id matches a known device */
+ list_for_each_entry(drv, &usb_serial_driver_list, driver_list) {
+- id = get_iface_id(drv, iface);
++ if (drv->usb_driver == driver)
++ id = get_iface_id(drv, iface);
+ if (id)
+ return drv;
+ }
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 9f63e49..5ef7afb 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -858,6 +858,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
+ if (stat) {
+ generic_fillattr(inode, stat);
+ stat->mode = fi->orig_i_mode;
++ stat->ino = fi->orig_ino;
+ }
+ }
+
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index cf6db0a..89c4a58 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -82,6 +82,9 @@ struct fuse_inode {
+ preserve the original mode */
+ mode_t orig_i_mode;
+
++ /** 64 bit inode number */
++ u64 orig_ino;
++
+ /** Version of last attribute change */
+ u64 attr_version;
+
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index aa83109..1f82d95 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -91,6 +91,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
+ fi->nlookup = 0;
+ fi->attr_version = 0;
+ fi->writectr = 0;
++ fi->orig_ino = 0;
+ INIT_LIST_HEAD(&fi->write_files);
+ INIT_LIST_HEAD(&fi->queued_writes);
+ INIT_LIST_HEAD(&fi->writepages);
+@@ -140,6 +141,18 @@ static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
+ return 0;
+ }
+
++/*
++ * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
++ * so that it will fit.
++ */
++static ino_t fuse_squash_ino(u64 ino64)
++{
++ ino_t ino = (ino_t) ino64;
++ if (sizeof(ino_t) < sizeof(u64))
++ ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
++ return ino;
++}
++
+ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
+ u64 attr_valid)
+ {
+@@ -149,7 +162,7 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
+ fi->attr_version = ++fc->attr_version;
+ fi->i_time = attr_valid;
+
+- inode->i_ino = attr->ino;
++ inode->i_ino = fuse_squash_ino(attr->ino);
+ inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
+ set_nlink(inode, attr->nlink);
+ inode->i_uid = attr->uid;
+@@ -175,6 +188,8 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
+ fi->orig_i_mode = inode->i_mode;
+ if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
+ inode->i_mode &= ~S_ISVTX;
++
++ fi->orig_ino = attr->ino;
+ }
+
+ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index a3cae5d..8000459 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1825,6 +1825,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry,
+ struct nfs4_state *res;
+ int status;
+
++ fmode &= FMODE_READ|FMODE_WRITE;
+ do {
+ status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res);
+ if (status == 0)
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 7cda65b..c0cfa0d 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -176,6 +176,8 @@ enum pci_dev_flags {
+ PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
+ /* Provide indication device is assigned by a Virtual Machine Manager */
+ PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
++ /* Device causes system crash if in D3 during S3 sleep */
++ PCI_DEV_FLAGS_NO_D3_DURING_SLEEP = (__force pci_dev_flags_t) 8,
+ };
+
+ enum pci_irq_reroute_variant {
+diff --git a/include/linux/swapops.h b/include/linux/swapops.h
+index 2189d3f..d6955607 100644
+--- a/include/linux/swapops.h
++++ b/include/linux/swapops.h
+@@ -8,13 +8,15 @@
+ * get good packing density in that tree, so the index should be dense in
+ * the low-order bits.
+ *
+- * We arrange the `type' and `offset' fields so that `type' is at the five
++ * We arrange the `type' and `offset' fields so that `type' is at the seven
+ * high-order bits of the swp_entry_t and `offset' is right-aligned in the
+- * remaining bits.
++ * remaining bits. Although `type' itself needs only five bits, we allow for
++ * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
+ *
+ * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
+ */
+-#define SWP_TYPE_SHIFT(e) (sizeof(e.val) * 8 - MAX_SWAPFILES_SHIFT)
++#define SWP_TYPE_SHIFT(e) ((sizeof(e.val) * 8) - \
++ (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
+ #define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1)
+
+ /*
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index 64cec8d..03354d5 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -128,8 +128,6 @@ struct usb_hcd {
+ unsigned wireless:1; /* Wireless USB HCD */
+ unsigned authorized_default:1;
+ unsigned has_tt:1; /* Integrated TT in root hub */
+- unsigned broken_pci_sleep:1; /* Don't put the
+- controller in PCI-D3 for system sleep */
+
+ int irq; /* irq allocated */
+ void __iomem *regs; /* device memory/io */
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 299f55c..576a27f 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -7429,11 +7429,8 @@ int sched_domain_level_max;
+
+ static int __init setup_relax_domain_level(char *str)
+ {
+- unsigned long val;
+-
+- val = simple_strtoul(str, NULL, 0);
+- if (val < sched_domain_level_max)
+- default_relax_domain_level = val;
++ if (kstrtoint(str, 0, &default_relax_domain_level))
++ pr_warn("Unable to set relax_domain_level\n");
+
+ return 1;
+ }
+@@ -7636,7 +7633,6 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
+ if (!sd)
+ return child;
+
+- set_domain_attribute(sd, attr);
+ cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
+ if (child) {
+ sd->level = child->level + 1;
+@@ -7644,6 +7640,7 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
+ child->parent = sd;
+ }
+ sd->child = child;
++ set_domain_attribute(sd, attr);
+
+ return sd;
+ }
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index d3955f2..fad1830 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -1921,24 +1921,20 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
+
+ /*
+ * Find out how many pages are allowed for a single swap
+- * device. There are three limiting factors: 1) the number
++ * device. There are two limiting factors: 1) the number
+ * of bits for the swap offset in the swp_entry_t type, and
+ * 2) the number of bits in the swap pte as defined by the
+- * the different architectures, and 3) the number of free bits
+- * in an exceptional radix_tree entry. In order to find the
++ * different architectures. In order to find the
+ * largest possible bit mask, a swap entry with swap type 0
+ * and swap offset ~0UL is created, encoded to a swap pte,
+ * decoded to a swp_entry_t again, and finally the swap
+ * offset is extracted. This will mask all the bits from
+ * the initial ~0UL mask that can't be encoded in either
+ * the swp_entry_t or the architecture definition of a
+- * swap pte. Then the same is done for a radix_tree entry.
++ * swap pte.
+ */
+ maxpages = swp_offset(pte_to_swp_entry(
+- swp_entry_to_pte(swp_entry(0, ~0UL))));
+- maxpages = swp_offset(radix_to_swp_entry(
+- swp_to_radix_entry(swp_entry(0, maxpages)))) + 1;
+-
++ swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
+ if (maxpages > swap_header->info.last_page) {
+ maxpages = swap_header->info.last_page + 1;
+ /* p->max is an unsigned int: don't overflow it */
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 30d7355..f4ddf34 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -495,6 +495,18 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+ ieee80211_configure_filter(local);
+ break;
+ default:
++ mutex_lock(&local->mtx);
++ if (local->hw_roc_dev == sdata->dev &&
++ local->hw_roc_channel) {
++ /* ignore return value since this is racy */
++ drv_cancel_remain_on_channel(local);
++ ieee80211_queue_work(&local->hw, &local->hw_roc_done);
++ }
++ mutex_unlock(&local->mtx);
++
++ flush_work(&local->hw_roc_start);
++ flush_work(&local->hw_roc_done);
++
+ flush_work(&sdata->work);
+ /*
+ * When we get here, the interface is marked down.
+diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
+index 1b239be..db2c215 100644
+--- a/net/mac80211/offchannel.c
++++ b/net/mac80211/offchannel.c
+@@ -246,6 +246,22 @@ static void ieee80211_hw_roc_done(struct work_struct *work)
+ return;
+ }
+
++ /* was never transmitted */
++ if (local->hw_roc_skb) {
++ u64 cookie;
++
++ cookie = local->hw_roc_cookie ^ 2;
++
++ cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie,
++ local->hw_roc_skb->data,
++ local->hw_roc_skb->len, false,
++ GFP_KERNEL);
++
++ kfree_skb(local->hw_roc_skb);
++ local->hw_roc_skb = NULL;
++ local->hw_roc_skb_for_status = NULL;
++ }
++
+ if (!local->hw_roc_for_tx)
+ cfg80211_remain_on_channel_expired(local->hw_roc_dev,
+ local->hw_roc_cookie,
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index d4ad50e..c80c162 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -1358,7 +1358,8 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
+ sizeof(req->rq_snd_buf));
+ return bc_send(req);
+ } else {
+- /* Nothing to do to drop request */
++ /* drop request */
++ xprt_free_bc_request(req);
+ return 0;
+ }
+ }
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 8bf8902..d38815d 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -943,6 +943,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+ enum nl80211_iftype iftype)
+ {
+ struct wireless_dev *wdev_iter;
++ u32 used_iftypes = BIT(iftype);
+ int num[NUM_NL80211_IFTYPES];
+ int total = 1;
+ int i, j;
+@@ -976,12 +977,14 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+
+ num[wdev_iter->iftype]++;
+ total++;
++ used_iftypes |= BIT(wdev_iter->iftype);
+ }
+ mutex_unlock(&rdev->devlist_mtx);
+
+ for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
+ const struct ieee80211_iface_combination *c;
+ struct ieee80211_iface_limit *limits;
++ u32 all_iftypes = 0;
+
+ c = &rdev->wiphy.iface_combinations[i];
+
+@@ -996,6 +999,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+ if (rdev->wiphy.software_iftypes & BIT(iftype))
+ continue;
+ for (j = 0; j < c->n_limits; j++) {
++ all_iftypes |= limits[j].types;
+ if (!(limits[j].types & BIT(iftype)))
+ continue;
+ if (limits[j].max < num[iftype])
+@@ -1003,7 +1007,20 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+ limits[j].max -= num[iftype];
+ }
+ }
+- /* yay, it fits */
++
++ /*
++ * Finally check that all iftypes that we're currently
++ * using are actually part of this combination. If they
++ * aren't then we can't use this combination and have
++ * to continue to the next.
++ */
++ if ((all_iftypes & used_iftypes) != used_iftypes)
++ goto cont;
++
++ /*
++ * This combination covered all interface types and
++ * supported the requested numbers, so we're good.
++ */
+ kfree(limits);
+ return 0;
+ cont:
+diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
+index 71f6744..78b2223 100644
+--- a/sound/pci/hda/hda_codec.h
++++ b/sound/pci/hda/hda_codec.h
+@@ -858,6 +858,7 @@ struct hda_codec {
+ unsigned int pins_shutup:1; /* pins are shut up */
+ unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */
+ unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */
++ unsigned int no_jack_detect:1; /* Machine has no jack-detection */
+ #ifdef CONFIG_SND_HDA_POWER_SAVE
+ unsigned int power_on :1; /* current (global) power-state */
+ unsigned int power_transition :1; /* power-state in transition */
+diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
+index 368f0c5..c78b753 100644
+--- a/sound/pci/hda/hda_local.h
++++ b/sound/pci/hda/hda_local.h
+@@ -515,6 +515,8 @@ int snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid);
+
+ static inline bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid)
+ {
++ if (codec->no_jack_detect)
++ return false;
+ if (!(snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_PRES_DETECT))
+ return false;
+ if (!codec->ignore_misc_bit &&
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 0bc5a46..0005bde 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5278,8 +5278,10 @@ static const struct hda_amp_list alc861_loopbacks[] = {
+
+ /* Pin config fixes */
+ enum {
+- PINFIX_FSC_AMILO_PI1505,
+- PINFIX_ASUS_A6RP,
++ ALC861_FIXUP_FSC_AMILO_PI1505,
++ ALC861_FIXUP_AMP_VREF_0F,
++ ALC861_FIXUP_NO_JACK_DETECT,
++ ALC861_FIXUP_ASUS_A6RP,
+ };
+
+ /* On some laptops, VREF of pin 0x0f is abused for controlling the main amp */
+@@ -5301,8 +5303,16 @@ static void alc861_fixup_asus_amp_vref_0f(struct hda_codec *codec,
+ spec->keep_vref_in_automute = 1;
+ }
+
++/* suppress the jack-detection */
++static void alc_fixup_no_jack_detect(struct hda_codec *codec,
++ const struct alc_fixup *fix, int action)
++{
++ if (action == ALC_FIXUP_ACT_PRE_PROBE)
++ codec->no_jack_detect = 1;
++}
++
+ static const struct alc_fixup alc861_fixups[] = {
+- [PINFIX_FSC_AMILO_PI1505] = {
++ [ALC861_FIXUP_FSC_AMILO_PI1505] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x0b, 0x0221101f }, /* HP */
+@@ -5310,16 +5320,29 @@ static const struct alc_fixup alc861_fixups[] = {
+ { }
+ }
+ },
+- [PINFIX_ASUS_A6RP] = {
++ [ALC861_FIXUP_AMP_VREF_0F] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc861_fixup_asus_amp_vref_0f,
+ },
++ [ALC861_FIXUP_NO_JACK_DETECT] = {
++ .type = ALC_FIXUP_FUNC,
++ .v.func = alc_fixup_no_jack_detect,
++ },
++ [ALC861_FIXUP_ASUS_A6RP] = {
++ .type = ALC_FIXUP_FUNC,
++ .v.func = alc861_fixup_asus_amp_vref_0f,
++ .chained = true,
++ .chain_id = ALC861_FIXUP_NO_JACK_DETECT,
++ }
+ };
+
+ static const struct snd_pci_quirk alc861_fixup_tbl[] = {
+- SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", PINFIX_ASUS_A6RP),
+- SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", PINFIX_ASUS_A6RP),
+- SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", PINFIX_FSC_AMILO_PI1505),
++ SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP),
++ SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F),
++ SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT),
++ SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", ALC861_FIXUP_AMP_VREF_0F),
++ SND_PCI_QUIRK(0x1584, 0x0000, "Uniwill ECS M31EI", ALC861_FIXUP_AMP_VREF_0F),
++ SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", ALC861_FIXUP_FSC_AMILO_PI1505),
+ {}
+ };
+
+@@ -5571,6 +5594,8 @@ enum {
+ ALC662_FIXUP_ASUS_MODE6,
+ ALC662_FIXUP_ASUS_MODE7,
+ ALC662_FIXUP_ASUS_MODE8,
++ ALC662_FIXUP_NO_JACK_DETECT,
++ ALC662_FIXUP_ZOTAC_Z68,
+ };
+
+ static const struct alc_fixup alc662_fixups[] = {
+@@ -5716,6 +5741,17 @@ static const struct alc_fixup alc662_fixups[] = {
+ .chained = true,
+ .chain_id = ALC662_FIXUP_SKU_IGNORE
+ },
++ [ALC662_FIXUP_NO_JACK_DETECT] = {
++ .type = ALC_FIXUP_FUNC,
++ .v.func = alc_fixup_no_jack_detect,
++ },
++ [ALC662_FIXUP_ZOTAC_Z68] = {
++ .type = ALC_FIXUP_PINS,
++ .v.pins = (const struct alc_pincfg[]) {
++ { 0x1b, 0x02214020 }, /* Front HP */
++ { }
++ }
++ },
+ };
+
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+@@ -5724,10 +5760,12 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
+ SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
++ SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
+ SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
+ SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
++ SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68),
+ SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
+
+ #if 0
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 3e7aa22..de61b8a 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -41,6 +41,39 @@
+ #define WM8994_NUM_DRC 3
+ #define WM8994_NUM_EQ 3
+
++static struct {
++ unsigned int reg;
++ unsigned int mask;
++} wm8994_vu_bits[] = {
++ { WM8994_LEFT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU },
++ { WM8994_RIGHT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU },
++ { WM8994_LEFT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU },
++ { WM8994_RIGHT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU },
++ { WM8994_SPEAKER_VOLUME_LEFT, WM8994_SPKOUT_VU },
++ { WM8994_SPEAKER_VOLUME_RIGHT, WM8994_SPKOUT_VU },
++ { WM8994_LEFT_OUTPUT_VOLUME, WM8994_HPOUT1_VU },
++ { WM8994_RIGHT_OUTPUT_VOLUME, WM8994_HPOUT1_VU },
++ { WM8994_LEFT_OPGA_VOLUME, WM8994_MIXOUT_VU },
++ { WM8994_RIGHT_OPGA_VOLUME, WM8994_MIXOUT_VU },
++
++ { WM8994_AIF1_DAC1_LEFT_VOLUME, WM8994_AIF1DAC1_VU },
++ { WM8994_AIF1_DAC1_RIGHT_VOLUME, WM8994_AIF1DAC1_VU },
++ { WM8994_AIF1_DAC2_LEFT_VOLUME, WM8994_AIF1DAC2_VU },
++ { WM8994_AIF1_DAC2_RIGHT_VOLUME, WM8994_AIF1DAC2_VU },
++ { WM8994_AIF2_DAC_LEFT_VOLUME, WM8994_AIF2DAC_VU },
++ { WM8994_AIF2_DAC_RIGHT_VOLUME, WM8994_AIF2DAC_VU },
++ { WM8994_AIF1_ADC1_LEFT_VOLUME, WM8994_AIF1ADC1_VU },
++ { WM8994_AIF1_ADC1_RIGHT_VOLUME, WM8994_AIF1ADC1_VU },
++ { WM8994_AIF1_ADC2_LEFT_VOLUME, WM8994_AIF1ADC2_VU },
++ { WM8994_AIF1_ADC2_RIGHT_VOLUME, WM8994_AIF1ADC2_VU },
++ { WM8994_AIF2_ADC_LEFT_VOLUME, WM8994_AIF2ADC_VU },
++ { WM8994_AIF2_ADC_RIGHT_VOLUME, WM8994_AIF1ADC2_VU },
++ { WM8994_DAC1_LEFT_VOLUME, WM8994_DAC1_VU },
++ { WM8994_DAC1_RIGHT_VOLUME, WM8994_DAC1_VU },
++ { WM8994_DAC2_LEFT_VOLUME, WM8994_DAC2_VU },
++ { WM8994_DAC2_RIGHT_VOLUME, WM8994_DAC2_VU },
++};
++
+ static int wm8994_drc_base[] = {
+ WM8994_AIF1_DRC1_1,
+ WM8994_AIF1_DRC2_1,
+@@ -889,6 +922,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
+ struct snd_soc_codec *codec = w->codec;
+ struct wm8994 *control = codec->control_data;
+ int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
++ int i;
+ int dac;
+ int adc;
+ int val;
+@@ -947,6 +981,13 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
+ WM8994_AIF1DAC2L_ENA);
+ break;
+
++ case SND_SOC_DAPM_POST_PMU:
++ for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
++ snd_soc_write(codec, wm8994_vu_bits[i].reg,
++ snd_soc_read(codec,
++ wm8994_vu_bits[i].reg));
++ break;
++
+ case SND_SOC_DAPM_PRE_PMD:
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+@@ -972,6 +1013,7 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+ {
+ struct snd_soc_codec *codec = w->codec;
++ int i;
+ int dac;
+ int adc;
+ int val;
+@@ -1022,6 +1064,13 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
+ WM8994_AIF2DACR_ENA);
+ break;
+
++ case SND_SOC_DAPM_POST_PMU:
++ for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
++ snd_soc_write(codec, wm8994_vu_bits[i].reg,
++ snd_soc_read(codec,
++ wm8994_vu_bits[i].reg));
++ break;
++
+ case SND_SOC_DAPM_PRE_PMD:
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
+@@ -1090,17 +1139,19 @@ static int late_enable_ev(struct snd_soc_dapm_widget *w,
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (wm8994->aif1clk_enable) {
+- aif1clk_ev(w, kcontrol, event);
++ aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU);
+ snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
+ WM8994_AIF1CLK_ENA_MASK,
+ WM8994_AIF1CLK_ENA);
++ aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU);
+ wm8994->aif1clk_enable = 0;
+ }
+ if (wm8994->aif2clk_enable) {
+- aif2clk_ev(w, kcontrol, event);
++ aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU);
+ snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
+ WM8994_AIF2CLK_ENA_MASK,
+ WM8994_AIF2CLK_ENA);
++ aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU);
+ wm8994->aif2clk_enable = 0;
+ }
+ break;
+@@ -1121,15 +1172,17 @@ static int late_disable_ev(struct snd_soc_dapm_widget *w,
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMD:
+ if (wm8994->aif1clk_disable) {
++ aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD);
+ snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
+ WM8994_AIF1CLK_ENA_MASK, 0);
+- aif1clk_ev(w, kcontrol, event);
++ aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD);
+ wm8994->aif1clk_disable = 0;
+ }
+ if (wm8994->aif2clk_disable) {
++ aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD);
+ snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
+ WM8994_AIF2CLK_ENA_MASK, 0);
+- aif2clk_ev(w, kcontrol, event);
++ aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD);
+ wm8994->aif2clk_disable = 0;
+ }
+ break;
+@@ -1466,9 +1519,11 @@ SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev)
+
+ static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev,
+- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
++ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
++ SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev,
+- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
++ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
++ SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0,
+ left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)),
+@@ -3482,39 +3537,11 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
+
+ wm8994_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+- /* Latch volume updates (right only; we always do left then right). */
+- snd_soc_update_bits(codec, WM8994_AIF1_DAC1_LEFT_VOLUME,
+- WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU);
+- snd_soc_update_bits(codec, WM8994_AIF1_DAC1_RIGHT_VOLUME,
+- WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU);
+- snd_soc_update_bits(codec, WM8994_AIF1_DAC2_LEFT_VOLUME,
+- WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
+- snd_soc_update_bits(codec, WM8994_AIF1_DAC2_RIGHT_VOLUME,
+- WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
+- snd_soc_update_bits(codec, WM8994_AIF2_DAC_LEFT_VOLUME,
+- WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
+- snd_soc_update_bits(codec, WM8994_AIF2_DAC_RIGHT_VOLUME,
+- WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
+- snd_soc_update_bits(codec, WM8994_AIF1_ADC1_LEFT_VOLUME,
+- WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
+- snd_soc_update_bits(codec, WM8994_AIF1_ADC1_RIGHT_VOLUME,
+- WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
+- snd_soc_update_bits(codec, WM8994_AIF1_ADC2_LEFT_VOLUME,
+- WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
+- snd_soc_update_bits(codec, WM8994_AIF1_ADC2_RIGHT_VOLUME,
+- WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
+- snd_soc_update_bits(codec, WM8994_AIF2_ADC_LEFT_VOLUME,
+- WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU);
+- snd_soc_update_bits(codec, WM8994_AIF2_ADC_RIGHT_VOLUME,
+- WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU);
+- snd_soc_update_bits(codec, WM8994_DAC1_LEFT_VOLUME,
+- WM8994_DAC1_VU, WM8994_DAC1_VU);
+- snd_soc_update_bits(codec, WM8994_DAC1_RIGHT_VOLUME,
+- WM8994_DAC1_VU, WM8994_DAC1_VU);
+- snd_soc_update_bits(codec, WM8994_DAC2_LEFT_VOLUME,
+- WM8994_DAC2_VU, WM8994_DAC2_VU);
+- snd_soc_update_bits(codec, WM8994_DAC2_RIGHT_VOLUME,
+- WM8994_DAC2_VU, WM8994_DAC2_VU);
++ /* Latch volume update bits */
++ for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
++ snd_soc_update_bits(codec, wm8994_vu_bits[i].reg,
++ wm8994_vu_bits[i].mask,
++ wm8994_vu_bits[i].mask);
+
+ /* Set the low bit of the 3D stereo depth so TLV matches */
+ snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_2,
diff --git a/1021_linux-3.2.22.patch b/1021_linux-3.2.22.patch
new file mode 100644
index 00000000..e6ad93a7
--- /dev/null
+++ b/1021_linux-3.2.22.patch
@@ -0,0 +1,1245 @@
+diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
+index 21fd05c..e1f856b 100644
+--- a/Documentation/stable_kernel_rules.txt
++++ b/Documentation/stable_kernel_rules.txt
+@@ -12,6 +12,12 @@ Rules on what kind of patches are accepted, and which ones are not, into the
+ marked CONFIG_BROKEN), an oops, a hang, data corruption, a real
+ security issue, or some "oh, that's not good" issue. In short, something
+ critical.
++ - Serious issues as reported by a user of a distribution kernel may also
++ be considered if they fix a notable performance or interactivity issue.
++ As these fixes are not as obvious and have a higher risk of a subtle
++ regression they should only be submitted by a distribution kernel
++ maintainer and include an addendum linking to a bugzilla entry if it
++ exists and additional information on the user-visible impact.
+ - New device IDs and quirks are also accepted.
+ - No "theoretical race condition" issues, unless an explanation of how the
+ race can be exploited is also provided.
+diff --git a/Makefile b/Makefile
+index 7eb465e..9a7d921 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 21
++SUBLEVEL = 22
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/plat-samsung/include/plat/map-s3c.h b/arch/arm/plat-samsung/include/plat/map-s3c.h
+index 7d04875..c0c70a8 100644
+--- a/arch/arm/plat-samsung/include/plat/map-s3c.h
++++ b/arch/arm/plat-samsung/include/plat/map-s3c.h
+@@ -22,7 +22,7 @@
+ #define S3C24XX_VA_WATCHDOG S3C_VA_WATCHDOG
+
+ #define S3C2412_VA_SSMC S3C_ADDR_CPU(0x00000000)
+-#define S3C2412_VA_EBI S3C_ADDR_CPU(0x00010000)
++#define S3C2412_VA_EBI S3C_ADDR_CPU(0x00100000)
+
+ #define S3C2410_PA_UART (0x50000000)
+ #define S3C24XX_PA_UART S3C2410_PA_UART
+diff --git a/arch/arm/plat-samsung/include/plat/watchdog-reset.h b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
+index 40dbb2b..11b19ea 100644
+--- a/arch/arm/plat-samsung/include/plat/watchdog-reset.h
++++ b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
+@@ -24,7 +24,7 @@ static inline void arch_wdt_reset(void)
+
+ __raw_writel(0, S3C2410_WTCON); /* disable watchdog, to be safe */
+
+- if (s3c2410_wdtclk)
++ if (!IS_ERR(s3c2410_wdtclk))
+ clk_enable(s3c2410_wdtclk);
+
+ /* put initial values into count and data */
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index f3444f7..0c3b775 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -175,7 +175,7 @@
+ #define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */
+ #define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
+ #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
+-#define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */
++#define X86_FEATURE_DTHERM (7*32+ 7) /* Digital Thermal Sensor */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
+diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
+index effff47..cb00ccc 100644
+--- a/arch/x86/include/asm/pgtable-3level.h
++++ b/arch/x86/include/asm/pgtable-3level.h
+@@ -31,6 +31,60 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
+ ptep->pte_low = pte.pte_low;
+ }
+
++#define pmd_read_atomic pmd_read_atomic
++/*
++ * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
++ * a "*pmdp" dereference done by gcc. Problem is, in certain places
++ * where pte_offset_map_lock is called, concurrent page faults are
++ * allowed, if the mmap_sem is hold for reading. An example is mincore
++ * vs page faults vs MADV_DONTNEED. On the page fault side
++ * pmd_populate rightfully does a set_64bit, but if we're reading the
++ * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
++ * because gcc will not read the 64bit of the pmd atomically. To fix
++ * this all places running pmd_offset_map_lock() while holding the
++ * mmap_sem in read mode, shall read the pmdp pointer using this
++ * function to know if the pmd is null nor not, and in turn to know if
++ * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
++ * operations.
++ *
++ * Without THP if the mmap_sem is hold for reading, the pmd can only
++ * transition from null to not null while pmd_read_atomic runs. So
++ * we can always return atomic pmd values with this function.
++ *
++ * With THP if the mmap_sem is hold for reading, the pmd can become
++ * trans_huge or none or point to a pte (and in turn become "stable")
++ * at any time under pmd_read_atomic. We could read it really
++ * atomically here with a atomic64_read for the THP enabled case (and
++ * it would be a whole lot simpler), but to avoid using cmpxchg8b we
++ * only return an atomic pmdval if the low part of the pmdval is later
++ * found stable (i.e. pointing to a pte). And we're returning a none
++ * pmdval if the low part of the pmd is none. In some cases the high
++ * and low part of the pmdval returned may not be consistent if THP is
++ * enabled (the low part may point to previously mapped hugepage,
++ * while the high part may point to a more recently mapped hugepage),
++ * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
++ * of the pmd to be read atomically to decide if the pmd is unstable
++ * or not, with the only exception of when the low part of the pmd is
++ * zero in which case we return a none pmd.
++ */
++static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
++{
++ pmdval_t ret;
++ u32 *tmp = (u32 *)pmdp;
++
++ ret = (pmdval_t) (*tmp);
++ if (ret) {
++ /*
++ * If the low part is null, we must not read the high part
++ * or we can end up with a partial pmd.
++ */
++ smp_rmb();
++ ret |= ((pmdval_t)*(tmp + 1)) << 32;
++ }
++
++ return (pmd_t) { ret };
++}
++
+ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+ {
+ set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
+diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
+index c7f64e6..ea6106c 100644
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -31,7 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
+ const struct cpuid_bit *cb;
+
+ static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
+- { X86_FEATURE_DTS, CR_EAX, 0, 0x00000006, 0 },
++ { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 },
+ { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
+ { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
+ { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
+diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
+index a43fa1a..1502c502 100644
+--- a/drivers/acpi/acpi_pad.c
++++ b/drivers/acpi/acpi_pad.c
+@@ -36,6 +36,7 @@
+ #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
+ #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
+ static DEFINE_MUTEX(isolated_cpus_lock);
++static DEFINE_MUTEX(round_robin_lock);
+
+ static unsigned long power_saving_mwait_eax;
+
+@@ -107,7 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index)
+ if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+ return;
+
+- mutex_lock(&isolated_cpus_lock);
++ mutex_lock(&round_robin_lock);
+ cpumask_clear(tmp);
+ for_each_cpu(cpu, pad_busy_cpus)
+ cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
+@@ -116,7 +117,7 @@ static void round_robin_cpu(unsigned int tsk_index)
+ if (cpumask_empty(tmp))
+ cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
+ if (cpumask_empty(tmp)) {
+- mutex_unlock(&isolated_cpus_lock);
++ mutex_unlock(&round_robin_lock);
+ return;
+ }
+ for_each_cpu(cpu, tmp) {
+@@ -131,7 +132,7 @@ static void round_robin_cpu(unsigned int tsk_index)
+ tsk_in_cpu[tsk_index] = preferred_cpu;
+ cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
+ cpu_weight[preferred_cpu]++;
+- mutex_unlock(&isolated_cpus_lock);
++ mutex_unlock(&round_robin_lock);
+
+ set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
+ }
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index c3d2dfc..b96544a 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -869,7 +869,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+ dpm_wait_for_children(dev, async);
+
+ if (async_error)
+- return 0;
++ goto Complete;
+
+ pm_runtime_get_noresume(dev);
+ if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+@@ -878,7 +878,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+ if (pm_wakeup_pending()) {
+ pm_runtime_put_sync(dev);
+ async_error = -EBUSY;
+- return 0;
++ goto Complete;
+ }
+
+ device_lock(dev);
+@@ -926,6 +926,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+ }
+
+ device_unlock(dev);
++
++ Complete:
+ complete_all(&dev->power.completion);
+
+ if (error) {
+diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
+index 0477982..1b5675b 100644
+--- a/drivers/char/hw_random/atmel-rng.c
++++ b/drivers/char/hw_random/atmel-rng.c
+@@ -34,7 +34,7 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
+ u32 *data = buf;
+
+ /* data ready? */
+- if (readl(trng->base + TRNG_ODATA) & 1) {
++ if (readl(trng->base + TRNG_ISR) & 1) {
+ *data = readl(trng->base + TRNG_ODATA);
+ /*
+ ensure data ready is only set again AFTER the next data
+diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
+index 70ad892..b3ccefa 100644
+--- a/drivers/edac/i7core_edac.c
++++ b/drivers/edac/i7core_edac.c
+@@ -1932,12 +1932,6 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
+ if (mce->bank != 8)
+ return NOTIFY_DONE;
+
+-#ifdef CONFIG_SMP
+- /* Only handle if it is the right mc controller */
+- if (mce->socketid != pvt->i7core_dev->socket)
+- return NOTIFY_DONE;
+-#endif
+-
+ smp_rmb();
+ if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
+ smp_wmb();
+@@ -2234,8 +2228,6 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
+ if (pvt->enable_scrub)
+ disable_sdram_scrub_setting(mci);
+
+- atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &i7_mce_dec);
+-
+ /* Disable EDAC polling */
+ i7core_pci_ctl_release(pvt);
+
+@@ -2336,8 +2328,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
+ /* DCLK for scrub rate setting */
+ pvt->dclk_freq = get_dclk_freq();
+
+- atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);
+-
+ return 0;
+
+ fail0:
+@@ -2481,8 +2471,10 @@ static int __init i7core_init(void)
+
+ pci_rc = pci_register_driver(&i7core_driver);
+
+- if (pci_rc >= 0)
++ if (pci_rc >= 0) {
++ atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);
+ return 0;
++ }
+
+ i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
+ pci_rc);
+@@ -2498,6 +2490,7 @@ static void __exit i7core_exit(void)
+ {
+ debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ pci_unregister_driver(&i7core_driver);
++ atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &i7_mce_dec);
+ }
+
+ module_init(i7core_init);
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 7a402bf..18a1293 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -1661,9 +1661,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
+ debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
+ __func__, mci, &sbridge_dev->pdev[0]->dev);
+
+- atomic_notifier_chain_unregister(&x86_mce_decoder_chain,
+- &sbridge_mce_dec);
+-
+ /* Remove MC sysfs nodes */
+ edac_mc_del_mc(mci->dev);
+
+@@ -1731,8 +1728,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
+ goto fail0;
+ }
+
+- atomic_notifier_chain_register(&x86_mce_decoder_chain,
+- &sbridge_mce_dec);
+ return 0;
+
+ fail0:
+@@ -1861,8 +1856,10 @@ static int __init sbridge_init(void)
+
+ pci_rc = pci_register_driver(&sbridge_driver);
+
+- if (pci_rc >= 0)
++ if (pci_rc >= 0) {
++ atomic_notifier_chain_register(&x86_mce_decoder_chain, &sbridge_mce_dec);
+ return 0;
++ }
+
+ sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
+ pci_rc);
+@@ -1878,6 +1875,7 @@ static void __exit sbridge_exit(void)
+ {
+ debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ pci_unregister_driver(&sbridge_driver);
++ atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &sbridge_mce_dec);
+ }
+
+ module_init(sbridge_init);
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 3e927ce..a1ee634 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -585,7 +585,7 @@ static bool
+ drm_monitor_supports_rb(struct edid *edid)
+ {
+ if (edid->revision >= 4) {
+- bool ret;
++ bool ret = false;
+ drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 3e7c478..3e2edc6 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -3312,6 +3312,10 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
+
+ if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+ ret = -EIO;
++ } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
++ seqno) ||
++ atomic_read(&dev_priv->mm.wedged), 3000)) {
++ ret = -EBUSY;
+ }
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index d3820c2..578ddfc 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -424,6 +424,30 @@ static void gen6_pm_rps_work(struct work_struct *work)
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+ }
+
++static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
++ u32 pm_iir)
++{
++ unsigned long flags;
++
++ /*
++ * IIR bits should never already be set because IMR should
++ * prevent an interrupt from being shown in IIR. The warning
++ * displays a case where we've unsafely cleared
++ * dev_priv->pm_iir. Although missing an interrupt of the same
++ * type is not a problem, it displays a problem in the logic.
++ *
++ * The mask bit in IMR is cleared by rps_work.
++ */
++
++ spin_lock_irqsave(&dev_priv->rps_lock, flags);
++ dev_priv->pm_iir |= pm_iir;
++ I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
++ POSTING_READ(GEN6_PMIMR);
++ spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
++
++ queue_work(dev_priv->wq, &dev_priv->rps_work);
++}
++
+ static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
+ {
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+@@ -529,16 +553,8 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+ pch_irq_handler(dev, pch_iir);
+ }
+
+- if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
+- unsigned long flags;
+- spin_lock_irqsave(&dev_priv->rps_lock, flags);
+- WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
+- dev_priv->pm_iir |= pm_iir;
+- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+- POSTING_READ(GEN6_PMIMR);
+- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+- queue_work(dev_priv->wq, &dev_priv->rps_work);
+- }
++ if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
++ gen6_queue_rps_work(dev_priv, pm_iir);
+
+ /* should clear PCH hotplug event before clear CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+@@ -634,25 +650,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+ i915_handle_rps_change(dev);
+ }
+
+- if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
+- /*
+- * IIR bits should never already be set because IMR should
+- * prevent an interrupt from being shown in IIR. The warning
+- * displays a case where we've unsafely cleared
+- * dev_priv->pm_iir. Although missing an interrupt of the same
+- * type is not a problem, it displays a problem in the logic.
+- *
+- * The mask bit in IMR is cleared by rps_work.
+- */
+- unsigned long flags;
+- spin_lock_irqsave(&dev_priv->rps_lock, flags);
+- WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
+- dev_priv->pm_iir |= pm_iir;
+- I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+- POSTING_READ(GEN6_PMIMR);
+- spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+- queue_work(dev_priv->wq, &dev_priv->rps_work);
+- }
++ if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
++ gen6_queue_rps_work(dev_priv, pm_iir);
+
+ /* should clear PCH hotplug event before clear CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
+index a1eb83d..f38d196 100644
+--- a/drivers/gpu/drm/i915/i915_suspend.c
++++ b/drivers/gpu/drm/i915/i915_suspend.c
+@@ -739,8 +739,11 @@ static void i915_restore_display(struct drm_device *dev)
+ if (HAS_PCH_SPLIT(dev)) {
+ I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
+- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
++ /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
++ * otherwise we get blank eDP screen after S3 on some machines
++ */
+ I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
++ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
+ I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
+ I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
+ I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 5c1cdb8..6aa7716 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -2187,6 +2187,33 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ }
+
+ static int
++intel_finish_fb(struct drm_framebuffer *old_fb)
++{
++ struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
++ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
++ bool was_interruptible = dev_priv->mm.interruptible;
++ int ret;
++
++ wait_event(dev_priv->pending_flip_queue,
++ atomic_read(&dev_priv->mm.wedged) ||
++ atomic_read(&obj->pending_flip) == 0);
++
++ /* Big Hammer, we also need to ensure that any pending
++ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
++ * current scanout is retired before unpinning the old
++ * framebuffer.
++ *
++ * This should only fail upon a hung GPU, in which case we
++ * can safely continue.
++ */
++ dev_priv->mm.interruptible = false;
++ ret = i915_gem_object_finish_gpu(obj);
++ dev_priv->mm.interruptible = was_interruptible;
++
++ return ret;
++}
++
++static int
+ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+ {
+@@ -2224,25 +2251,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ return ret;
+ }
+
+- if (old_fb) {
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+-
+- wait_event(dev_priv->pending_flip_queue,
+- atomic_read(&dev_priv->mm.wedged) ||
+- atomic_read(&obj->pending_flip) == 0);
+-
+- /* Big Hammer, we also need to ensure that any pending
+- * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+- * current scanout is retired before unpinning the old
+- * framebuffer.
+- *
+- * This should only fail upon a hung GPU, in which case we
+- * can safely continue.
+- */
+- ret = i915_gem_object_finish_gpu(obj);
+- (void) ret;
+- }
++ if (old_fb)
++ intel_finish_fb(old_fb);
+
+ ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
+ LEAVE_ATOMIC_MODE_SET);
+@@ -3312,6 +3322,23 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_device *dev = crtc->dev;
+
++ /* Flush any pending WAITs before we disable the pipe. Note that
++ * we need to drop the struct_mutex in order to acquire it again
++ * during the lowlevel dpms routines around a couple of the
++ * operations. It does not look trivial nor desirable to move
++ * that locking higher. So instead we leave a window for the
++ * submission of further commands on the fb before we can actually
++ * disable it. This race with userspace exists anyway, and we can
++ * only rely on the pipe being disabled by userspace after it
++ * receives the hotplug notification and has flushed any pending
++ * batches.
++ */
++ if (crtc->fb) {
++ mutex_lock(&dev->struct_mutex);
++ intel_finish_fb(crtc->fb);
++ mutex_unlock(&dev->struct_mutex);
++ }
++
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (crtc->fb) {
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 933e66b..f6613dc 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -306,7 +306,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
+
+ I915_WRITE_CTL(ring,
+ ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+- | RING_REPORT_64K | RING_VALID);
++ | RING_VALID);
+
+ /* If the head is still not zero, the ring is dead */
+ if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
+@@ -1157,18 +1157,6 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long end;
+- u32 head;
+-
+- /* If the reported head position has wrapped or hasn't advanced,
+- * fallback to the slow and accurate path.
+- */
+- head = intel_read_status_page(ring, 4);
+- if (head > ring->head) {
+- ring->head = head;
+- ring->space = ring_space(ring);
+- if (ring->space >= n)
+- return 0;
+- }
+
+ trace_i915_ring_wait_begin(ring);
+ end = jiffies + 3 * HZ;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+index 3a4cc32..cc0801d 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+@@ -499,7 +499,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
+ nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, &nfbdev->helper,
+- nv_two_heads(dev) ? 2 : 1, 4);
++ dev->mode_config.num_crtc, 4);
+ if (ret) {
+ kfree(nfbdev);
+ return ret;
+diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
+index 4c07436..d99aa84 100644
+--- a/drivers/hwmon/applesmc.c
++++ b/drivers/hwmon/applesmc.c
+@@ -215,7 +215,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ int i;
+
+ if (send_command(cmd) || send_argument(key)) {
+- pr_warn("%s: read arg fail\n", key);
++ pr_warn("%.4s: read arg fail\n", key);
+ return -EIO;
+ }
+
+@@ -223,7 +223,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+
+ for (i = 0; i < len; i++) {
+ if (__wait_status(0x05)) {
+- pr_warn("%s: read data fail\n", key);
++ pr_warn("%.4s: read data fail\n", key);
+ return -EIO;
+ }
+ buffer[i] = inb(APPLESMC_DATA_PORT);
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 427468f..0790c98 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -660,7 +660,7 @@ static void __cpuinit get_core_online(unsigned int cpu)
+ * sensors. We check this bit only, all the early CPUs
+ * without thermal sensors will be filtered out.
+ */
+- if (!cpu_has(c, X86_FEATURE_DTS))
++ if (!cpu_has(c, X86_FEATURE_DTHERM))
+ return;
+
+ if (!pdev) {
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index da2f021..532a902 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -288,8 +288,10 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
+
+ hlist_del(&cell->list);
+
+- bio_list_add(inmates, cell->holder);
+- bio_list_merge(inmates, &cell->bios);
++ if (inmates) {
++ bio_list_add(inmates, cell->holder);
++ bio_list_merge(inmates, &cell->bios);
++ }
+
+ mempool_free(cell, prison->cell_pool);
+ }
+@@ -312,9 +314,10 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
+ */
+ static void __cell_release_singleton(struct cell *cell, struct bio *bio)
+ {
+- hlist_del(&cell->list);
+ BUG_ON(cell->holder != bio);
+ BUG_ON(!bio_list_empty(&cell->bios));
++
++ __cell_release(cell, NULL);
+ }
+
+ static void cell_release_singleton(struct cell *cell, struct bio *bio)
+diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
+index b7d1e3e..fb68805 100644
+--- a/drivers/media/dvb/siano/smsusb.c
++++ b/drivers/media/dvb/siano/smsusb.c
+@@ -544,6 +544,8 @@ static const struct usb_device_id smsusb_id_table[] __devinitconst = {
+ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+ { USB_DEVICE(0x2040, 0xc0a0),
+ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
++ { USB_DEVICE(0x2040, 0xf5a0),
++ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+ { } /* Terminating entry */
+ };
+
+diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
+index 2ca10df..981501f 100644
+--- a/drivers/media/video/gspca/gspca.c
++++ b/drivers/media/video/gspca/gspca.c
+@@ -1697,7 +1697,7 @@ static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+ {
+ struct gspca_dev *gspca_dev = priv;
+- int ret;
++ int i, ret;
+
+ if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+@@ -1728,6 +1728,8 @@ static int vidioc_streamoff(struct file *file, void *priv,
+ wake_up_interruptible(&gspca_dev->wq);
+
+ /* empty the transfer queues */
++ for (i = 0; i < gspca_dev->nframes; i++)
++ gspca_dev->frame[i].v4l2_buf.flags &= ~BUF_ALL_FLAGS;
+ atomic_set(&gspca_dev->fr_q, 0);
+ atomic_set(&gspca_dev->fr_i, 0);
+ gspca_dev->fr_o = 0;
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 8dc84d6..86cd532 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -590,8 +590,8 @@ static void c_can_chip_config(struct net_device *dev)
+ priv->write_reg(priv, &priv->regs->control,
+ CONTROL_ENABLE_AR);
+
+- if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY &
+- CAN_CTRLMODE_LOOPBACK)) {
++ if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
++ (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
+ /* loopback + silent mode : useful for hot self-test */
+ priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index e023379..e59d006 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -933,12 +933,12 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
+ u32 clock_freq = 0;
+
+ if (pdev->dev.of_node) {
+- const u32 *clock_freq_p;
++ const __be32 *clock_freq_p;
+
+ clock_freq_p = of_get_property(pdev->dev.of_node,
+ "clock-frequency", NULL);
+ if (clock_freq_p)
+- clock_freq = *clock_freq_p;
++ clock_freq = be32_to_cpup(clock_freq_p);
+ }
+
+ if (!clock_freq) {
+diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
+index a3e65fd..e556fc3 100644
+--- a/drivers/net/ethernet/intel/e1000e/82571.c
++++ b/drivers/net/ethernet/intel/e1000e/82571.c
+@@ -2080,8 +2080,9 @@ const struct e1000_info e1000_82574_info = {
+ | FLAG_HAS_SMART_POWER_DOWN
+ | FLAG_HAS_AMT
+ | FLAG_HAS_CTRLEXT_ON_LOAD,
+- .flags2 = FLAG2_CHECK_PHY_HANG
++ .flags2 = FLAG2_CHECK_PHY_HANG
+ | FLAG2_DISABLE_ASPM_L0S
++ | FLAG2_DISABLE_ASPM_L1
+ | FLAG2_NO_DISABLE_RX,
+ .pba = 32,
+ .max_hw_frame_size = DEFAULT_JUMBO,
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 4e933d1..64d3f98 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -5132,14 +5132,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+ return -EINVAL;
+ }
+
+- /* 82573 Errata 17 */
+- if (((adapter->hw.mac.type == e1000_82573) ||
+- (adapter->hw.mac.type == e1000_82574)) &&
+- (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
+- adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
+- e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
+- }
+-
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+ /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index 8b0c2ca..6973620 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -718,13 +718,25 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
+
+ u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
+ {
++ struct ath_common *common = ath9k_hw_common(ah);
++ int i = 0;
++
+ REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
+ udelay(100);
+ REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
+
+- while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
++ while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) {
++
+ udelay(100);
+
++ if (WARN_ON_ONCE(i >= 100)) {
++ ath_err(common, "PLL4 meaurement not done\n");
++ break;
++ }
++
++ i++;
++ }
++
+ return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
+ }
+ EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index f76a814..95437fc 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -1042,6 +1042,15 @@ void ath_hw_pll_work(struct work_struct *work)
+ hw_pll_work.work);
+ u32 pll_sqsum;
+
++ /*
++ * ensure that the PLL WAR is executed only
++ * after the STA is associated (or) if the
++ * beaconing had started in interfaces that
++ * uses beacons.
++ */
++ if (!(sc->sc_flags & SC_OP_BEACONS))
++ return;
++
+ if (AR_SREV_9485(sc->sc_ah)) {
+
+ ath9k_ps_wakeup(sc);
+@@ -1486,15 +1495,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
+ }
+ }
+
+- if ((ah->opmode == NL80211_IFTYPE_ADHOC) ||
+- ((vif->type == NL80211_IFTYPE_ADHOC) &&
+- sc->nvifs > 0)) {
+- ath_err(common, "Cannot create ADHOC interface when other"
+- " interfaces already exist.\n");
+- ret = -EINVAL;
+- goto out;
+- }
+-
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Attach a VIF of type: %d\n", vif->type);
+
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 76fd277..c59c592 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -936,13 +936,13 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
+ }
+
+ /* legacy rates */
++ rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
+ if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
+ !(rate->flags & IEEE80211_RATE_ERP_G))
+ phy = WLAN_RC_PHY_CCK;
+ else
+ phy = WLAN_RC_PHY_OFDM;
+
+- rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
+ info->rates[i].Rate = rate->hw_value;
+ if (rate->hw_value_short) {
+ if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+index 5815cf5..4661a64 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+@@ -1777,6 +1777,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ }
+
++#ifdef CONFIG_IWLWIFI_DEBUG
+ static ssize_t iwl_dbgfs_log_event_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+@@ -1814,6 +1815,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
+
+ return count;
+ }
++#endif
+
+ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
+ char __user *user_buf,
+@@ -1941,7 +1943,9 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
+ return ret;
+ }
+
++#ifdef CONFIG_IWLWIFI_DEBUG
+ DEBUGFS_READ_WRITE_FILE_OPS(log_event);
++#endif
+ DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+ DEBUGFS_READ_FILE_OPS(fh_reg);
+ DEBUGFS_READ_FILE_OPS(rx_queue);
+@@ -1957,7 +1961,9 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
+ {
+ DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
++#ifdef CONFIG_IWLWIFI_DEBUG
+ DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
++#endif
+ DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
+ DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 226faab..fc35308 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1922,14 +1922,14 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
+
+ dev_dbg(&dev->dev, "%s\n", dev->nodename);
+
+- unregister_netdev(info->netdev);
+-
+ xennet_disconnect_backend(info);
+
+- del_timer_sync(&info->rx_refill_timer);
+-
+ xennet_sysfs_delif(info->netdev);
+
++ unregister_netdev(info->netdev);
++
++ del_timer_sync(&info->rx_refill_timer);
++
+ free_percpu(info->stats);
+
+ free_netdev(info->netdev);
+diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
+index da14432..efc4b7f 100644
+--- a/drivers/oprofile/oprofile_perf.c
++++ b/drivers/oprofile/oprofile_perf.c
+@@ -25,7 +25,7 @@ static int oprofile_perf_enabled;
+ static DEFINE_MUTEX(oprofile_perf_mutex);
+
+ static struct op_counter_config *counter_config;
+-static struct perf_event **perf_events[nr_cpumask_bits];
++static struct perf_event **perf_events[NR_CPUS];
+ static int num_counters;
+
+ /*
+diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
+index 54423ab..2ee187f 100644
+--- a/drivers/staging/iio/adc/ad7606_core.c
++++ b/drivers/staging/iio/adc/ad7606_core.c
+@@ -241,6 +241,7 @@ static const struct attribute_group ad7606_attribute_group = {
+ .indexed = 1, \
+ .channel = num, \
+ .address = num, \
++ .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .scan_index = num, \
+ .scan_type = IIO_ST('s', 16, 16, 0), \
+ }
+diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
+index ec41d38..f4b738f 100644
+--- a/drivers/staging/rtl8712/usb_intf.c
++++ b/drivers/staging/rtl8712/usb_intf.c
+@@ -102,6 +102,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
+ /* - */
+ {USB_DEVICE(0x20F4, 0x646B)},
+ {USB_DEVICE(0x083A, 0xC512)},
++ {USB_DEVICE(0x25D4, 0x4CA1)},
++ {USB_DEVICE(0x25D4, 0x4CAB)},
+
+ /* RTL8191SU */
+ /* Realtek */
+diff --git a/drivers/staging/rts_pstor/rtsx_transport.c b/drivers/staging/rts_pstor/rtsx_transport.c
+index 4e3d2c1..9b2e5c9 100644
+--- a/drivers/staging/rts_pstor/rtsx_transport.c
++++ b/drivers/staging/rts_pstor/rtsx_transport.c
+@@ -335,6 +335,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
+ int sg_cnt, i, resid;
+ int err = 0;
+ long timeleft;
++ struct scatterlist *sg_ptr;
+ u32 val = TRIG_DMA;
+
+ if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
+@@ -371,7 +372,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
+ sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+
+ resid = size;
+-
++ sg_ptr = sg;
+ chip->sgi = 0;
+ /* Usually the next entry will be @sg@ + 1, but if this sg element
+ * is part of a chained scatterlist, it could jump to the start of
+@@ -379,14 +380,14 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
+ * the proper sg
+ */
+ for (i = 0; i < *index; i++)
+- sg = sg_next(sg);
++ sg_ptr = sg_next(sg_ptr);
+ for (i = *index; i < sg_cnt; i++) {
+ dma_addr_t addr;
+ unsigned int len;
+ u8 option;
+
+- addr = sg_dma_address(sg);
+- len = sg_dma_len(sg);
++ addr = sg_dma_address(sg_ptr);
++ len = sg_dma_len(sg_ptr);
+
+ RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
+ (unsigned int)addr, len);
+@@ -415,7 +416,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
+ if (!resid)
+ break;
+
+- sg = sg_next(sg);
++ sg_ptr = sg_next(sg_ptr);
+ }
+
+ RTSX_DEBUGP("SG table count = %d\n", chip->sgi);
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index aa0c43f..35e6b5f 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -93,6 +93,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
+ { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
+ { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
++ { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
+ { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
+ { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
+ { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
+@@ -134,7 +135,13 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
+ { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
+ { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
++ { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
++ { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
+ { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
++ { USB_DEVICE(0x166A, 0x0304) }, /* Clipsal 5000CT2 C-Bus Black and White Touchscreen */
++ { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
++ { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
++ { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
+ { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
+ { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
+ { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
+@@ -146,7 +153,11 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
++ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
++ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
+ { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
++ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
++ { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
+ { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
+ { } /* Terminating Entry */
+ };
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 61d6c31..21a4734 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -235,6 +235,7 @@ static void option_instat_callback(struct urb *urb);
+ #define NOVATELWIRELESS_PRODUCT_G1 0xA001
+ #define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
+ #define NOVATELWIRELESS_PRODUCT_G2 0xA010
++#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
+
+ /* AMOI PRODUCTS */
+ #define AMOI_VENDOR_ID 0x1614
+@@ -496,6 +497,10 @@ static void option_instat_callback(struct urb *urb);
+ /* MediaTek products */
+ #define MEDIATEK_VENDOR_ID 0x0e8d
+
++/* Cellient products */
++#define CELLIENT_VENDOR_ID 0x2692
++#define CELLIENT_PRODUCT_MEN200 0x9005
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+@@ -730,6 +735,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
++ /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
++ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
+
+ { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
+ { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
+@@ -1227,6 +1234,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */
++ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
+index 08a07a2..57ceaf3 100644
+--- a/fs/nilfs2/gcinode.c
++++ b/fs/nilfs2/gcinode.c
+@@ -191,6 +191,8 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
+ while (!list_empty(head)) {
+ ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
+ list_del_init(&ii->i_dirty);
++ truncate_inode_pages(&ii->vfs_inode.i_data, 0);
++ nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+ iput(&ii->vfs_inode);
+ }
+ }
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index bb24ab6..6f24e67 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -2309,6 +2309,8 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
+ if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
+ continue;
+ list_del_init(&ii->i_dirty);
++ truncate_inode_pages(&ii->vfs_inode.i_data, 0);
++ nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+ iput(&ii->vfs_inode);
+ }
+ }
+diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
+index a03c098..bc00876 100644
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -445,6 +445,18 @@ static inline int pmd_write(pmd_t pmd)
+ #endif /* __HAVE_ARCH_PMD_WRITE */
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
++#ifndef pmd_read_atomic
++static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
++{
++ /*
++ * Depend on compiler for an atomic pmd read. NOTE: this is
++ * only going to work, if the pmdval_t isn't larger than
++ * an unsigned long.
++ */
++ return *pmdp;
++}
++#endif
++
+ /*
+ * This function is meant to be used by sites walking pagetables with
+ * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
+@@ -458,14 +470,30 @@ static inline int pmd_write(pmd_t pmd)
+ * undefined so behaving like if the pmd was none is safe (because it
+ * can return none anyway). The compiler level barrier() is critically
+ * important to compute the two checks atomically on the same pmdval.
++ *
++ * For 32bit kernels with a 64bit large pmd_t this automatically takes
++ * care of reading the pmd atomically to avoid SMP race conditions
++ * against pmd_populate() when the mmap_sem is hold for reading by the
++ * caller (a special atomic read not done by "gcc" as in the generic
++ * version above, is also needed when THP is disabled because the page
++ * fault can populate the pmd from under us).
+ */
+ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
+ {
+- /* depend on compiler for an atomic pmd read */
+- pmd_t pmdval = *pmd;
++ pmd_t pmdval = pmd_read_atomic(pmd);
+ /*
+ * The barrier will stabilize the pmdval in a register or on
+ * the stack so that it will stop changing under the code.
++ *
++ * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
++ * pmd_read_atomic is allowed to return a not atomic pmdval
++ * (for example pointing to an hugepage that has never been
++ * mapped in the pmd). The below checks will only care about
++ * the low part of the pmd with 32bit PAE x86 anyway, with the
++ * exception of pmd_none(). So the important thing is that if
++ * the low part of the pmd is found null, the high part will
++ * be also null or the pmd_none() check below would be
++ * confused.
+ */
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ barrier();
+diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
+index f961cc5..da587ad 100644
+--- a/net/batman-adv/routing.c
++++ b/net/batman-adv/routing.c
+@@ -619,6 +619,8 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
+ /* packet needs to be linearized to access the TT changes */
+ if (skb_linearize(skb) < 0)
+ goto out;
++ /* skb_linearize() possibly changed skb->data */
++ tt_query = (struct tt_query_packet *)skb->data;
+
+ if (is_my_mac(tt_query->dst))
+ handle_tt_response(bat_priv, tt_query);
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 5f09a57..088af45 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -1816,10 +1816,10 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
+ {
+ struct tt_local_entry *tt_local_entry = NULL;
+ struct tt_global_entry *tt_global_entry = NULL;
+- bool ret = true;
++ bool ret = false;
+
+ if (!atomic_read(&bat_priv->ap_isolation))
+- return false;
++ goto out;
+
+ tt_local_entry = tt_local_hash_find(bat_priv, dst);
+ if (!tt_local_entry)
+@@ -1829,10 +1829,10 @@ bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
+ if (!tt_global_entry)
+ goto out;
+
+- if (_is_ap_isolated(tt_local_entry, tt_global_entry))
++ if (!_is_ap_isolated(tt_local_entry, tt_global_entry))
+ goto out;
+
+- ret = false;
++ ret = true;
+
+ out:
+ if (tt_global_entry)
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index c1c99dd..d57d05b 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -1369,7 +1369,7 @@ static void reg_set_request_processed(void)
+ spin_unlock(&reg_requests_lock);
+
+ if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
+- cancel_delayed_work_sync(&reg_timeout);
++ cancel_delayed_work(&reg_timeout);
+
+ if (need_more_processing)
+ schedule_work(&reg_work);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 0005bde..5f096a5 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5988,6 +5988,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ { .id = 0x10ec0272, .name = "ALC272", .patch = patch_alc662 },
+ { .id = 0x10ec0275, .name = "ALC275", .patch = patch_alc269 },
+ { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
++ { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
+ { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
+ .patch = patch_alc861 },
+ { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
+diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
+index 11224ed..323d4d9 100644
+--- a/tools/hv/hv_kvp_daemon.c
++++ b/tools/hv/hv_kvp_daemon.c
+@@ -384,14 +384,18 @@ int main(void)
+ pfd.fd = fd;
+
+ while (1) {
++ struct sockaddr *addr_p = (struct sockaddr *) &addr;
++ socklen_t addr_l = sizeof(addr);
+ pfd.events = POLLIN;
+ pfd.revents = 0;
+ poll(&pfd, 1, -1);
+
+- len = recv(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0);
++ len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0,
++ addr_p, &addr_l);
+
+- if (len < 0) {
+- syslog(LOG_ERR, "recv failed; error:%d", len);
++ if (len < 0 || addr.nl_pid) {
++ syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
++ addr.nl_pid, errno, strerror(errno));
+ close(fd);
+ return -1;
+ }
diff --git a/1022_linux-3.2.23.patch b/1022_linux-3.2.23.patch
new file mode 100644
index 00000000..3d796d03
--- /dev/null
+++ b/1022_linux-3.2.23.patch
@@ -0,0 +1,1862 @@
+diff --git a/Makefile b/Makefile
+index 9a7d921..40d1e3b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 22
++SUBLEVEL = 23
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index e10e59a..1d1710e 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -471,9 +471,7 @@ static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
+ static void ipi_timer(void)
+ {
+ struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
+- irq_enter();
+ evt->event_handler(evt);
+- irq_exit();
+ }
+
+ #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+@@ -572,7 +570,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
+
+ switch (ipinr) {
+ case IPI_TIMER:
++ irq_enter();
+ ipi_timer();
++ irq_exit();
+ break;
+
+ case IPI_RESCHEDULE:
+@@ -580,15 +580,21 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
+ break;
+
+ case IPI_CALL_FUNC:
++ irq_enter();
+ generic_smp_call_function_interrupt();
++ irq_exit();
+ break;
+
+ case IPI_CALL_FUNC_SINGLE:
++ irq_enter();
+ generic_smp_call_function_single_interrupt();
++ irq_exit();
+ break;
+
+ case IPI_CPU_STOP:
++ irq_enter();
+ ipi_cpu_stop(cpu);
++ irq_exit();
+ break;
+
+ default:
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 44d8829..5e8dc08 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -763,7 +763,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+ lwz r3,VCORE_NAPPING_THREADS(r5)
+ lwz r4,VCPU_PTID(r9)
+ li r0,1
+- sldi r0,r0,r4
++ sld r0,r0,r4
+ andc. r3,r3,r0 /* no sense IPI'ing ourselves */
+ beq 43f
+ mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index 03a217a..b7e63d8 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -975,7 +975,7 @@ static int cpu_cmd(void)
+ /* print cpus waiting or in xmon */
+ printf("cpus stopped:");
+ count = 0;
+- for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ for_each_possible_cpu(cpu) {
+ if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
+ if (count == 0)
+ printf(" %x", cpu);
+diff --git a/drivers/block/umem.c b/drivers/block/umem.c
+index aa27120..9a72277 100644
+--- a/drivers/block/umem.c
++++ b/drivers/block/umem.c
+@@ -513,6 +513,44 @@ static void process_page(unsigned long data)
+ }
+ }
+
++struct mm_plug_cb {
++ struct blk_plug_cb cb;
++ struct cardinfo *card;
++};
++
++static void mm_unplug(struct blk_plug_cb *cb)
++{
++ struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);
++
++ spin_lock_irq(&mmcb->card->lock);
++ activate(mmcb->card);
++ spin_unlock_irq(&mmcb->card->lock);
++ kfree(mmcb);
++}
++
++static int mm_check_plugged(struct cardinfo *card)
++{
++ struct blk_plug *plug = current->plug;
++ struct mm_plug_cb *mmcb;
++
++ if (!plug)
++ return 0;
++
++ list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
++ if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
++ return 1;
++ }
++ /* Not currently on the callback list */
++ mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
++ if (!mmcb)
++ return 0;
++
++ mmcb->card = card;
++ mmcb->cb.callback = mm_unplug;
++ list_add(&mmcb->cb.list, &plug->cb_list);
++ return 1;
++}
++
+ static void mm_make_request(struct request_queue *q, struct bio *bio)
+ {
+ struct cardinfo *card = q->queuedata;
+@@ -523,6 +561,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
+ *card->biotail = bio;
+ bio->bi_next = NULL;
+ card->biotail = &bio->bi_next;
++ if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
++ activate(card);
+ spin_unlock_irq(&card->lock);
+
+ return;
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index c4da951..ca67338 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1890,6 +1890,27 @@ ips_ping_for_i915_load(void)
+ }
+ }
+
++static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
++{
++ struct apertures_struct *ap;
++ struct pci_dev *pdev = dev_priv->dev->pdev;
++ bool primary;
++
++ ap = alloc_apertures(1);
++ if (!ap)
++ return;
++
++ ap->ranges[0].base = dev_priv->dev->agp->base;
++ ap->ranges[0].size =
++ dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
++ primary =
++ pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
++
++ remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
++
++ kfree(ap);
++}
++
+ /**
+ * i915_driver_load - setup chip and create an initial config
+ * @dev: DRM device
+@@ -1927,6 +1948,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ goto free_priv;
+ }
+
++ dev_priv->mm.gtt = intel_gtt_get();
++ if (!dev_priv->mm.gtt) {
++ DRM_ERROR("Failed to initialize GTT\n");
++ ret = -ENODEV;
++ goto put_bridge;
++ }
++
++ i915_kick_out_firmware_fb(dev_priv);
++
+ /* overlay on gen2 is broken and can't address above 1G */
+ if (IS_GEN2(dev))
+ dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+@@ -1950,13 +1980,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ goto put_bridge;
+ }
+
+- dev_priv->mm.gtt = intel_gtt_get();
+- if (!dev_priv->mm.gtt) {
+- DRM_ERROR("Failed to initialize GTT\n");
+- ret = -ENODEV;
+- goto out_rmmap;
+- }
+-
+ agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+ dev_priv->mm.gtt_mapping =
+diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
+index 50ed53b..fc90c11 100644
+--- a/drivers/md/persistent-data/dm-space-map-checker.c
++++ b/drivers/md/persistent-data/dm-space-map-checker.c
+@@ -8,6 +8,7 @@
+
+ #include <linux/device-mapper.h>
+ #include <linux/export.h>
++#include <linux/vmalloc.h>
+
+ #ifdef CONFIG_DM_DEBUG_SPACE_MAPS
+
+@@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm)
+
+ ca->nr = nr_blocks;
+ ca->nr_free = nr_blocks;
+- ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
+- if (!ca->counts)
+- return -ENOMEM;
++
++ if (!nr_blocks)
++ ca->counts = NULL;
++ else {
++ ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks);
++ if (!ca->counts)
++ return -ENOMEM;
++ }
+
+ return 0;
+ }
+
++static void ca_destroy(struct count_array *ca)
++{
++ vfree(ca->counts);
++}
++
+ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
+ {
+ int r;
+@@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
+ static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
+ {
+ dm_block_t nr_blocks = ca->nr + extra_blocks;
+- uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
++ uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks);
+ if (!counts)
+ return -ENOMEM;
+
+- memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
+- kfree(ca->counts);
++ if (ca->counts) {
++ memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
++ ca_destroy(ca);
++ }
+ ca->nr = nr_blocks;
+ ca->nr_free += extra_blocks;
+ ca->counts = counts;
+@@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new)
+ return 0;
+ }
+
+-static void ca_destroy(struct count_array *ca)
+-{
+- kfree(ca->counts);
+-}
+-
+ /*----------------------------------------------------------------*/
+
+ struct sm_checker {
+@@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
+ int r;
+ struct sm_checker *smc;
+
+- if (!sm)
+- return NULL;
++ if (IS_ERR_OR_NULL(sm))
++ return ERR_PTR(-EINVAL);
+
+ smc = kmalloc(sizeof(*smc), GFP_KERNEL);
+ if (!smc)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ memcpy(&smc->sm, &ops_, sizeof(smc->sm));
+ r = ca_create(&smc->old_counts, sm);
+ if (r) {
+ kfree(smc);
+- return NULL;
++ return ERR_PTR(r);
+ }
+
+ r = ca_create(&smc->counts, sm);
+ if (r) {
+ ca_destroy(&smc->old_counts);
+ kfree(smc);
+- return NULL;
++ return ERR_PTR(r);
+ }
+
+ smc->real_sm = sm;
+@@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
+ ca_destroy(&smc->counts);
+ ca_destroy(&smc->old_counts);
+ kfree(smc);
+- return NULL;
++ return ERR_PTR(r);
+ }
+
+ r = ca_commit(&smc->old_counts, &smc->counts);
+@@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
+ ca_destroy(&smc->counts);
+ ca_destroy(&smc->old_counts);
+ kfree(smc);
+- return NULL;
++ return ERR_PTR(r);
+ }
+
+ return &smc->sm;
+@@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
+ int r;
+ struct sm_checker *smc;
+
+- if (!sm)
+- return NULL;
++ if (IS_ERR_OR_NULL(sm))
++ return ERR_PTR(-EINVAL);
+
+ smc = kmalloc(sizeof(*smc), GFP_KERNEL);
+ if (!smc)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ memcpy(&smc->sm, &ops_, sizeof(smc->sm));
+ r = ca_create(&smc->old_counts, sm);
+ if (r) {
+ kfree(smc);
+- return NULL;
++ return ERR_PTR(r);
+ }
+
+ r = ca_create(&smc->counts, sm);
+ if (r) {
+ ca_destroy(&smc->old_counts);
+ kfree(smc);
+- return NULL;
++ return ERR_PTR(r);
+ }
+
+ smc->real_sm = sm;
+diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
+index fc469ba..3d0ed53 100644
+--- a/drivers/md/persistent-data/dm-space-map-disk.c
++++ b/drivers/md/persistent-data/dm-space-map-disk.c
+@@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
+ dm_block_t nr_blocks)
+ {
+ struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
+- return dm_sm_checker_create_fresh(sm);
++ struct dm_space_map *smc;
++
++ if (IS_ERR_OR_NULL(sm))
++ return sm;
++
++ smc = dm_sm_checker_create_fresh(sm);
++ if (IS_ERR(smc))
++ dm_sm_destroy(sm);
++
++ return smc;
+ }
+ EXPORT_SYMBOL_GPL(dm_sm_disk_create);
+
+diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
+index 6f8d387..ba54aac 100644
+--- a/drivers/md/persistent-data/dm-transaction-manager.c
++++ b/drivers/md/persistent-data/dm-transaction-manager.c
+@@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
+
+ void dm_tm_destroy(struct dm_transaction_manager *tm)
+ {
++ if (!tm->is_clone)
++ wipe_shadow_table(tm);
++
+ kfree(tm);
+ }
+ EXPORT_SYMBOL_GPL(dm_tm_destroy);
+@@ -342,8 +345,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
+ }
+
+ *sm = dm_sm_checker_create(inner);
+- if (!*sm)
++ if (IS_ERR(*sm)) {
++ r = PTR_ERR(*sm);
+ goto bad2;
++ }
+
+ } else {
+ r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
+@@ -362,8 +367,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
+ }
+
+ *sm = dm_sm_checker_create(inner);
+- if (!*sm)
++ if (IS_ERR(*sm)) {
++ r = PTR_ERR(*sm);
+ goto bad2;
++ }
+ }
+
+ return 0;
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index b219449..7a9eef6 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1919,7 +1919,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+ if (r10_sync_page_io(rdev,
+ r10_bio->devs[sl].addr +
+ sect,
+- s<<9, conf->tmppage, WRITE)
++ s, conf->tmppage, WRITE)
+ == 0) {
+ /* Well, this device is dead */
+ printk(KERN_NOTICE
+@@ -1956,7 +1956,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+ switch (r10_sync_page_io(rdev,
+ r10_bio->devs[sl].addr +
+ sect,
+- s<<9, conf->tmppage,
++ s, conf->tmppage,
+ READ)) {
+ case 0:
+ /* Well, this device is dead */
+@@ -2119,7 +2119,7 @@ read_more:
+ rdev = conf->mirrors[mirror].rdev;
+ printk_ratelimited(
+ KERN_ERR
+- "md/raid10:%s: %s: redirecting"
++ "md/raid10:%s: %s: redirecting "
+ "sector %llu to another mirror\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b),
+@@ -2436,6 +2436,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+ /* want to reconstruct this device */
+ rb2 = r10_bio;
+ sect = raid10_find_virt(conf, sector_nr, i);
++ if (sect >= mddev->resync_max_sectors) {
++ /* last stripe is not complete - don't
++ * try to recover this sector.
++ */
++ continue;
++ }
+ /* Unless we are doing a full sync, we only need
+ * to recover the block if it is set in the bitmap
+ */
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 858fdbb..6ba4954 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -542,6 +542,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
+ * a chance*/
+ md_check_recovery(conf->mddev);
+ }
++ /*
++ * Because md_wait_for_blocked_rdev
++ * will dec nr_pending, we must
++ * increment it first.
++ */
++ atomic_inc(&rdev->nr_pending);
+ md_wait_for_blocked_rdev(rdev, conf->mddev);
+ } else {
+ /* Acknowledged bad block - skip the write */
+@@ -3621,7 +3627,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
+ raid_bio->bi_next = (void*)rdev;
+ align_bi->bi_bdev = rdev->bdev;
+ align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
+- align_bi->bi_sector += rdev->data_offset;
+
+ if (!bio_fits_rdev(align_bi) ||
+ is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
+@@ -3632,6 +3637,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
+ return 0;
+ }
+
++ /* No reshape active, so we can trust rdev->data_offset */
++ align_bi->bi_sector += rdev->data_offset;
++
+ spin_lock_irq(&conf->device_lock);
+ wait_event_lock_irq(conf->wait_for_stripe,
+ conf->quiesce == 0,
+diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
+index 72d3f23..68ecf48 100644
+--- a/drivers/mtd/nand/cafe_nand.c
++++ b/drivers/mtd/nand/cafe_nand.c
+@@ -102,7 +102,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
+ static int cafe_device_ready(struct mtd_info *mtd)
+ {
+ struct cafe_priv *cafe = mtd->priv;
+- int result = !!(cafe_readl(cafe, NAND_STATUS) | 0x40000000);
++ int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
+ uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
+
+ cafe_writel(cafe, irqs, NAND_IRQ);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index f65e0b9..1a88e38 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -77,6 +77,7 @@
+ #include <net/route.h>
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
++#include <net/pkt_sched.h>
+ #include "bonding.h"
+ #include "bond_3ad.h"
+ #include "bond_alb.h"
+@@ -382,8 +383,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
+ return next;
+ }
+
+-#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
+-
+ /**
+ * bond_dev_queue_xmit - Prepare skb for xmit.
+ *
+@@ -396,7 +395,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
+ {
+ skb->dev = slave_dev;
+
+- skb->queue_mapping = bond_queue_mapping(skb);
++ BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
++ sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
++ skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
+
+ if (unlikely(netpoll_tx_running(slave_dev)))
+ bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
+@@ -4151,7 +4152,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
+ /*
+ * Save the original txq to restore before passing to the driver
+ */
+- bond_queue_mapping(skb) = skb->queue_mapping;
++ qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
+
+ if (unlikely(txq >= dev->real_num_tx_queues)) {
+ do {
+diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
+index eeac9ca..68fe73c 100644
+--- a/drivers/net/dummy.c
++++ b/drivers/net/dummy.c
+@@ -37,6 +37,7 @@
+ #include <linux/rtnetlink.h>
+ #include <net/rtnetlink.h>
+ #include <linux/u64_stats_sync.h>
++#include <linux/sched.h>
+
+ static int numdummies = 1;
+
+@@ -186,8 +187,10 @@ static int __init dummy_init_module(void)
+ rtnl_lock();
+ err = __rtnl_link_register(&dummy_link_ops);
+
+- for (i = 0; i < numdummies && !err; i++)
++ for (i = 0; i < numdummies && !err; i++) {
+ err = dummy_init_one();
++ cond_resched();
++ }
+ if (err < 0)
+ __rtnl_link_unregister(&dummy_link_ops);
+ rtnl_unlock();
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index bf266a0..36c7c4e 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -696,6 +696,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
+
+ copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
+ if (copied) {
++ int gso_segs = skb_shinfo(skb)->gso_segs;
++
+ /* record the sent skb in the sent_skb table */
+ BUG_ON(txo->sent_skb_list[start]);
+ txo->sent_skb_list[start] = skb;
+@@ -713,8 +715,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
+
+ be_txq_notify(adapter, txq->id, wrb_cnt);
+
+- be_tx_stats_update(txo, wrb_cnt, copied,
+- skb_shinfo(skb)->gso_segs, stopped);
++ be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
+ } else {
+ txq->head = start;
+ dev_kfree_skb_any(skb);
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index 65c51ff..11ddd838 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -4361,10 +4361,12 @@ static int sky2_set_features(struct net_device *dev, u32 features)
+ struct sky2_port *sky2 = netdev_priv(dev);
+ u32 changed = dev->features ^ features;
+
+- if (changed & NETIF_F_RXCSUM) {
+- u32 on = features & NETIF_F_RXCSUM;
+- sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+- on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
++ if ((changed & NETIF_F_RXCSUM) &&
++ !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
++ sky2_write32(sky2->hw,
++ Q_ADDR(rxqaddr[sky2->port], Q_CSR),
++ (features & NETIF_F_RXCSUM)
++ ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+ }
+
+ if (changed & NETIF_F_RXHASH)
+diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
+index 0f9ee46..4cc4a8b 100644
+--- a/drivers/net/wireless/ath/ath.h
++++ b/drivers/net/wireless/ath/ath.h
+@@ -143,6 +143,7 @@ struct ath_common {
+ u32 keymax;
+ DECLARE_BITMAP(keymap, ATH_KEYMAX);
+ DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
++ DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
+ enum ath_crypt_caps crypt_caps;
+
+ unsigned int clockrate;
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index 6973620..7f97164 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -557,7 +557,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
+
+ if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
+ if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
+- ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
++ ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
+ !ah->is_pciexpress)) {
+ ah->config.serialize_regmode =
+ SER_REG_MODE_ON;
+diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
+index 2f3aeac..e6d791c 100644
+--- a/drivers/net/wireless/ath/ath9k/recv.c
++++ b/drivers/net/wireless/ath/ath9k/recv.c
+@@ -829,7 +829,8 @@ static bool ath9k_rx_accept(struct ath_common *common,
+ * descriptor does contain a valid key index. This has been observed
+ * mostly with CCMP encryption.
+ */
+- if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
++ if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
++ !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
+ rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
+
+ if (!rx_stats->rs_datalen)
+diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
+index 4cf7c5e..1ec3fa5 100644
+--- a/drivers/net/wireless/ath/key.c
++++ b/drivers/net/wireless/ath/key.c
+@@ -556,6 +556,9 @@ int ath_key_config(struct ath_common *common,
+ return -EIO;
+
+ set_bit(idx, common->keymap);
++ if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
++ set_bit(idx, common->ccmp_keymap);
++
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ set_bit(idx + 64, common->keymap);
+ set_bit(idx, common->tkip_keymap);
+@@ -582,6 +585,7 @@ void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
+ return;
+
+ clear_bit(key->hw_key_idx, common->keymap);
++ clear_bit(key->hw_key_idx, common->ccmp_keymap);
+ if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
+ return;
+
+diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
+index 7aa9aa0..39fd4d5 100644
+--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
++++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
+@@ -267,7 +267,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
+ else
+ last_seq = priv->rx_seq[tid];
+
+- if (last_seq >= new_node->start_win)
++ if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
++ last_seq >= new_node->start_win)
+ new_node->start_win = last_seq + 1;
+
+ new_node->win_size = win_size;
+@@ -611,5 +612,5 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
+ INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
+- memset(priv->rx_seq, 0, sizeof(priv->rx_seq));
++ mwifiex_reset_11n_rx_seq_num(priv);
+ }
+diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
+index 033c8ad..7128baa 100644
+--- a/drivers/net/wireless/mwifiex/11n_rxreorder.h
++++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
+@@ -37,6 +37,13 @@
+
+ #define ADDBA_RSP_STATUS_ACCEPT 0
+
++#define MWIFIEX_DEF_11N_RX_SEQ_NUM 0xffff
++
++static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
++{
++ memset(priv->rx_seq, 0xff, sizeof(priv->rx_seq));
++}
++
+ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *,
+ u16 seqNum,
+ u16 tid, u8 *ta,
+diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
+index 462c710..01dcb1a 100644
+--- a/drivers/net/wireless/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/mwifiex/cfg80211.c
+@@ -1177,11 +1177,11 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
+ void *mdev_priv;
+
+ if (!priv)
+- return NULL;
++ return ERR_PTR(-EFAULT);
+
+ adapter = priv->adapter;
+ if (!adapter)
+- return NULL;
++ return ERR_PTR(-EFAULT);
+
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+@@ -1190,7 +1190,7 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
+ if (priv->bss_mode) {
+ wiphy_err(wiphy, "cannot create multiple"
+ " station/adhoc interfaces\n");
+- return NULL;
++ return ERR_PTR(-EINVAL);
+ }
+
+ if (type == NL80211_IFTYPE_UNSPECIFIED)
+@@ -1208,14 +1208,15 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
+ break;
+ default:
+ wiphy_err(wiphy, "type not supported\n");
+- return NULL;
++ return ERR_PTR(-EINVAL);
+ }
+
+ dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name,
+ ether_setup, 1);
+ if (!dev) {
+ wiphy_err(wiphy, "no memory available for netdevice\n");
+- goto error;
++ priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
++ return ERR_PTR(-ENOMEM);
+ }
+
+ dev_net_set(dev, wiphy_net(wiphy));
+@@ -1240,7 +1241,9 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
+ /* Register network device */
+ if (register_netdevice(dev)) {
+ wiphy_err(wiphy, "cannot register virtual network device\n");
+- goto error;
++ free_netdev(dev);
++ priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
++ return ERR_PTR(-EFAULT);
+ }
+
+ sema_init(&priv->async_sem, 1);
+@@ -1252,12 +1255,6 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
+ mwifiex_dev_debugfs_init(priv);
+ #endif
+ return dev;
+-error:
+- if (dev && (dev->reg_state == NETREG_UNREGISTERED))
+- free_netdev(dev);
+- priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+-
+- return NULL;
+ }
+ EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
+
+diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
+index 6c239c3..06fcf1e 100644
+--- a/drivers/net/wireless/mwifiex/wmm.c
++++ b/drivers/net/wireless/mwifiex/wmm.c
+@@ -406,6 +406,8 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
+ priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
+ priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
+
++ mwifiex_reset_11n_rx_seq_num(priv);
++
+ atomic_set(&priv->wmm.tx_pkts_queued, 0);
+ atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
+ }
+@@ -1209,10 +1211,12 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
+ return 0;
+ }
+
+- if (!ptr->is_11n_enabled || mwifiex_is_ba_stream_setup(priv, ptr, tid)
+- || ((priv->sec_info.wpa_enabled
+- || priv->sec_info.wpa2_enabled) && !priv->wpa_is_gtk_set)
+- ) {
++ if (!ptr->is_11n_enabled ||
++ mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
++ priv->wps.session_enable ||
++ ((priv->sec_info.wpa_enabled ||
++ priv->sec_info.wpa2_enabled) &&
++ !priv->wpa_is_gtk_set)) {
+ mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
+ /* ra_list_spinlock has been freed in
+ mwifiex_send_single_packet() */
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index 94a3e17..0302148 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -311,9 +311,11 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+ {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
++ {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
+ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
++ {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
+ /* HP - Lite-On ,8188CUS Slim Combo */
+ {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
+@@ -355,6 +357,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
+ {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
+ {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
++ {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/
+ {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
+ {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
+ {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
+diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
+index 3269213..64ddb63 100644
+--- a/drivers/target/tcm_fc/tfc_sess.c
++++ b/drivers/target/tcm_fc/tfc_sess.c
+@@ -61,7 +61,8 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
+ struct ft_tport *tport;
+ int i;
+
+- tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
++ tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP],
++ lockdep_is_held(&ft_lport_lock));
+ if (tport && tport->tpg)
+ return tport;
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 3568374..19b127c 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -692,6 +692,8 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
+ kfree(name);
+
+ iput(inode);
++
++ btrfs_run_delayed_items(trans, root);
+ return ret;
+ }
+
+@@ -897,6 +899,7 @@ again:
+ ret = btrfs_unlink_inode(trans, root, dir,
+ inode, victim_name,
+ victim_name_len);
++ btrfs_run_delayed_items(trans, root);
+ }
+ kfree(victim_name);
+ ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
+@@ -1477,6 +1480,9 @@ again:
+ ret = btrfs_unlink_inode(trans, root, dir, inode,
+ name, name_len);
+ BUG_ON(ret);
++
++ btrfs_run_delayed_items(trans, root);
++
+ kfree(name);
+ iput(inode);
+
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 9e0675a..b21670c 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2975,18 +2975,15 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
+ * MS-CIFS indicates that servers are only limited by the client's
+ * bufsize for reads, testing against win98se shows that it throws
+ * INVALID_PARAMETER errors if you try to request too large a read.
++ * OS/2 just sends back short reads.
+ *
+- * If the server advertises a MaxBufferSize of less than one page,
+- * assume that it also can't satisfy reads larger than that either.
+- *
+- * FIXME: Is there a better heuristic for this?
++ * If the server doesn't advertise CAP_LARGE_READ_X, then assume that
++ * it can't handle a read request larger than its MaxBufferSize either.
+ */
+ if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
+ defsize = CIFS_DEFAULT_IOSIZE;
+ else if (server->capabilities & CAP_LARGE_READ_X)
+ defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
+- else if (server->maxBuf >= PAGE_CACHE_SIZE)
+- defsize = CIFSMaxBufSize;
+ else
+ defsize = server->maxBuf - sizeof(READ_RSP);
+
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 6e39668..07ee5b4 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -2422,8 +2422,10 @@ out_dio:
+ unaligned_dio = 0;
+ }
+
+- if (unaligned_dio)
++ if (unaligned_dio) {
++ ocfs2_iocb_clear_unaligned_aio(iocb);
+ atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
++ }
+
+ out:
+ if (rw_level != -1)
+diff --git a/fs/open.c b/fs/open.c
+index 22c41b5..e2b5d51 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -396,10 +396,10 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
+ {
+ struct file *file;
+ struct inode *inode;
+- int error;
++ int error, fput_needed;
+
+ error = -EBADF;
+- file = fget(fd);
++ file = fget_raw_light(fd, &fput_needed);
+ if (!file)
+ goto out;
+
+@@ -413,7 +413,7 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
+ if (!error)
+ set_fs_pwd(current->fs, &file->f_path);
+ out_putf:
+- fput(file);
++ fput_light(file, fput_needed);
+ out:
+ return error;
+ }
+diff --git a/fs/splice.c b/fs/splice.c
+index 6d0dfb8..014fcb4 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -274,13 +274,16 @@ void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
+ * Check if we need to grow the arrays holding pages and partial page
+ * descriptions.
+ */
+-int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
++int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
+ {
+- if (pipe->buffers <= PIPE_DEF_BUFFERS)
++ unsigned int buffers = ACCESS_ONCE(pipe->buffers);
++
++ spd->nr_pages_max = buffers;
++ if (buffers <= PIPE_DEF_BUFFERS)
+ return 0;
+
+- spd->pages = kmalloc(pipe->buffers * sizeof(struct page *), GFP_KERNEL);
+- spd->partial = kmalloc(pipe->buffers * sizeof(struct partial_page), GFP_KERNEL);
++ spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
++ spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);
+
+ if (spd->pages && spd->partial)
+ return 0;
+@@ -290,10 +293,9 @@ int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
+ return -ENOMEM;
+ }
+
+-void splice_shrink_spd(struct pipe_inode_info *pipe,
+- struct splice_pipe_desc *spd)
++void splice_shrink_spd(struct splice_pipe_desc *spd)
+ {
+- if (pipe->buffers <= PIPE_DEF_BUFFERS)
++ if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
+ return;
+
+ kfree(spd->pages);
+@@ -316,6 +318,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .partial = partial,
++ .nr_pages_max = PIPE_DEF_BUFFERS,
+ .flags = flags,
+ .ops = &page_cache_pipe_buf_ops,
+ .spd_release = spd_release_page,
+@@ -327,7 +330,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
+ index = *ppos >> PAGE_CACHE_SHIFT;
+ loff = *ppos & ~PAGE_CACHE_MASK;
+ req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+- nr_pages = min(req_pages, pipe->buffers);
++ nr_pages = min(req_pages, spd.nr_pages_max);
+
+ /*
+ * Lookup the (hopefully) full range of pages we need.
+@@ -498,7 +501,7 @@ fill_it:
+ if (spd.nr_pages)
+ error = splice_to_pipe(pipe, &spd);
+
+- splice_shrink_spd(pipe, &spd);
++ splice_shrink_spd(&spd);
+ return error;
+ }
+
+@@ -599,6 +602,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .partial = partial,
++ .nr_pages_max = PIPE_DEF_BUFFERS,
+ .flags = flags,
+ .ops = &default_pipe_buf_ops,
+ .spd_release = spd_release_page,
+@@ -609,8 +613,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
+
+ res = -ENOMEM;
+ vec = __vec;
+- if (pipe->buffers > PIPE_DEF_BUFFERS) {
+- vec = kmalloc(pipe->buffers * sizeof(struct iovec), GFP_KERNEL);
++ if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
++ vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
+ if (!vec)
+ goto shrink_ret;
+ }
+@@ -618,7 +622,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
+ offset = *ppos & ~PAGE_CACHE_MASK;
+ nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+
+- for (i = 0; i < nr_pages && i < pipe->buffers && len; i++) {
++ for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
+ struct page *page;
+
+ page = alloc_page(GFP_USER);
+@@ -666,7 +670,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
+ shrink_ret:
+ if (vec != __vec)
+ kfree(vec);
+- splice_shrink_spd(pipe, &spd);
++ splice_shrink_spd(&spd);
+ return res;
+
+ err:
+@@ -1616,6 +1620,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .partial = partial,
++ .nr_pages_max = PIPE_DEF_BUFFERS,
+ .flags = flags,
+ .ops = &user_page_pipe_buf_ops,
+ .spd_release = spd_release_page,
+@@ -1631,13 +1636,13 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
+
+ spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
+ spd.partial, flags & SPLICE_F_GIFT,
+- pipe->buffers);
++ spd.nr_pages_max);
+ if (spd.nr_pages <= 0)
+ ret = spd.nr_pages;
+ else
+ ret = splice_to_pipe(pipe, &spd);
+
+- splice_shrink_spd(pipe, &spd);
++ splice_shrink_spd(&spd);
+ return ret;
+ }
+
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 87cb24a..270e135 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -56,6 +56,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/bitmap.h>
+ #include <linux/crc-itu-t.h>
++#include <linux/log2.h>
+ #include <asm/byteorder.h>
+
+ #include "udf_sb.h"
+@@ -1217,16 +1218,65 @@ out_bh:
+ return ret;
+ }
+
++static int udf_load_sparable_map(struct super_block *sb,
++ struct udf_part_map *map,
++ struct sparablePartitionMap *spm)
++{
++ uint32_t loc;
++ uint16_t ident;
++ struct sparingTable *st;
++ struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
++ int i;
++ struct buffer_head *bh;
++
++ map->s_partition_type = UDF_SPARABLE_MAP15;
++ sdata->s_packet_len = le16_to_cpu(spm->packetLength);
++ if (!is_power_of_2(sdata->s_packet_len)) {
++ udf_err(sb, "error loading logical volume descriptor: "
++ "Invalid packet length %u\n",
++ (unsigned)sdata->s_packet_len);
++ return -EIO;
++ }
++ if (spm->numSparingTables > 4) {
++ udf_err(sb, "error loading logical volume descriptor: "
++ "Too many sparing tables (%d)\n",
++ (int)spm->numSparingTables);
++ return -EIO;
++ }
++
++ for (i = 0; i < spm->numSparingTables; i++) {
++ loc = le32_to_cpu(spm->locSparingTable[i]);
++ bh = udf_read_tagged(sb, loc, loc, &ident);
++ if (!bh)
++ continue;
++
++ st = (struct sparingTable *)bh->b_data;
++ if (ident != 0 ||
++ strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
++ strlen(UDF_ID_SPARING)) ||
++ sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
++ sb->s_blocksize) {
++ brelse(bh);
++ continue;
++ }
++
++ sdata->s_spar_map[i] = bh;
++ }
++ map->s_partition_func = udf_get_pblock_spar15;
++ return 0;
++}
++
+ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
+ struct kernel_lb_addr *fileset)
+ {
+ struct logicalVolDesc *lvd;
+- int i, j, offset;
++ int i, offset;
+ uint8_t type;
+ struct udf_sb_info *sbi = UDF_SB(sb);
+ struct genericPartitionMap *gpm;
+ uint16_t ident;
+ struct buffer_head *bh;
++ unsigned int table_len;
+ int ret = 0;
+
+ bh = udf_read_tagged(sb, block, block, &ident);
+@@ -1234,15 +1284,20 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
+ return 1;
+ BUG_ON(ident != TAG_IDENT_LVD);
+ lvd = (struct logicalVolDesc *)bh->b_data;
+-
+- i = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
+- if (i != 0) {
+- ret = i;
++ table_len = le32_to_cpu(lvd->mapTableLength);
++ if (sizeof(*lvd) + table_len > sb->s_blocksize) {
++ udf_err(sb, "error loading logical volume descriptor: "
++ "Partition table too long (%u > %lu)\n", table_len,
++ sb->s_blocksize - sizeof(*lvd));
+ goto out_bh;
+ }
+
++ ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
++ if (ret)
++ goto out_bh;
++
+ for (i = 0, offset = 0;
+- i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength);
++ i < sbi->s_partitions && offset < table_len;
+ i++, offset += gpm->partitionMapLength) {
+ struct udf_part_map *map = &sbi->s_partmaps[i];
+ gpm = (struct genericPartitionMap *)
+@@ -1277,38 +1332,9 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
+ } else if (!strncmp(upm2->partIdent.ident,
+ UDF_ID_SPARABLE,
+ strlen(UDF_ID_SPARABLE))) {
+- uint32_t loc;
+- struct sparingTable *st;
+- struct sparablePartitionMap *spm =
+- (struct sparablePartitionMap *)gpm;
+-
+- map->s_partition_type = UDF_SPARABLE_MAP15;
+- map->s_type_specific.s_sparing.s_packet_len =
+- le16_to_cpu(spm->packetLength);
+- for (j = 0; j < spm->numSparingTables; j++) {
+- struct buffer_head *bh2;
+-
+- loc = le32_to_cpu(
+- spm->locSparingTable[j]);
+- bh2 = udf_read_tagged(sb, loc, loc,
+- &ident);
+- map->s_type_specific.s_sparing.
+- s_spar_map[j] = bh2;
+-
+- if (bh2 == NULL)
+- continue;
+-
+- st = (struct sparingTable *)bh2->b_data;
+- if (ident != 0 || strncmp(
+- st->sparingIdent.ident,
+- UDF_ID_SPARING,
+- strlen(UDF_ID_SPARING))) {
+- brelse(bh2);
+- map->s_type_specific.s_sparing.
+- s_spar_map[j] = NULL;
+- }
+- }
+- map->s_partition_func = udf_get_pblock_spar15;
++ if (udf_load_sparable_map(sb, map,
++ (struct sparablePartitionMap *)gpm) < 0)
++ goto out_bh;
+ } else if (!strncmp(upm2->partIdent.ident,
+ UDF_ID_METADATA,
+ strlen(UDF_ID_METADATA))) {
+diff --git a/include/linux/aio.h b/include/linux/aio.h
+index 2314ad8..b1a520e 100644
+--- a/include/linux/aio.h
++++ b/include/linux/aio.h
+@@ -140,6 +140,7 @@ struct kiocb {
+ (x)->ki_dtor = NULL; \
+ (x)->ki_obj.tsk = tsk; \
+ (x)->ki_user_data = 0; \
++ (x)->private = NULL; \
+ } while (0)
+
+ #define AIO_RING_MAGIC 0xa10a10a1
+diff --git a/include/linux/splice.h b/include/linux/splice.h
+index 26e5b61..09a545a 100644
+--- a/include/linux/splice.h
++++ b/include/linux/splice.h
+@@ -51,7 +51,8 @@ struct partial_page {
+ struct splice_pipe_desc {
+ struct page **pages; /* page map */
+ struct partial_page *partial; /* pages[] may not be contig */
+- int nr_pages; /* number of pages in map */
++ int nr_pages; /* number of populated pages in map */
++ unsigned int nr_pages_max; /* pages[] & partial[] arrays size */
+ unsigned int flags; /* splice flags */
+ const struct pipe_buf_operations *ops;/* ops associated with output pipe */
+ void (*spd_release)(struct splice_pipe_desc *, unsigned int);
+@@ -85,9 +86,8 @@ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
+ /*
+ * for dynamic pipe sizing
+ */
+-extern int splice_grow_spd(struct pipe_inode_info *, struct splice_pipe_desc *);
+-extern void splice_shrink_spd(struct pipe_inode_info *,
+- struct splice_pipe_desc *);
++extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
++extern void splice_shrink_spd(struct splice_pipe_desc *);
+ extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
+
+ extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
+diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
+index 9808877..a7a683e 100644
+--- a/include/net/cipso_ipv4.h
++++ b/include/net/cipso_ipv4.h
+@@ -42,6 +42,7 @@
+ #include <net/netlabel.h>
+ #include <net/request_sock.h>
+ #include <linux/atomic.h>
++#include <asm/unaligned.h>
+
+ /* known doi values */
+ #define CIPSO_V4_DOI_UNKNOWN 0x00000000
+@@ -285,7 +286,33 @@ static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
+ static inline int cipso_v4_validate(const struct sk_buff *skb,
+ unsigned char **option)
+ {
+- return -ENOSYS;
++ unsigned char *opt = *option;
++ unsigned char err_offset = 0;
++ u8 opt_len = opt[1];
++ u8 opt_iter;
++
++ if (opt_len < 8) {
++ err_offset = 1;
++ goto out;
++ }
++
++ if (get_unaligned_be32(&opt[2]) == 0) {
++ err_offset = 2;
++ goto out;
++ }
++
++ for (opt_iter = 6; opt_iter < opt_len;) {
++ if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
++ err_offset = opt_iter + 1;
++ goto out;
++ }
++ opt_iter += opt[opt_iter + 1];
++ }
++
++out:
++ *option = opt + err_offset;
++ return err_offset;
++
+ }
+ #endif /* CONFIG_NETLABEL */
+
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 55ce96b..9d7d54a 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -220,13 +220,16 @@ struct tcf_proto {
+
+ struct qdisc_skb_cb {
+ unsigned int pkt_len;
+- unsigned char data[24];
++ u16 bond_queue_mapping;
++ u16 _pad;
++ unsigned char data[20];
+ };
+
+ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
+ {
+ struct qdisc_skb_cb *qcb;
+- BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz);
++
++ BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
+ BUILD_BUG_ON(sizeof(qcb->data) < sz);
+ }
+
+diff --git a/kernel/relay.c b/kernel/relay.c
+index b6f803a..a535fc9 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -1235,6 +1235,7 @@ static ssize_t subbuf_splice_actor(struct file *in,
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .nr_pages = 0,
++ .nr_pages_max = PIPE_DEF_BUFFERS,
+ .partial = partial,
+ .flags = flags,
+ .ops = &relay_pipe_buf_ops,
+@@ -1302,8 +1303,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
+ ret += padding;
+
+ out:
+- splice_shrink_spd(pipe, &spd);
+- return ret;
++ splice_shrink_spd(&spd);
++ return ret;
+ }
+
+ static ssize_t relay_file_splice_read(struct file *in,
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 697e49d..5638104 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2541,10 +2541,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
+ if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+ !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
+ atomic_inc(&global_trace.data[cpu]->disabled);
++ ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
+ }
+ if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+ cpumask_test_cpu(cpu, tracing_cpumask_new)) {
+ atomic_dec(&global_trace.data[cpu]->disabled);
++ ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
+ }
+ }
+ arch_spin_unlock(&ftrace_max_lock);
+@@ -3456,6 +3458,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
+ .pages = pages_def,
+ .partial = partial_def,
+ .nr_pages = 0, /* This gets updated below. */
++ .nr_pages_max = PIPE_DEF_BUFFERS,
+ .flags = flags,
+ .ops = &tracing_pipe_buf_ops,
+ .spd_release = tracing_spd_release_pipe,
+@@ -3527,7 +3530,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
+
+ ret = splice_to_pipe(pipe, &spd);
+ out:
+- splice_shrink_spd(pipe, &spd);
++ splice_shrink_spd(&spd);
+ return ret;
+
+ out_err:
+@@ -4017,6 +4020,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ struct splice_pipe_desc spd = {
+ .pages = pages_def,
+ .partial = partial_def,
++ .nr_pages_max = PIPE_DEF_BUFFERS,
+ .flags = flags,
+ .ops = &buffer_pipe_buf_ops,
+ .spd_release = buffer_spd_release,
+@@ -4104,7 +4108,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ }
+
+ ret = splice_to_pipe(pipe, &spd);
+- splice_shrink_spd(pipe, &spd);
++ splice_shrink_spd(&spd);
+ out:
+ return ret;
+ }
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 74bf193..23d3a6b 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -13,6 +13,7 @@
+ #include <linux/hugetlb.h>
+ #include <linux/sched.h>
+ #include <linux/ksm.h>
++#include <linux/file.h>
+
+ /*
+ * Any behaviour which results in changes to the vma->vm_flags needs to
+@@ -197,14 +198,16 @@ static long madvise_remove(struct vm_area_struct *vma,
+ struct address_space *mapping;
+ loff_t offset, endoff;
+ int error;
++ struct file *f;
+
+ *prev = NULL; /* tell sys_madvise we drop mmap_sem */
+
+ if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
+ return -EINVAL;
+
+- if (!vma->vm_file || !vma->vm_file->f_mapping
+- || !vma->vm_file->f_mapping->host) {
++ f = vma->vm_file;
++
++ if (!f || !f->f_mapping || !f->f_mapping->host) {
+ return -EINVAL;
+ }
+
+@@ -218,9 +221,16 @@ static long madvise_remove(struct vm_area_struct *vma,
+ endoff = (loff_t)(end - vma->vm_start - 1)
+ + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+
+- /* vmtruncate_range needs to take i_mutex */
++ /*
++ * vmtruncate_range may need to take i_mutex. We need to
++ * explicitly grab a reference because the vma (and hence the
++ * vma's reference to the file) can go away as soon as we drop
++ * mmap_sem.
++ */
++ get_file(f);
+ up_read(&current->mm->mmap_sem);
+ error = vmtruncate_range(mapping->host, offset, endoff);
++ fput(f);
+ down_read(&current->mm->mmap_sem);
+ return error;
+ }
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 6c253f7..7a82174 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1359,6 +1359,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .partial = partial,
++ .nr_pages_max = PIPE_DEF_BUFFERS,
+ .flags = flags,
+ .ops = &page_cache_pipe_buf_ops,
+ .spd_release = spd_release_page,
+@@ -1447,7 +1448,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
+ if (spd.nr_pages)
+ error = splice_to_pipe(pipe, &spd);
+
+- splice_shrink_spd(pipe, &spd);
++ splice_shrink_spd(&spd);
+
+ if (error > 0) {
+ *ppos += error;
+diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
+index f603e5b..f3f75ad 100644
+--- a/net/bridge/br_if.c
++++ b/net/bridge/br_if.c
+@@ -240,6 +240,7 @@ int br_add_bridge(struct net *net, const char *name)
+ return -ENOMEM;
+
+ dev_net_set(dev, net);
++ dev->rtnl_link_ops = &br_link_ops;
+
+ res = register_netdev(dev);
+ if (res)
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index a1daf82..cbf9ccd 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -211,7 +211,7 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
+ return 0;
+ }
+
+-static struct rtnl_link_ops br_link_ops __read_mostly = {
++struct rtnl_link_ops br_link_ops __read_mostly = {
+ .kind = "bridge",
+ .priv_size = sizeof(struct net_bridge),
+ .setup = br_dev_setup,
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index 93264df..b9bba8f 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -536,6 +536,7 @@ extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr)
+ #endif
+
+ /* br_netlink.c */
++extern struct rtnl_link_ops br_link_ops;
+ extern int br_netlink_init(void);
+ extern void br_netlink_fini(void);
+ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index 2b587ec..2367246 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -1672,6 +1672,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
+ case ETHTOOL_GRXCSUM:
+ case ETHTOOL_GTXCSUM:
+ case ETHTOOL_GSG:
++ case ETHTOOL_GSSET_INFO:
+ case ETHTOOL_GSTRINGS:
+ case ETHTOOL_GTSO:
+ case ETHTOOL_GPERMADDR:
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index ab0633f..db4bb7a 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -351,22 +351,23 @@ EXPORT_SYMBOL(netpoll_send_skb_on_dev);
+
+ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
+ {
+- int total_len, eth_len, ip_len, udp_len;
++ int total_len, ip_len, udp_len;
+ struct sk_buff *skb;
+ struct udphdr *udph;
+ struct iphdr *iph;
+ struct ethhdr *eth;
+
+ udp_len = len + sizeof(*udph);
+- ip_len = eth_len = udp_len + sizeof(*iph);
+- total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;
++ ip_len = udp_len + sizeof(*iph);
++ total_len = ip_len + LL_RESERVED_SPACE(np->dev);
+
+- skb = find_skb(np, total_len, total_len - len);
++ skb = find_skb(np, total_len + np->dev->needed_tailroom,
++ total_len - len);
+ if (!skb)
+ return;
+
+ skb_copy_to_linear_data(skb, msg, len);
+- skb->len += len;
++ skb_put(skb, len);
+
+ skb_push(skb, sizeof(*udph));
+ skb_reset_transport_header(skb);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 2ec200de..af9c3c6 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1663,6 +1663,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+ struct splice_pipe_desc spd = {
+ .pages = pages,
+ .partial = partial,
++ .nr_pages_max = MAX_SKB_FRAGS,
+ .flags = flags,
+ .ops = &sock_pipe_buf_ops,
+ .spd_release = sock_spd_release,
+@@ -1709,7 +1710,7 @@ done:
+ lock_sock(sk);
+ }
+
+- splice_shrink_spd(pipe, &spd);
++ splice_shrink_spd(&spd);
+ return ret;
+ }
+
+diff --git a/net/core/sock.c b/net/core/sock.c
+index b23f174..8d095b9 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1497,6 +1497,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+ gfp_t gfp_mask;
+ long timeo;
+ int err;
++ int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
++
++ err = -EMSGSIZE;
++ if (npages > MAX_SKB_FRAGS)
++ goto failure;
+
+ gfp_mask = sk->sk_allocation;
+ if (gfp_mask & __GFP_WAIT)
+@@ -1515,14 +1520,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+ if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+ skb = alloc_skb(header_len, gfp_mask);
+ if (skb) {
+- int npages;
+ int i;
+
+ /* No pages, we're done... */
+ if (!data_len)
+ break;
+
+- npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ skb->truesize += data_len;
+ skb_shinfo(skb)->nr_frags = npages;
+ for (i = 0; i < npages; i++) {
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 059b9d9..2e21751 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2881,10 +2881,6 @@ static int __net_init ip6_route_net_init(struct net *net)
+ net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
+ net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
+
+-#ifdef CONFIG_PROC_FS
+- proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
+- proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
+-#endif
+ net->ipv6.ip6_rt_gc_expire = 30*HZ;
+
+ ret = 0;
+@@ -2905,10 +2901,6 @@ out_ip6_dst_ops:
+
+ static void __net_exit ip6_route_net_exit(struct net *net)
+ {
+-#ifdef CONFIG_PROC_FS
+- proc_net_remove(net, "ipv6_route");
+- proc_net_remove(net, "rt6_stats");
+-#endif
+ kfree(net->ipv6.ip6_null_entry);
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ kfree(net->ipv6.ip6_prohibit_entry);
+@@ -2917,11 +2909,33 @@ static void __net_exit ip6_route_net_exit(struct net *net)
+ dst_entries_destroy(&net->ipv6.ip6_dst_ops);
+ }
+
++static int __net_init ip6_route_net_init_late(struct net *net)
++{
++#ifdef CONFIG_PROC_FS
++ proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
++ proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
++#endif
++ return 0;
++}
++
++static void __net_exit ip6_route_net_exit_late(struct net *net)
++{
++#ifdef CONFIG_PROC_FS
++ proc_net_remove(net, "ipv6_route");
++ proc_net_remove(net, "rt6_stats");
++#endif
++}
++
+ static struct pernet_operations ip6_route_net_ops = {
+ .init = ip6_route_net_init,
+ .exit = ip6_route_net_exit,
+ };
+
++static struct pernet_operations ip6_route_net_late_ops = {
++ .init = ip6_route_net_init_late,
++ .exit = ip6_route_net_exit_late,
++};
++
+ static struct notifier_block ip6_route_dev_notifier = {
+ .notifier_call = ip6_route_dev_notify,
+ .priority = 0,
+@@ -2971,19 +2985,25 @@ int __init ip6_route_init(void)
+ if (ret)
+ goto xfrm6_init;
+
++ ret = register_pernet_subsys(&ip6_route_net_late_ops);
++ if (ret)
++ goto fib6_rules_init;
++
+ ret = -ENOBUFS;
+ if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
+ __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
+ __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
+- goto fib6_rules_init;
++ goto out_register_late_subsys;
+
+ ret = register_netdevice_notifier(&ip6_route_dev_notifier);
+ if (ret)
+- goto fib6_rules_init;
++ goto out_register_late_subsys;
+
+ out:
+ return ret;
+
++out_register_late_subsys:
++ unregister_pernet_subsys(&ip6_route_net_late_ops);
+ fib6_rules_init:
+ fib6_rules_cleanup();
+ xfrm6_init:
+@@ -3002,6 +3022,7 @@ out_kmem_cache:
+ void ip6_route_cleanup(void)
+ {
+ unregister_netdevice_notifier(&ip6_route_dev_notifier);
++ unregister_pernet_subsys(&ip6_route_net_late_ops);
+ fib6_rules_cleanup();
+ xfrm6_fini();
+ fib6_gc_cleanup();
+diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
+index d2726a7..3c55f63 100644
+--- a/net/l2tp/l2tp_eth.c
++++ b/net/l2tp/l2tp_eth.c
+@@ -167,6 +167,7 @@ static void l2tp_eth_delete(struct l2tp_session *session)
+ if (dev) {
+ unregister_netdev(dev);
+ spriv->dev = NULL;
++ module_put(THIS_MODULE);
+ }
+ }
+ }
+@@ -254,6 +255,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
+ if (rc < 0)
+ goto out_del_dev;
+
++ __module_get(THIS_MODULE);
+ /* Must be done after register_netdev() */
+ strlcpy(session->ifname, dev->name, IFNAMSIZ);
+
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 2fbbe1f..6c7e609 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -515,10 +515,12 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
+ sk->sk_bound_dev_if);
+ if (IS_ERR(rt))
+ goto no_route;
+- if (connected)
++ if (connected) {
+ sk_setup_caps(sk, &rt->dst);
+- else
+- dst_release(&rt->dst); /* safe since we hold rcu_read_lock */
++ } else {
++ skb_dst_set(skb, &rt->dst);
++ goto xmit;
++ }
+ }
+
+ /* We dont need to clone dst here, it is guaranteed to not disappear.
+@@ -526,6 +528,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
+ */
+ skb_dst_set_noref(skb, &rt->dst);
+
++xmit:
+ /* Queue the packet to IP for output */
+ rc = ip_queue_xmit(skb, &inet->cork.fl);
+ rcu_read_unlock();
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 064d20f..cda4875 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2389,7 +2389,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
+ * frames that we didn't handle, including returning unknown
+ * ones. For all other modes we will return them to the sender,
+ * setting the 0x80 bit in the action category, as required by
+- * 802.11-2007 7.3.1.11.
++ * 802.11-2012 9.24.4.
+ * Newer versions of hostapd shall also use the management frame
+ * registration mechanisms, but older ones still use cooked
+ * monitor interfaces so push all frames there.
+@@ -2399,6 +2399,9 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
+ return RX_DROP_MONITOR;
+
++ if (is_multicast_ether_addr(mgmt->da))
++ return RX_DROP_MONITOR;
++
+ /* do not return rejected action frames */
+ if (mgmt->u.action.category & 0x80)
+ return RX_DROP_UNUSABLE;
+diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
+index 96633f5..12b6a80 100644
+--- a/net/nfc/nci/ntf.c
++++ b/net/nfc/nci/ntf.c
+@@ -86,7 +86,7 @@ static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
+ nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
+ data += 2;
+
+- nfca_poll->nfcid1_len = *data++;
++ nfca_poll->nfcid1_len = min_t(__u8, *data++, sizeof(nfca_poll->nfcid1));
+
+ nfc_dbg("sens_res 0x%x, nfcid1_len %d",
+ nfca_poll->sens_res,
+@@ -111,7 +111,7 @@ static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
+
+ switch (ntf->rf_interface_type) {
+ case NCI_RF_INTERFACE_ISO_DEP:
+- nfca_poll_iso_dep->rats_res_len = *data++;
++ nfca_poll_iso_dep->rats_res_len = min_t(__u8, *data++, 20);
+ if (nfca_poll_iso_dep->rats_res_len > 0) {
+ memcpy(nfca_poll_iso_dep->rats_res,
+ data,
+diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
+index ee7b2b3..7a167fc 100644
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -52,7 +52,10 @@ static int rawsock_release(struct socket *sock)
+ {
+ struct sock *sk = sock->sk;
+
+- nfc_dbg("sock=%p", sock);
++ nfc_dbg("sock=%p sk=%p", sock, sk);
++
++ if (!sk)
++ return 0;
+
+ sock_orphan(sk);
+ sock_put(sk);
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 7b7a516..2b973f5 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -4457,7 +4457,7 @@ static int stac92xx_init(struct hda_codec *codec)
+ AC_PINCTL_IN_EN);
+ for (i = 0; i < spec->num_pwrs; i++) {
+ hda_nid_t nid = spec->pwr_nids[i];
+- int pinctl, def_conf;
++ unsigned int pinctl, def_conf;
+
+ /* power on when no jack detection is available */
+ /* or when the VREF is used for controlling LED */
+@@ -4484,7 +4484,7 @@ static int stac92xx_init(struct hda_codec *codec)
+ def_conf = get_defcfg_connect(def_conf);
+ /* skip any ports that don't have jacks since presence
+ * detection is useless */
+- if (def_conf != AC_JACK_PORT_NONE &&
++ if (def_conf != AC_JACK_PORT_COMPLEX ||
+ !is_jack_detectable(codec, nid)) {
+ stac_toggle_power_map(codec, nid, 1);
+ continue;
+diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
+index 87d5ef1..8b48801 100644
+--- a/sound/soc/codecs/tlv320aic3x.c
++++ b/sound/soc/codecs/tlv320aic3x.c
+@@ -963,9 +963,7 @@ static int aic3x_hw_params(struct snd_pcm_substream *substream,
+ }
+
+ found:
+- data = snd_soc_read(codec, AIC3X_PLL_PROGA_REG);
+- snd_soc_write(codec, AIC3X_PLL_PROGA_REG,
+- data | (pll_p << PLLP_SHIFT));
++ snd_soc_update_bits(codec, AIC3X_PLL_PROGA_REG, PLLP_MASK, pll_p);
+ snd_soc_write(codec, AIC3X_OVRF_STATUS_AND_PLLR_REG,
+ pll_r << PLLR_SHIFT);
+ snd_soc_write(codec, AIC3X_PLL_PROGB_REG, pll_j << PLLJ_SHIFT);
+diff --git a/sound/soc/codecs/tlv320aic3x.h b/sound/soc/codecs/tlv320aic3x.h
+index 06a1978..16d9999 100644
+--- a/sound/soc/codecs/tlv320aic3x.h
++++ b/sound/soc/codecs/tlv320aic3x.h
+@@ -166,6 +166,7 @@
+
+ /* PLL registers bitfields */
+ #define PLLP_SHIFT 0
++#define PLLP_MASK 7
+ #define PLLQ_SHIFT 3
+ #define PLLR_SHIFT 0
+ #define PLLJ_SHIFT 2
diff --git a/1023_linux-3.2.24.patch b/1023_linux-3.2.24.patch
new file mode 100644
index 00000000..4692eb4d
--- /dev/null
+++ b/1023_linux-3.2.24.patch
@@ -0,0 +1,4684 @@
+diff --git a/Makefile b/Makefile
+index 40d1e3b..80bb4fd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 23
++SUBLEVEL = 24
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
+index 33ecd0c..b1e05cc 100644
+--- a/arch/arm/plat-samsung/adc.c
++++ b/arch/arm/plat-samsung/adc.c
+@@ -157,11 +157,13 @@ int s3c_adc_start(struct s3c_adc_client *client,
+ return -EINVAL;
+ }
+
+- if (client->is_ts && adc->ts_pend)
+- return -EAGAIN;
+-
+ spin_lock_irqsave(&adc->lock, flags);
+
++ if (client->is_ts && adc->ts_pend) {
++ spin_unlock_irqrestore(&adc->lock, flags);
++ return -EAGAIN;
++ }
++
+ client->channel = channel;
+ client->nr_samples = nr_samples;
+
+diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
+index 97f8bf6..adda036 100644
+--- a/arch/mips/include/asm/thread_info.h
++++ b/arch/mips/include/asm/thread_info.h
+@@ -60,6 +60,8 @@ struct thread_info {
+ register struct thread_info *__current_thread_info __asm__("$28");
+ #define current_thread_info() __current_thread_info
+
++#endif /* !__ASSEMBLY__ */
++
+ /* thread information allocation */
+ #if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT)
+ #define THREAD_SIZE_ORDER (1)
+@@ -97,8 +99,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
+
+ #define free_thread_info(info) kfree(info)
+
+-#endif /* !__ASSEMBLY__ */
+-
+ #define PREEMPT_ACTIVE 0x10000000
+
+ /*
+diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
+index a81176f..be281c6 100644
+--- a/arch/mips/kernel/vmlinux.lds.S
++++ b/arch/mips/kernel/vmlinux.lds.S
+@@ -1,5 +1,6 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/page.h>
++#include <asm/thread_info.h>
+ #include <asm-generic/vmlinux.lds.h>
+
+ #undef mips
+@@ -73,7 +74,7 @@ SECTIONS
+ .data : { /* Data */
+ . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
+
+- INIT_TASK_DATA(PAGE_SIZE)
++ INIT_TASK_DATA(THREAD_SIZE)
+ NOSAVE_DATA
+ CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+ READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
+index 98b7c4b..fa3f921 100644
+--- a/arch/powerpc/include/asm/cputime.h
++++ b/arch/powerpc/include/asm/cputime.h
+@@ -126,11 +126,11 @@ static inline u64 cputime64_to_jiffies64(const cputime_t ct)
+ /*
+ * Convert cputime <-> microseconds
+ */
+-extern u64 __cputime_msec_factor;
++extern u64 __cputime_usec_factor;
+
+ static inline unsigned long cputime_to_usecs(const cputime_t ct)
+ {
+- return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
++ return mulhdu(ct, __cputime_usec_factor);
+ }
+
+ static inline cputime_t usecs_to_cputime(const unsigned long us)
+@@ -143,7 +143,7 @@ static inline cputime_t usecs_to_cputime(const unsigned long us)
+ sec = us / 1000000;
+ if (ct) {
+ ct *= tb_ticks_per_sec;
+- do_div(ct, 1000);
++ do_div(ct, 1000000);
+ }
+ if (sec)
+ ct += (cputime_t) sec * tb_ticks_per_sec;
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index 5db163c..ec8affe 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -168,13 +168,13 @@ EXPORT_SYMBOL_GPL(ppc_tb_freq);
+ #ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ /*
+ * Factors for converting from cputime_t (timebase ticks) to
+- * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
++ * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
+ * These are all stored as 0.64 fixed-point binary fractions.
+ */
+ u64 __cputime_jiffies_factor;
+ EXPORT_SYMBOL(__cputime_jiffies_factor);
+-u64 __cputime_msec_factor;
+-EXPORT_SYMBOL(__cputime_msec_factor);
++u64 __cputime_usec_factor;
++EXPORT_SYMBOL(__cputime_usec_factor);
+ u64 __cputime_sec_factor;
+ EXPORT_SYMBOL(__cputime_sec_factor);
+ u64 __cputime_clockt_factor;
+@@ -192,8 +192,8 @@ static void calc_cputime_factors(void)
+
+ div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
+ __cputime_jiffies_factor = res.result_low;
+- div128_by_32(1000, 0, tb_ticks_per_sec, &res);
+- __cputime_msec_factor = res.result_low;
++ div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
++ __cputime_usec_factor = res.result_low;
+ div128_by_32(1, 0, tb_ticks_per_sec, &res);
+ __cputime_sec_factor = res.result_low;
+ div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 4558f0d..479d03c 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -416,12 +416,14 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
+ return 0;
+ }
+
+- if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
++ if (intsrc->source_irq == 0) {
+ if (acpi_skip_timer_override) {
+- printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
++ printk(PREFIX "BIOS IRQ0 override ignored.\n");
+ return 0;
+ }
+- if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
++
++ if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
++ && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+ intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
+ printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
+ }
+@@ -1327,17 +1329,12 @@ static int __init dmi_disable_acpi(const struct dmi_system_id *d)
+ }
+
+ /*
+- * Force ignoring BIOS IRQ0 pin2 override
++ * Force ignoring BIOS IRQ0 override
+ */
+ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
+ {
+- /*
+- * The ati_ixp4x0_rev() early PCI quirk should have set
+- * the acpi_skip_timer_override flag already:
+- */
+ if (!acpi_skip_timer_override) {
+- WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n");
+- pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n",
++ pr_notice("%s detected: Ignoring BIOS IRQ0 override\n",
+ d->ident);
+ acpi_skip_timer_override = 1;
+ }
+@@ -1431,7 +1428,7 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
+ * is enabled. This input is incorrectly designated the
+ * ISA IRQ 0 via an interrupt source override even though
+ * it is wired to the output of the master 8259A and INTIN0
+- * is not connected at all. Force ignoring BIOS IRQ0 pin2
++ * is not connected at all. Force ignoring BIOS IRQ0
+ * override in that cases.
+ */
+ {
+@@ -1466,6 +1463,14 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
+ },
+ },
++ {
++ .callback = dmi_ignore_irq0_timer_override,
++ .ident = "FUJITSU SIEMENS",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
++ },
++ },
+ {}
+ };
+
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 37a458b..e61f79c 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -460,6 +460,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
+ },
+ },
++ { /* Handle problems with rebooting on the Precision M6600. */
++ .callback = set_pci_reboot,
++ .ident = "Dell OptiPlex 990",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
++ },
++ },
+ { }
+ };
+
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index 688be8a..9e76a32 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -721,11 +721,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
+ break;
+ }
+
++ if (capable(CAP_SYS_RAWIO))
++ return 0;
++
+ /* In particular, rule out all resets and host-specific ioctls. */
+ printk_ratelimited(KERN_WARNING
+ "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
+
+- return capable(CAP_SYS_RAWIO) ? 0 : -ENOTTY;
++ return -ENOTTY;
+ }
+ EXPORT_SYMBOL(scsi_verify_blk_ioctl);
+
+diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
+index c850de4..eff7222 100644
+--- a/drivers/acpi/processor_core.c
++++ b/drivers/acpi/processor_core.c
+@@ -189,10 +189,12 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+ * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
+ * }
+ *
+- * Ignores apic_id and always return 0 for CPU0's handle.
++ * Ignores apic_id and always returns 0 for the processor
++ * handle with acpi id 0 if nr_cpu_ids is 1.
++ * This should be the case if SMP tables are not found.
+ * Return -1 for other CPU's handle.
+ */
+- if (acpi_id == 0)
++ if (nr_cpu_ids <= 1 && acpi_id == 0)
+ return acpi_id;
+ else
+ return apic_id;
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index ca191ff..ed6bc52 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -702,8 +702,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
+ * can wake the system. _S0W may be valid, too.
+ */
+ if (acpi_target_sleep_state == ACPI_STATE_S0 ||
+- (device_may_wakeup(dev) &&
+- adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
++ (device_may_wakeup(dev) && adev->wakeup.flags.valid &&
++ adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
+ acpi_status status;
+
+ acpi_method[3] = 'W';
+diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
+index 9f66181..240a244 100644
+--- a/drivers/acpi/sysfs.c
++++ b/drivers/acpi/sysfs.c
+@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
+ {
+ int result = 0;
+
+- if (!strncmp(val, "enable", strlen("enable") - 1)) {
++ if (!strncmp(val, "enable", strlen("enable"))) {
+ result = acpi_debug_trace(trace_method_name, trace_debug_level,
+ trace_debug_layer, 0);
+ if (result)
+@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
+ goto exit;
+ }
+
+- if (!strncmp(val, "disable", strlen("disable") - 1)) {
++ if (!strncmp(val, "disable", strlen("disable"))) {
+ int name = 0;
+ result = acpi_debug_trace((char *)&name, trace_debug_level,
+ trace_debug_layer, 0);
+diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
+index 96198f3..a2da8f2 100644
+--- a/drivers/gpio/gpio-wm8994.c
++++ b/drivers/gpio/gpio-wm8994.c
+@@ -89,8 +89,11 @@ static int wm8994_gpio_direction_out(struct gpio_chip *chip,
+ struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
+ struct wm8994 *wm8994 = wm8994_gpio->wm8994;
+
++ if (value)
++ value = WM8994_GPN_LVL;
++
+ return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
+- WM8994_GPN_DIR, 0);
++ WM8994_GPN_DIR | WM8994_GPN_LVL, value);
+ }
+
+ static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 6aa7716..cc75c4b 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8043,8 +8043,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
+ I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ if (intel_enable_rc6(dev_priv->dev))
+- rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
+- GEN6_RC_CTL_RC6_ENABLE;
++ rc6_mask = GEN6_RC_CTL_RC6_ENABLE |
++ ((IS_GEN7(dev_priv->dev)) ? GEN6_RC_CTL_RC6p_ENABLE : 0);
+
+ I915_WRITE(GEN6_RC_CONTROL,
+ rc6_mask |
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 299d238..899c712 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -514,6 +514,12 @@ static const struct hid_device_id apple_devices[] = {
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
++ .driver_data = APPLE_HAS_FN },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
++ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
++ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index c27b402..95430a0 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1374,6 +1374,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
+@@ -1884,6 +1887,7 @@ static const struct hid_device_id hid_ignore_list[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
+@@ -1968,6 +1972,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ { }
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index fba3fc4..7db934d 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -125,6 +125,9 @@
+ #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
+ #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
+ #define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
+@@ -491,6 +494,9 @@
+ #define USB_DEVICE_ID_CRYSTALTOUCH 0x0006
+ #define USB_DEVICE_ID_CRYSTALTOUCH_DUAL 0x0007
+
++#define USB_VENDOR_ID_MADCATZ 0x0738
++#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
++
+ #define USB_VENDOR_ID_MCC 0x09db
+ #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
+ #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
+diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
+index d912649..1ba7af2 100644
+--- a/drivers/hwmon/it87.c
++++ b/drivers/hwmon/it87.c
+@@ -2086,7 +2086,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)
+
+ /* Start monitoring */
+ it87_write_value(data, IT87_REG_CONFIG,
+- (it87_read_value(data, IT87_REG_CONFIG) & 0x36)
++ (it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
+ | (update_vbat ? 0x41 : 0x01));
+ }
+
+diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
+index 61c9cf1..1201a15 100644
+--- a/drivers/hwspinlock/hwspinlock_core.c
++++ b/drivers/hwspinlock/hwspinlock_core.c
+@@ -345,7 +345,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
+ spin_lock_init(&hwlock->lock);
+ hwlock->bank = bank;
+
+- ret = hwspin_lock_register_single(hwlock, i);
++ ret = hwspin_lock_register_single(hwlock, base_id + i);
+ if (ret)
+ goto reg_failed;
+ }
+@@ -354,7 +354,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
+
+ reg_failed:
+ while (--i >= 0)
+- hwspin_lock_unregister_single(i);
++ hwspin_lock_unregister_single(base_id + i);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(hwspin_lock_register);
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index d728875..2189cbf 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -142,6 +142,7 @@ static const struct xpad_device {
+ { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
+ { 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
+ { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
++ { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
+ { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
+ { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
+@@ -164,6 +165,7 @@ static const struct xpad_device {
+ { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
++ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
+ { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
+ };
+@@ -238,12 +240,14 @@ static struct usb_device_id xpad_table [] = {
+ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
+ XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
++ { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
+ XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
+ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
+ XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
+- XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
++ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
++ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
+ { }
+ };
+
+diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
+index 5ec617e..ec58f48 100644
+--- a/drivers/input/mouse/bcm5974.c
++++ b/drivers/input/mouse/bcm5974.c
+@@ -79,6 +79,10 @@
+ #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
+ #define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
+ #define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
++/* MacbookPro10,1 (unibody, June 2012) */
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
++#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
+
+ #define BCM5974_DEVICE(prod) { \
+ .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
+@@ -128,6 +132,10 @@ static const struct usb_device_id bcm5974_table[] = {
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
++ /* MacbookPro10,1 */
++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
+ /* Terminating entry */
+ {}
+ };
+@@ -354,6 +362,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
+ { DIM_X, DIM_X / SN_COORD, -4620, 5140 },
+ { DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
+ },
++ {
++ USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI,
++ USB_DEVICE_ID_APPLE_WELLSPRING7_ISO,
++ USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
++ HAS_INTEGRATED_BUTTON,
++ 0x84, sizeof(struct bt_data),
++ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
++ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
++ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
++ { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
++ { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
++ },
+ {}
+ };
+
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index f1d5408..a1b8caa 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -59,6 +59,8 @@ static struct protection_domain *pt_domain;
+
+ static struct iommu_ops amd_iommu_ops;
+
++static struct dma_map_ops amd_iommu_dma_ops;
++
+ /*
+ * general struct to manage commands send to an IOMMU
+ */
+@@ -1878,6 +1880,11 @@ static int device_change_notifier(struct notifier_block *nb,
+ list_add_tail(&dma_domain->list, &iommu_pd_list);
+ spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+
++ if (!iommu_pass_through)
++ dev->archdata.dma_ops = &amd_iommu_dma_ops;
++ else
++ dev->archdata.dma_ops = &nommu_dma_ops;
++
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 6269eb0..ef2d493 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -1468,6 +1468,8 @@ static int __init amd_iommu_init(void)
+
+ register_syscore_ops(&amd_iommu_syscore_ops);
+
++ x86_platform.iommu_shutdown = disable_iommus;
++
+ if (iommu_pass_through)
+ goto out;
+
+@@ -1476,7 +1478,6 @@ static int __init amd_iommu_init(void)
+ else
+ printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
+
+- x86_platform.iommu_shutdown = disable_iommus;
+ out:
+ return ret;
+
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index 9bfd057..dae2b7a 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -1080,6 +1080,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ ti->split_io = dm_rh_get_region_size(ms->rh);
+ ti->num_flush_requests = 1;
+ ti->num_discard_requests = 1;
++ ti->discard_zeroes_data_unsupported = 1;
+
+ ms->kmirrord_wq = alloc_workqueue("kmirrord",
+ WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
+@@ -1210,7 +1211,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
+ * We need to dec pending if this was a write.
+ */
+ if (rw == WRITE) {
+- if (!(bio->bi_rw & REQ_FLUSH))
++ if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
+ dm_rh_dec(ms->rh, map_context->ll);
+ return error;
+ }
+diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
+index 7771ed2..69732e0 100644
+--- a/drivers/md/dm-region-hash.c
++++ b/drivers/md/dm-region-hash.c
+@@ -404,6 +404,9 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
+ return;
+ }
+
++ if (bio->bi_rw & REQ_DISCARD)
++ return;
++
+ /* We must inform the log that the sync count has changed. */
+ log->type->set_region_sync(log, region, 0);
+
+@@ -524,7 +527,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
+ struct bio *bio;
+
+ for (bio = bios->head; bio; bio = bio->bi_next) {
+- if (bio->bi_rw & REQ_FLUSH)
++ if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))
+ continue;
+ rh_inc(rh, dm_rh_bio_to_region(rh, bio));
+ }
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 700ecae..d8646d7 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -3700,8 +3700,8 @@ array_state_show(struct mddev *mddev, char *page)
+ return sprintf(page, "%s\n", array_states[st]);
+ }
+
+-static int do_md_stop(struct mddev * mddev, int ro, int is_open);
+-static int md_set_readonly(struct mddev * mddev, int is_open);
++static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
++static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
+ static int do_md_run(struct mddev * mddev);
+ static int restart_array(struct mddev *mddev);
+
+@@ -3717,14 +3717,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ /* stopping an active array */
+ if (atomic_read(&mddev->openers) > 0)
+ return -EBUSY;
+- err = do_md_stop(mddev, 0, 0);
++ err = do_md_stop(mddev, 0, NULL);
+ break;
+ case inactive:
+ /* stopping an active array */
+ if (mddev->pers) {
+ if (atomic_read(&mddev->openers) > 0)
+ return -EBUSY;
+- err = do_md_stop(mddev, 2, 0);
++ err = do_md_stop(mddev, 2, NULL);
+ } else
+ err = 0; /* already inactive */
+ break;
+@@ -3732,7 +3732,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ break; /* not supported yet */
+ case readonly:
+ if (mddev->pers)
+- err = md_set_readonly(mddev, 0);
++ err = md_set_readonly(mddev, NULL);
+ else {
+ mddev->ro = 1;
+ set_disk_ro(mddev->gendisk, 1);
+@@ -3742,7 +3742,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
+ case read_auto:
+ if (mddev->pers) {
+ if (mddev->ro == 0)
+- err = md_set_readonly(mddev, 0);
++ err = md_set_readonly(mddev, NULL);
+ else if (mddev->ro == 1)
+ err = restart_array(mddev);
+ if (err == 0) {
+@@ -5078,15 +5078,17 @@ void md_stop(struct mddev *mddev)
+ }
+ EXPORT_SYMBOL_GPL(md_stop);
+
+-static int md_set_readonly(struct mddev *mddev, int is_open)
++static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+ {
+ int err = 0;
+ mutex_lock(&mddev->open_mutex);
+- if (atomic_read(&mddev->openers) > is_open) {
++ if (atomic_read(&mddev->openers) > !!bdev) {
+ printk("md: %s still in use.\n",mdname(mddev));
+ err = -EBUSY;
+ goto out;
+ }
++ if (bdev)
++ sync_blockdev(bdev);
+ if (mddev->pers) {
+ __md_stop_writes(mddev);
+
+@@ -5108,18 +5110,26 @@ out:
+ * 0 - completely stop and dis-assemble array
+ * 2 - stop but do not disassemble array
+ */
+-static int do_md_stop(struct mddev * mddev, int mode, int is_open)
++static int do_md_stop(struct mddev * mddev, int mode,
++ struct block_device *bdev)
+ {
+ struct gendisk *disk = mddev->gendisk;
+ struct md_rdev *rdev;
+
+ mutex_lock(&mddev->open_mutex);
+- if (atomic_read(&mddev->openers) > is_open ||
++ if (atomic_read(&mddev->openers) > !!bdev ||
+ mddev->sysfs_active) {
+ printk("md: %s still in use.\n",mdname(mddev));
+ mutex_unlock(&mddev->open_mutex);
+ return -EBUSY;
+ }
++ if (bdev)
++ /* It is possible IO was issued on some other
++ * open file which was closed before we took ->open_mutex.
++ * As that was not the last close __blkdev_put will not
++ * have called sync_blockdev, so we must.
++ */
++ sync_blockdev(bdev);
+
+ if (mddev->pers) {
+ if (mddev->ro)
+@@ -5193,7 +5203,7 @@ static void autorun_array(struct mddev *mddev)
+ err = do_md_run(mddev);
+ if (err) {
+ printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
+- do_md_stop(mddev, 0, 0);
++ do_md_stop(mddev, 0, NULL);
+ }
+ }
+
+@@ -6184,11 +6194,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
+ goto done_unlock;
+
+ case STOP_ARRAY:
+- err = do_md_stop(mddev, 0, 1);
++ err = do_md_stop(mddev, 0, bdev);
+ goto done_unlock;
+
+ case STOP_ARRAY_RO:
+- err = md_set_readonly(mddev, 1);
++ err = md_set_readonly(mddev, bdev);
+ goto done_unlock;
+
+ case BLKROSET:
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 7af60ec..2d97bf0 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1713,8 +1713,14 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
+
+ if (atomic_dec_and_test(&r1_bio->remaining)) {
+ /* if we're here, all write(s) have completed, so clean up */
+- md_done_sync(mddev, r1_bio->sectors, 1);
+- put_buf(r1_bio);
++ int s = r1_bio->sectors;
++ if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
++ test_bit(R1BIO_WriteError, &r1_bio->state))
++ reschedule_retry(r1_bio);
++ else {
++ put_buf(r1_bio);
++ md_done_sync(mddev, s, 1);
++ }
+ }
+ }
+
+@@ -2378,9 +2384,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
+ */
+ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ atomic_set(&r1_bio->remaining, read_targets);
+- for (i=0; i<conf->raid_disks; i++) {
++ for (i = 0; i < conf->raid_disks && read_targets; i++) {
+ bio = r1_bio->bios[i];
+ if (bio->bi_end_io == end_sync_read) {
++ read_targets--;
+ md_sync_acct(bio->bi_bdev, nr_sectors);
+ generic_make_request(bio);
+ }
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 6ba4954..26ef63a 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
+ BUG_ON(!list_empty(&sh->lru));
+ BUG_ON(atomic_read(&conf->active_stripes)==0);
+ if (test_bit(STRIPE_HANDLE, &sh->state)) {
+- if (test_bit(STRIPE_DELAYED, &sh->state))
++ if (test_bit(STRIPE_DELAYED, &sh->state) &&
++ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ list_add_tail(&sh->lru, &conf->delayed_list);
+ else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
+ sh->bm_seq - conf->seq_write > 0)
+ list_add_tail(&sh->lru, &conf->bitmap_list);
+ else {
++ clear_bit(STRIPE_DELAYED, &sh->state);
+ clear_bit(STRIPE_BIT_DELAY, &sh->state);
+ list_add_tail(&sh->lru, &conf->handle_list);
+ }
+diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
+index f732877..d5cda35 100644
+--- a/drivers/media/dvb/dvb-core/dvbdev.c
++++ b/drivers/media/dvb/dvb-core/dvbdev.c
+@@ -243,6 +243,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
+ if (minor == MAX_DVB_MINORS) {
+ kfree(dvbdevfops);
+ kfree(dvbdev);
++ up_write(&minor_rwsem);
+ mutex_unlock(&dvbdev_register_lock);
+ return -EINVAL;
+ }
+diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
+index 34c03be..83e8e1b 100644
+--- a/drivers/mtd/nand/nandsim.c
++++ b/drivers/mtd/nand/nandsim.c
+@@ -28,7 +28,7 @@
+ #include <linux/module.h>
+ #include <linux/moduleparam.h>
+ #include <linux/vmalloc.h>
+-#include <asm/div64.h>
++#include <linux/math64.h>
+ #include <linux/slab.h>
+ #include <linux/errno.h>
+ #include <linux/string.h>
+@@ -547,12 +547,6 @@ static char *get_partition_name(int i)
+ return kstrdup(buf, GFP_KERNEL);
+ }
+
+-static uint64_t divide(uint64_t n, uint32_t d)
+-{
+- do_div(n, d);
+- return n;
+-}
+-
+ /*
+ * Initialize the nandsim structure.
+ *
+@@ -581,7 +575,7 @@ static int init_nandsim(struct mtd_info *mtd)
+ ns->geom.oobsz = mtd->oobsize;
+ ns->geom.secsz = mtd->erasesize;
+ ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
+- ns->geom.pgnum = divide(ns->geom.totsz, ns->geom.pgsz);
++ ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
+ ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
+ ns->geom.secshift = ffs(ns->geom.secsz) - 1;
+ ns->geom.pgshift = chip->page_shift;
+@@ -924,7 +918,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
+
+ if (!rptwear)
+ return 0;
+- wear_eb_count = divide(mtd->size, mtd->erasesize);
++ wear_eb_count = div_u64(mtd->size, mtd->erasesize);
+ mem = wear_eb_count * sizeof(unsigned long);
+ if (mem / sizeof(unsigned long) != wear_eb_count) {
+ NS_ERR("Too many erase blocks for wear reporting\n");
+diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
+index 3680aa2..2cf084e 100644
+--- a/drivers/net/bonding/bond_debugfs.c
++++ b/drivers/net/bonding/bond_debugfs.c
+@@ -6,7 +6,7 @@
+ #include "bonding.h"
+ #include "bond_alb.h"
+
+-#ifdef CONFIG_DEBUG_FS
++#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
+
+ #include <linux/debugfs.h>
+ #include <linux/seq_file.h>
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 1a88e38..6c284d1 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3184,6 +3184,12 @@ static int bond_master_netdev_event(unsigned long event,
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ return bond_event_changename(event_bond);
++ case NETDEV_UNREGISTER:
++ bond_remove_proc_entry(event_bond);
++ break;
++ case NETDEV_REGISTER:
++ bond_create_proc_entry(event_bond);
++ break;
+ default:
+ break;
+ }
+@@ -4391,8 +4397,6 @@ static void bond_uninit(struct net_device *bond_dev)
+
+ bond_work_cancel_all(bond);
+
+- bond_remove_proc_entry(bond);
+-
+ bond_debug_unregister(bond);
+
+ __hw_addr_flush(&bond->mc_list);
+@@ -4794,7 +4798,6 @@ static int bond_init(struct net_device *bond_dev)
+
+ bond_set_lockdep_class(bond_dev);
+
+- bond_create_proc_entry(bond);
+ list_add_tail(&bond->bond_list, &bn->dev_list);
+
+ bond_prepare_sysfs_group(bond);
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+index eccdcff..5ae7df7 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -267,7 +267,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
+ dev_warn(&pdev->dev, "stop mac failed\n");
+ atl1c_set_aspm(hw, false);
+ netif_carrier_off(netdev);
+- netif_stop_queue(netdev);
+ atl1c_phy_reset(hw);
+ atl1c_phy_init(&adapter->hw);
+ } else {
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index aec7212..8dda46a 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -723,21 +723,6 @@ struct bnx2x_fastpath {
+
+ #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
+
+-#define BNX2X_IP_CSUM_ERR(cqe) \
+- (!((cqe)->fast_path_cqe.status_flags & \
+- ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
+- ((cqe)->fast_path_cqe.type_error_flags & \
+- ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
+-
+-#define BNX2X_L4_CSUM_ERR(cqe) \
+- (!((cqe)->fast_path_cqe.status_flags & \
+- ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
+- ((cqe)->fast_path_cqe.type_error_flags & \
+- ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+-
+-#define BNX2X_RX_CSUM_OK(cqe) \
+- (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
+-
+ #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
+ (((le16_to_cpu(flags) & \
+ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 580b44e..2c1a5c0 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -220,7 +220,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
+
+ if ((netif_tx_queue_stopped(txq)) &&
+ (bp->state == BNX2X_STATE_OPEN) &&
+- (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
++ (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
+ netif_tx_wake_queue(txq);
+
+ __netif_tx_unlock(txq);
+@@ -551,6 +551,26 @@ static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
+ le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
+ }
+
++static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
++ struct bnx2x_fastpath *fp)
++{
++ /* Do nothing if no IP/L4 csum validation was done */
++
++ if (cqe->fast_path_cqe.status_flags &
++ (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
++ ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
++ return;
++
++ /* If both IP/L4 validation were done, check if an error was found. */
++
++ if (cqe->fast_path_cqe.type_error_flags &
++ (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
++ ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
++ fp->eth_q_stats.hw_csum_err++;
++ else
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++}
++
+ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+ {
+ struct bnx2x *bp = fp->bp;
+@@ -746,13 +766,9 @@ reuse_rx:
+
+ skb_checksum_none_assert(skb);
+
+- if (bp->dev->features & NETIF_F_RXCSUM) {
++ if (bp->dev->features & NETIF_F_RXCSUM)
++ bnx2x_csum_validate(skb, cqe, fp);
+
+- if (likely(BNX2X_RX_CSUM_OK(cqe)))
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+- else
+- fp->eth_q_stats.hw_csum_err++;
+- }
+ }
+
+ skb_record_rx_queue(skb, fp->index);
+@@ -2238,8 +2254,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
+ /* we split the first BD into headers and data BDs
+ * to ease the pain of our fellow microcode engineers
+ * we use one mapping for both BDs
+- * So far this has only been observed to happen
+- * in Other Operating Systems(TM)
+ */
+ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata,
+@@ -2890,7 +2904,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ txdata->tx_bd_prod += nbd;
+
+- if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
++ if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
+ netif_tx_stop_queue(txq);
+
+ /* paired memory barrier is in bnx2x_tx_int(), we have to keep
+@@ -2899,7 +2913,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ smp_mb();
+
+ fp->eth_q_stats.driver_xoff++;
+- if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
++ if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
+ netif_tx_wake_queue(txq);
+ }
+ txdata->tx_pkt++;
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 2dcac28..6b258d9 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -14046,7 +14046,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
+ }
+ }
+
+- if (tg3_flag(tp, 5755_PLUS))
++ if (tg3_flag(tp, 5755_PLUS) ||
++ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_flag_set(tp, SHORT_DMA_BUG);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
+index e556fc3..3072d35 100644
+--- a/drivers/net/ethernet/intel/e1000e/82571.c
++++ b/drivers/net/ethernet/intel/e1000e/82571.c
+@@ -1571,6 +1571,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+ rxcw = er32(RXCW);
++ /* SYNCH bit and IV bit are sticky */
++ udelay(10);
++ rxcw = er32(RXCW);
+
+ if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
+
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index cc2565c..9e61d6b 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -4185,6 +4185,7 @@ out:
+ return rc;
+
+ err_out_msi_4:
++ netif_napi_del(&tp->napi);
+ rtl_disable_msi(pdev, tp);
+ iounmap(ioaddr);
+ err_out_free_res_3:
+@@ -4210,6 +4211,8 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
+
+ cancel_delayed_work_sync(&tp->task);
+
++ netif_napi_del(&tp->napi);
++
+ unregister_netdev(dev);
+
+ rtl_release_firmware(tp);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 72cd190..d4d2bc1 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1174,6 +1174,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
+ wmb();
+ priv->hw->desc->set_tx_owner(desc);
++ wmb();
+ }
+
+ /* Interrupt on completition only for the latest segment */
+@@ -1189,6 +1190,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ /* To avoid raise condition */
+ priv->hw->desc->set_tx_owner(first);
++ wmb();
+
+ priv->cur_tx++;
+
+@@ -1252,6 +1254,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
+ }
+ wmb();
+ priv->hw->desc->set_rx_owner(p + entry);
++ wmb();
+ }
+ }
+
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 1b7082d..26106c0 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -504,10 +504,11 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ if (copy > size) {
+ ++from;
+ --count;
+- }
++ offset = 0;
++ } else
++ offset += size;
+ copy -= size;
+ offset1 += size;
+- offset = 0;
+ }
+
+ if (len == offset1)
+@@ -517,24 +518,29 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ struct page *page[MAX_SKB_FRAGS];
+ int num_pages;
+ unsigned long base;
++ unsigned long truesize;
+
+- len = from->iov_len - offset1;
++ len = from->iov_len - offset;
+ if (!len) {
+- offset1 = 0;
++ offset = 0;
+ ++from;
+ continue;
+ }
+- base = (unsigned long)from->iov_base + offset1;
++ base = (unsigned long)from->iov_base + offset;
+ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
++ if (i + size > MAX_SKB_FRAGS)
++ return -EMSGSIZE;
+ num_pages = get_user_pages_fast(base, size, 0, &page[i]);
+- if ((num_pages != size) ||
+- (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
+- /* put_page is in skb free */
++ if (num_pages != size) {
++ for (i = 0; i < num_pages; i++)
++ put_page(page[i]);
+ return -EFAULT;
++ }
++ truesize = size * PAGE_SIZE;
+ skb->data_len += len;
+ skb->len += len;
+- skb->truesize += len;
+- atomic_add(len, &skb->sk->sk_wmem_alloc);
++ skb->truesize += truesize;
++ atomic_add(truesize, &skb->sk->sk_wmem_alloc);
+ while (len) {
+ int off = base & ~PAGE_MASK;
+ int size = min_t(int, len, PAGE_SIZE - off);
+@@ -545,7 +551,7 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ len -= size;
+ i++;
+ }
+- offset1 = 0;
++ offset = 0;
+ ++from;
+ }
+ return 0;
+@@ -645,7 +651,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ int err;
+ struct virtio_net_hdr vnet_hdr = { 0 };
+ int vnet_hdr_len = 0;
+- int copylen;
++ int copylen = 0;
+ bool zerocopy = false;
+
+ if (q->flags & IFF_VNET_HDR) {
+@@ -674,15 +680,31 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ if (unlikely(len < ETH_HLEN))
+ goto err;
+
++ err = -EMSGSIZE;
++ if (unlikely(count > UIO_MAXIOV))
++ goto err;
++
+ if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
+ zerocopy = true;
+
+ if (zerocopy) {
++ /* Userspace may produce vectors with count greater than
++ * MAX_SKB_FRAGS, so we need to linearize parts of the skb
++ * to let the rest of data to be fit in the frags.
++ */
++ if (count > MAX_SKB_FRAGS) {
++ copylen = iov_length(iv, count - MAX_SKB_FRAGS);
++ if (copylen < vnet_hdr_len)
++ copylen = 0;
++ else
++ copylen -= vnet_hdr_len;
++ }
+ /* There are 256 bytes to be copied in skb, so there is enough
+ * room for skb expand head in case it is used.
+ * The rest buffer is mapped from userspace.
+ */
+- copylen = vnet_hdr.hdr_len;
++ if (copylen < vnet_hdr.hdr_len)
++ copylen = vnet_hdr.hdr_len;
+ if (!copylen)
+ copylen = GOODCOPY_LEN;
+ } else
+@@ -693,10 +715,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ if (!skb)
+ goto err;
+
+- if (zerocopy) {
++ if (zerocopy)
+ err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
+- skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+- } else
++ else
+ err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
+ len);
+ if (err)
+@@ -715,8 +736,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ rcu_read_lock_bh();
+ vlan = rcu_dereference_bh(q->vlan);
+ /* copy skb_ubuf_info for callback when skb has no error */
+- if (zerocopy)
++ if (zerocopy) {
+ skb_shinfo(skb)->destructor_arg = m->msg_control;
++ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
++ }
+ if (vlan)
+ macvlan_start_xmit(skb, vlan->dev);
+ else
+diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
+index ad96164..00ed9c1 100644
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -59,6 +59,7 @@
+ #define USB_PRODUCT_IPHONE_3G 0x1292
+ #define USB_PRODUCT_IPHONE_3GS 0x1294
+ #define USB_PRODUCT_IPHONE_4 0x1297
++#define USB_PRODUCT_IPAD 0x129a
+ #define USB_PRODUCT_IPHONE_4_VZW 0x129c
+ #define USB_PRODUCT_IPHONE_4S 0x12a0
+
+@@ -101,6 +102,10 @@ static struct usb_device_id ipheth_table[] = {
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
++ USB_VENDOR_APPLE, USB_PRODUCT_IPAD,
++ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
++ IPHETH_USBINTF_PROTO) },
++ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
+index 833cbef..8a40ff9 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
+@@ -900,8 +900,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
+ */
+ if (!(txs->status & TX_STATUS_AMPDU)
+ && (txs->status & TX_STATUS_INTERMEDIATE)) {
+- wiphy_err(wlc->wiphy, "%s: INTERMEDIATE but not AMPDU\n",
+- __func__);
++ BCMMSG(wlc->wiphy, "INTERMEDIATE but not AMPDU\n");
+ return false;
+ }
+
+diff --git a/drivers/net/wireless/ipw2x00/ipw.h b/drivers/net/wireless/ipw2x00/ipw.h
+new file mode 100644
+index 0000000..4007bf5
+--- /dev/null
++++ b/drivers/net/wireless/ipw2x00/ipw.h
+@@ -0,0 +1,23 @@
++/*
++ * Intel Pro/Wireless 2100, 2200BG, 2915ABG network connection driver
++ *
++ * Copyright 2012 Stanislav Yakovlev <stas.yakovlev@gmail.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __IPW_H__
++#define __IPW_H__
++
++#include <linux/ieee80211.h>
++
++static const u32 ipw_cipher_suites[] = {
++ WLAN_CIPHER_SUITE_WEP40,
++ WLAN_CIPHER_SUITE_WEP104,
++ WLAN_CIPHER_SUITE_TKIP,
++ WLAN_CIPHER_SUITE_CCMP,
++};
++
++#endif
+diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
+index 127e9c6..10862d4 100644
+--- a/drivers/net/wireless/ipw2x00/ipw2100.c
++++ b/drivers/net/wireless/ipw2x00/ipw2100.c
+@@ -166,6 +166,7 @@ that only one external action is invoked at a time.
+ #include <net/lib80211.h>
+
+ #include "ipw2100.h"
++#include "ipw.h"
+
+ #define IPW2100_VERSION "git-1.2.2"
+
+@@ -1955,6 +1956,9 @@ static int ipw2100_wdev_init(struct net_device *dev)
+ wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
+ }
+
++ wdev->wiphy->cipher_suites = ipw_cipher_suites;
++ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
++
+ set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
+ if (wiphy_register(wdev->wiphy)) {
+ ipw2100_down(priv);
+diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
+index 827889b..56bd370 100644
+--- a/drivers/net/wireless/ipw2x00/ipw2200.c
++++ b/drivers/net/wireless/ipw2x00/ipw2200.c
+@@ -34,6 +34,7 @@
+ #include <linux/slab.h>
+ #include <net/cfg80211-wext.h>
+ #include "ipw2200.h"
++#include "ipw.h"
+
+
+ #ifndef KBUILD_EXTMOD
+@@ -11535,6 +11536,9 @@ static int ipw_wdev_init(struct net_device *dev)
+ wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
+ }
+
++ wdev->wiphy->cipher_suites = ipw_cipher_suites;
++ wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
++
+ set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
+
+ /* With that information in place, we can now register the wiphy... */
+diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
+index a262c23..0116ca8 100644
+--- a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
++++ b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
+@@ -466,7 +466,7 @@ int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
+ return 0;
+ }
+
+- if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
++ if (priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
+ IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
+ keyconf->keyidx, key_flags);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+@@ -483,7 +483,7 @@ int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
+ sizeof(struct iwl4965_keyinfo));
+ priv->stations[sta_id].sta.key.key_flags =
+ STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+- priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
++ priv->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
+ priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
+index 2bd5659..1bb64c9 100644
+--- a/drivers/net/wireless/iwlegacy/iwl-core.c
++++ b/drivers/net/wireless/iwlegacy/iwl-core.c
+@@ -1884,14 +1884,12 @@ void iwl_legacy_bg_watchdog(unsigned long data)
+ return;
+
+ /* monitor and check for other stuck queues */
+- if (iwl_legacy_is_any_associated(priv)) {
+- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
+- /* skip as we already checked the command queue */
+- if (cnt == priv->cmd_queue)
+- continue;
+- if (iwl_legacy_check_stuck_queue(priv, cnt))
+- return;
+- }
++ for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
++ /* skip as we already checked the command queue */
++ if (cnt == priv->cmd_queue)
++ continue;
++ if (iwl_legacy_check_stuck_queue(priv, cnt))
++ return;
+ }
+
+ mod_timer(&priv->watchdog, jiffies +
+diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
+index 1e31050..ba28807 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
++++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
+@@ -426,8 +426,8 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
+ case QID_RX:
+ if (!rt2x00queue_full(queue))
+ rt2x00queue_for_each_entry(queue,
+- Q_INDEX_DONE,
+ Q_INDEX,
++ Q_INDEX_DONE,
+ NULL,
+ rt2x00usb_kick_rx_entry);
+ break;
+diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/rtl818x/rtl8187/leds.c
+index 2e0de2f..c2d5b49 100644
+--- a/drivers/net/wireless/rtl818x/rtl8187/leds.c
++++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c
+@@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
+ radio_on = true;
+ } else if (radio_on) {
+ radio_on = false;
+- cancel_delayed_work_sync(&priv->led_on);
++ cancel_delayed_work(&priv->led_on);
+ ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
+ }
+ } else if (radio_on) {
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 12d1e81..d024f83 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -742,6 +742,18 @@ static int pci_pm_suspend_noirq(struct device *dev)
+
+ pci_pm_set_unknown_state(pci_dev);
+
++ /*
++ * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
++ * PCI COMMAND register isn't 0, the BIOS assumes that the controller
++ * hasn't been quiesced and tries to turn it off. If the controller
++ * is already in D3, this can hang or cause memory corruption.
++ *
++ * Since the value of the COMMAND register doesn't matter once the
++ * device has been suspended, we can safely set it to 0 here.
++ */
++ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
++ pci_write_config_word(pci_dev, PCI_COMMAND, 0);
++
+ return 0;
+ }
+
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index e5b75eb..6d4a531 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1689,11 +1689,6 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
+ if (target_state == PCI_POWER_ERROR)
+ return -EIO;
+
+- /* Some devices mustn't be in D3 during system sleep */
+- if (target_state == PCI_D3hot &&
+- (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP))
+- return 0;
+-
+ pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
+
+ error = pci_set_power_state(dev, target_state);
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 3c56fec..78fda9c 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2940,32 +2940,6 @@ static void __devinit disable_igfx_irq(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
+
+-/*
+- * The Intel 6 Series/C200 Series chipset's EHCI controllers on many
+- * ASUS motherboards will cause memory corruption or a system crash
+- * if they are in D3 while the system is put into S3 sleep.
+- */
+-static void __devinit asus_ehci_no_d3(struct pci_dev *dev)
+-{
+- const char *sys_info;
+- static const char good_Asus_board[] = "P8Z68-V";
+-
+- if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)
+- return;
+- if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK)
+- return;
+- sys_info = dmi_get_system_info(DMI_BOARD_NAME);
+- if (sys_info && memcmp(sys_info, good_Asus_board,
+- sizeof(good_Asus_board) - 1) == 0)
+- return;
+-
+- dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n");
+- dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP;
+- device_set_wakeup_capable(&dev->dev, false);
+-}
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3);
+-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3);
+-
+ static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
+ struct pci_fixup *end)
+ {
+diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
+index 809a3ae..b46ec11 100644
+--- a/drivers/platform/x86/intel_ips.c
++++ b/drivers/platform/x86/intel_ips.c
+@@ -72,6 +72,7 @@
+ #include <linux/string.h>
+ #include <linux/tick.h>
+ #include <linux/timer.h>
++#include <linux/dmi.h>
+ #include <drm/i915_drm.h>
+ #include <asm/msr.h>
+ #include <asm/processor.h>
+@@ -1505,6 +1506,24 @@ static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
+
+ MODULE_DEVICE_TABLE(pci, ips_id_table);
+
++static int ips_blacklist_callback(const struct dmi_system_id *id)
++{
++ pr_info("Blacklisted intel_ips for %s\n", id->ident);
++ return 1;
++}
++
++static const struct dmi_system_id ips_blacklist[] = {
++ {
++ .callback = ips_blacklist_callback,
++ .ident = "HP ProBook",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
++ },
++ },
++ { } /* terminating entry */
++};
++
+ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ {
+ u64 platform_info;
+@@ -1514,6 +1533,9 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ u16 htshi, trc, trc_required_mask;
+ u8 tse;
+
++ if (dmi_check_system(ips_blacklist))
++ return -ENODEV;
++
+ ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
+ if (!ips)
+ return -ENOMEM;
+diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
+index 09e26bf..af1e296 100644
+--- a/drivers/platform/x86/samsung-laptop.c
++++ b/drivers/platform/x86/samsung-laptop.c
+@@ -540,245 +540,34 @@ static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO,
+ get_performance_level, set_performance_level);
+
+
+-static int __init dmi_check_cb(const struct dmi_system_id *id)
+-{
+- pr_info("found laptop model '%s'\n",
+- id->ident);
+- return 1;
+-}
+-
+ static struct dmi_system_id __initdata samsung_dmi_table[] = {
+ {
+- .ident = "N128",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N128"),
+- DMI_MATCH(DMI_BOARD_NAME, "N128"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N130",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N130"),
+- DMI_MATCH(DMI_BOARD_NAME, "N130"),
++ DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */
+ },
+- .callback = dmi_check_cb,
+ },
+ {
+- .ident = "N510",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N510"),
+- DMI_MATCH(DMI_BOARD_NAME, "N510"),
++ DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */
+ },
+- .callback = dmi_check_cb,
+ },
+ {
+- .ident = "X125",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "X125"),
+- DMI_MATCH(DMI_BOARD_NAME, "X125"),
++ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ },
+- .callback = dmi_check_cb,
+ },
+ {
+- .ident = "X120/X170",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "X120/X170"),
+- DMI_MATCH(DMI_BOARD_NAME, "X120/X170"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "NC10",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "NC10"),
+- DMI_MATCH(DMI_BOARD_NAME, "NC10"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "NP-Q45",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"),
+- DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "X360",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
+- DMI_MATCH(DMI_BOARD_NAME, "X360"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R410 Plus",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R410P"),
+- DMI_MATCH(DMI_BOARD_NAME, "R460"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R518",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R518"),
+- DMI_MATCH(DMI_BOARD_NAME, "R518"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R519/R719",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R519/R719"),
+- DMI_MATCH(DMI_BOARD_NAME, "R519/R719"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N150/N210/N220",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
+- DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N220",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N220"),
+- DMI_MATCH(DMI_BOARD_NAME, "N220"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N150/N210/N220/N230",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"),
+- DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N150P/N210P/N220P",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N150P/N210P/N220P"),
+- DMI_MATCH(DMI_BOARD_NAME, "N150P/N210P/N220P"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R700",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "SR700"),
+- DMI_MATCH(DMI_BOARD_NAME, "SR700"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R530/R730",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"),
+- DMI_MATCH(DMI_BOARD_NAME, "R530/R730"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "NF110/NF210/NF310",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"),
+- DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "N145P/N250P/N260P",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
+- DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R70/R71",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR,
+- "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R70/R71"),
+- DMI_MATCH(DMI_BOARD_NAME, "R70/R71"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "P460",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "P460"),
+- DMI_MATCH(DMI_BOARD_NAME, "P460"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "R528/R728",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "R528/R728"),
+- DMI_MATCH(DMI_BOARD_NAME, "R528/R728"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "NC210/NC110",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"),
+- DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
+- },
+- .callback = dmi_check_cb,
+- },
+- {
+- .ident = "X520",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "X520"),
+- DMI_MATCH(DMI_BOARD_NAME, "X520"),
++ DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
+ },
+- .callback = dmi_check_cb,
+ },
+ { },
+ };
+@@ -819,7 +608,8 @@ static int __init samsung_init(void)
+
+ f0000_segment = ioremap_nocache(0xf0000, 0xffff);
+ if (!f0000_segment) {
+- pr_err("Can't map the segment at 0xf0000\n");
++ if (debug || force)
++ pr_err("Can't map the segment at 0xf0000\n");
+ return -EINVAL;
+ }
+
+@@ -832,7 +622,8 @@ static int __init samsung_init(void)
+ }
+
+ if (loca == 0xffff) {
+- pr_err("This computer does not support SABI\n");
++ if (debug || force)
++ pr_err("This computer does not support SABI\n");
+ goto error_no_signature;
+ }
+
+diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
+index 39e41fb..5160354 100644
+--- a/drivers/rtc/rtc-mxc.c
++++ b/drivers/rtc/rtc-mxc.c
+@@ -191,10 +191,11 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
+ struct platform_device *pdev = dev_id;
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
++ unsigned long flags;
+ u32 status;
+ u32 events = 0;
+
+- spin_lock_irq(&pdata->rtc->irq_lock);
++ spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
+ status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR);
+ /* clear interrupt sources */
+ writew(status, ioaddr + RTC_RTCISR);
+@@ -217,7 +218,7 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
+ rtc_update_alarm(&pdev->dev, &pdata->g_rtc_alarm);
+
+ rtc_update_irq(pdata->rtc, 1, events);
+- spin_unlock_irq(&pdata->rtc->irq_lock);
++ spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
+
+ return IRQ_HANDLED;
+ }
+diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
+index 532d212..393e7ce 100644
+--- a/drivers/scsi/aic94xx/aic94xx_task.c
++++ b/drivers/scsi/aic94xx/aic94xx_task.c
+@@ -201,7 +201,7 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb,
+
+ if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
+ resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
+- memcpy(&resp->ending_fis[0], r+16, 24);
++ memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
+ ts->buf_valid_size = sizeof(*resp);
+ }
+ }
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index db9238f..4868fc9 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -112,12 +112,12 @@ static void sas_ata_task_done(struct sas_task *task)
+ if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
+ ((stat->stat == SAM_STAT_CHECK_CONDITION &&
+ dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
+- ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
++ memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
+
+ if (!link->sactive) {
+- qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
++ qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
+ } else {
+- link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
++ link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
+ if (unlikely(link->eh_info.err_mask))
+ qc->flags |= ATA_QCFLAG_FAILED;
+ }
+@@ -138,8 +138,8 @@ static void sas_ata_task_done(struct sas_task *task)
+ qc->flags |= ATA_QCFLAG_FAILED;
+ }
+
+- dev->sata_dev.tf.feature = 0x04; /* status err */
+- dev->sata_dev.tf.command = ATA_ERR;
++ dev->sata_dev.fis[3] = 0x04; /* status err */
++ dev->sata_dev.fis[2] = ATA_ERR;
+ }
+ }
+
+@@ -252,7 +252,7 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
+ {
+ struct domain_device *dev = qc->ap->private_data;
+
+- memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf));
++ ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
+ return true;
+ }
+
+diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
+index 65ea65a..93b9406 100644
+--- a/drivers/target/target_core_cdb.c
++++ b/drivers/target/target_core_cdb.c
+@@ -1199,7 +1199,7 @@ int target_emulate_write_same(struct se_task *task)
+ if (num_blocks != 0)
+ range = num_blocks;
+ else
+- range = (dev->transport->get_blocks(dev) - lba);
++ range = (dev->transport->get_blocks(dev) - lba) + 1;
+
+ pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
+ (unsigned long long)lba, (unsigned long long)range);
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index b75bc92..9145141 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -2042,7 +2042,7 @@ static int __core_scsi3_write_aptpl_to_file(
+ if (IS_ERR(file) || !file || !file->f_dentry) {
+ pr_err("filp_open(%s) for APTPL metadata"
+ " failed\n", path);
+- return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
++ return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
+ }
+
+ iov[0].iov_base = &buf[0];
+@@ -3853,7 +3853,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
+ " SPC-2 reservation is held, returning"
+ " RESERVATION_CONFLICT\n");
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+- ret = EINVAL;
++ ret = -EINVAL;
+ goto out;
+ }
+
+@@ -3863,7 +3863,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
+ */
+ if (!cmd->se_sess) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+- return -EINVAL;
++ ret = -EINVAL;
++ goto out;
+ }
+
+ if (cmd->data_length < 24) {
+diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
+index d95cfe2..278819c 100644
+--- a/drivers/target/tcm_fc/tfc_cmd.c
++++ b/drivers/target/tcm_fc/tfc_cmd.c
+@@ -249,6 +249,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
+ {
+ struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+
++ if (cmd->aborted)
++ return ~0;
+ return fc_seq_exch(cmd->seq)->rxid;
+ }
+
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 19fb5fa..9aaed0d 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -473,6 +473,8 @@ retry:
+ goto retry;
+ }
+ if (!desc->reslength) { /* zero length read */
++ dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
++ clear_bit(WDM_READ, &desc->flags);
+ spin_unlock_irq(&desc->iuspin);
+ goto retry;
+ }
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 52d27ed..175b6bb 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2039,12 +2039,16 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
+ static int hub_port_reset(struct usb_hub *hub, int port1,
+ struct usb_device *udev, unsigned int delay, bool warm);
+
+-/* Is a USB 3.0 port in the Inactive state? */
+-static bool hub_port_inactive(struct usb_hub *hub, u16 portstatus)
++/* Is a USB 3.0 port in the Inactive or Complinance Mode state?
++ * Port worm reset is required to recover
++ */
++static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus)
+ {
+ return hub_is_superspeed(hub->hdev) &&
+- (portstatus & USB_PORT_STAT_LINK_STATE) ==
+- USB_SS_PORT_LS_SS_INACTIVE;
++ (((portstatus & USB_PORT_STAT_LINK_STATE) ==
++ USB_SS_PORT_LS_SS_INACTIVE) ||
++ ((portstatus & USB_PORT_STAT_LINK_STATE) ==
++ USB_SS_PORT_LS_COMP_MOD)) ;
+ }
+
+ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+@@ -2080,7 +2084,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ *
+ * See https://bugzilla.kernel.org/show_bug.cgi?id=41752
+ */
+- if (hub_port_inactive(hub, portstatus)) {
++ if (hub_port_warm_reset_required(hub, portstatus)) {
+ int ret;
+
+ if ((portchange & USB_PORT_STAT_C_CONNECTION))
+@@ -3646,9 +3650,7 @@ static void hub_events(void)
+ /* Warm reset a USB3 protocol port if it's in
+ * SS.Inactive state.
+ */
+- if (hub_is_superspeed(hub->hdev) &&
+- (portstatus & USB_PORT_STAT_LINK_STATE)
+- == USB_SS_PORT_LS_SS_INACTIVE) {
++ if (hub_port_warm_reset_required(hub, portstatus)) {
+ dev_dbg(hub_dev, "warm reset port %d\n", i);
+ hub_port_reset(hub, i, NULL,
+ HUB_BH_RESET_TIME, true);
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index a8b2980..fd8a2c2 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -438,6 +438,42 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
+ }
+ }
+
++/* Updates Link Status for super Speed port */
++static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
++{
++ u32 pls = status_reg & PORT_PLS_MASK;
++
++ /* resume state is a xHCI internal state.
++ * Do not report it to usb core.
++ */
++ if (pls == XDEV_RESUME)
++ return;
++
++ /* When the CAS bit is set then warm reset
++ * should be performed on port
++ */
++ if (status_reg & PORT_CAS) {
++ /* The CAS bit can be set while the port is
++ * in any link state.
++ * Only roothubs have CAS bit, so we
++ * pretend to be in compliance mode
++ * unless we're already in compliance
++ * or the inactive state.
++ */
++ if (pls != USB_SS_PORT_LS_COMP_MOD &&
++ pls != USB_SS_PORT_LS_SS_INACTIVE) {
++ pls = USB_SS_PORT_LS_COMP_MOD;
++ }
++ /* Return also connection bit -
++ * hub state machine resets port
++ * when this bit is set.
++ */
++ pls |= USB_PORT_STAT_CONNECTION;
++ }
++ /* update status field */
++ *status |= pls;
++}
++
+ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+ {
+@@ -579,13 +615,9 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ else
+ status |= USB_PORT_STAT_POWER;
+ }
+- /* Port Link State */
++ /* Update Port Link State for super speed ports*/
+ if (hcd->speed == HCD_USB3) {
+- /* resume state is a xHCI internal state.
+- * Do not report it to usb core.
+- */
+- if ((temp & PORT_PLS_MASK) != XDEV_RESUME)
+- status |= (temp & PORT_PLS_MASK);
++ xhci_hub_report_link_state(&status, temp);
+ }
+ if (bus_state->port_c_suspend & (1 << wIndex))
+ status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 363b141..7a56805 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -341,7 +341,11 @@ struct xhci_op_regs {
+ #define PORT_PLC (1 << 22)
+ /* port configure error change - port failed to configure its link partner */
+ #define PORT_CEC (1 << 23)
+-/* bit 24 reserved */
++/* Cold Attach Status - xHC can set this bit to report device attached during
++ * Sx state. Warm port reset should be perfomed to clear this bit and move port
++ * to connected state.
++ */
++#define PORT_CAS (1 << 24)
+ /* wake on connect (enable) */
+ #define PORT_WKCONN_E (1 << 25)
+ /* wake on disconnect (enable) */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 21a4734..5971c95 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -496,6 +496,15 @@ static void option_instat_callback(struct urb *urb);
+
+ /* MediaTek products */
+ #define MEDIATEK_VENDOR_ID 0x0e8d
++#define MEDIATEK_PRODUCT_DC_1COM 0x00a0
++#define MEDIATEK_PRODUCT_DC_4COM 0x00a5
++#define MEDIATEK_PRODUCT_DC_5COM 0x00a4
++#define MEDIATEK_PRODUCT_7208_1COM 0x7101
++#define MEDIATEK_PRODUCT_7208_2COM 0x7102
++#define MEDIATEK_PRODUCT_FP_1COM 0x0003
++#define MEDIATEK_PRODUCT_FP_2COM 0x0023
++#define MEDIATEK_PRODUCT_FPDC_1COM 0x0043
++#define MEDIATEK_PRODUCT_FPDC_2COM 0x0033
+
+ /* Cellient products */
+ #define CELLIENT_VENDOR_ID 0x2692
+@@ -553,6 +562,10 @@ static const struct option_blacklist_info net_intf1_blacklist = {
+ .reserved = BIT(1),
+ };
+
++static const struct option_blacklist_info net_intf2_blacklist = {
++ .reserved = BIT(2),
++};
++
+ static const struct option_blacklist_info net_intf3_blacklist = {
+ .reserved = BIT(3),
+ };
+@@ -1093,6 +1106,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
+ 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+@@ -1234,6 +1249,17 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_1COM, 0x0a, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_1COM, 0x02, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_2COM, 0x02, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_1COM, 0x0a, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index c14c42b..ae66278 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -222,6 +222,8 @@ static int vhost_worker(void *data)
+ if (work) {
+ __set_current_state(TASK_RUNNING);
+ work->fn(work);
++ if (need_resched())
++ schedule();
+ } else
+ schedule();
+
+diff --git a/fs/buffer.c b/fs/buffer.c
+index c807931..4115eca 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -1087,6 +1087,9 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
+ static struct buffer_head *
+ __getblk_slow(struct block_device *bdev, sector_t block, int size)
+ {
++ int ret;
++ struct buffer_head *bh;
++
+ /* Size must be multiple of hard sectorsize */
+ if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
+ (size < 512 || size > PAGE_SIZE))) {
+@@ -1099,20 +1102,21 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
+ return NULL;
+ }
+
+- for (;;) {
+- struct buffer_head * bh;
+- int ret;
++retry:
++ bh = __find_get_block(bdev, block, size);
++ if (bh)
++ return bh;
+
++ ret = grow_buffers(bdev, block, size);
++ if (ret == 0) {
++ free_more_memory();
++ goto retry;
++ } else if (ret > 0) {
+ bh = __find_get_block(bdev, block, size);
+ if (bh)
+ return bh;
+-
+- ret = grow_buffers(bdev, block, size);
+- if (ret < 0)
+- return NULL;
+- if (ret == 0)
+- free_more_memory();
+ }
++ return NULL;
+ }
+
+ /*
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index b21670c..56c152d 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2925,6 +2925,18 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+ #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
+ #define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
+
++/*
++ * On hosts with high memory, we can't currently support wsize/rsize that are
++ * larger than we can kmap at once. Cap the rsize/wsize at
++ * LAST_PKMAP * PAGE_SIZE. We'll never be able to fill a read or write request
++ * larger than that anyway.
++ */
++#ifdef CONFIG_HIGHMEM
++#define CIFS_KMAP_SIZE_LIMIT (LAST_PKMAP * PAGE_CACHE_SIZE)
++#else /* CONFIG_HIGHMEM */
++#define CIFS_KMAP_SIZE_LIMIT (1<<24)
++#endif /* CONFIG_HIGHMEM */
++
+ static unsigned int
+ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
+ {
+@@ -2955,6 +2967,9 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
+ wsize = min_t(unsigned int, wsize,
+ server->maxBuf - sizeof(WRITE_REQ) + 4);
+
++ /* limit to the amount that we can kmap at once */
++ wsize = min_t(unsigned int, wsize, CIFS_KMAP_SIZE_LIMIT);
++
+ /* hard limit of CIFS_MAX_WSIZE */
+ wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
+
+@@ -2996,6 +3011,9 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
+ if (!(server->capabilities & CAP_LARGE_READ_X))
+ rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
+
++ /* limit to the amount that we can kmap at once */
++ rsize = min_t(unsigned int, rsize, CIFS_KMAP_SIZE_LIMIT);
++
+ /* hard limit of CIFS_MAX_RSIZE */
+ rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
+
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index db4a138..4c37ed4 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -86,9 +86,12 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
+
+ dentry = d_lookup(parent, name);
+ if (dentry) {
+- /* FIXME: check for inode number changes? */
+- if (dentry->d_inode != NULL)
++ inode = dentry->d_inode;
++ /* update inode in place if i_ino didn't change */
++ if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
++ cifs_fattr_to_inode(inode, fattr);
+ return dentry;
++ }
+ d_drop(dentry);
+ dput(dentry);
+ }
+diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
+index 69f994a..0dbe58a 100644
+--- a/fs/ecryptfs/kthread.c
++++ b/fs/ecryptfs/kthread.c
+@@ -149,7 +149,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
+ (*lower_file) = dentry_open(lower_dentry, lower_mnt, flags, cred);
+ if (!IS_ERR(*lower_file))
+ goto out;
+- if (flags & O_RDONLY) {
++ if ((flags & O_ACCMODE) == O_RDONLY) {
+ rc = PTR_ERR((*lower_file));
+ goto out;
+ }
+diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
+index 0dc5a3d..de42310 100644
+--- a/fs/ecryptfs/miscdev.c
++++ b/fs/ecryptfs/miscdev.c
+@@ -49,7 +49,10 @@ ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
+ mutex_lock(&ecryptfs_daemon_hash_mux);
+ /* TODO: Just use file->private_data? */
+ rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
+- BUG_ON(rc || !daemon);
++ if (rc || !daemon) {
++ mutex_unlock(&ecryptfs_daemon_hash_mux);
++ return -EINVAL;
++ }
+ mutex_lock(&daemon->mux);
+ mutex_unlock(&ecryptfs_daemon_hash_mux);
+ if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
+@@ -122,6 +125,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
+ goto out_unlock_daemon;
+ }
+ daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN;
++ file->private_data = daemon;
+ atomic_inc(&ecryptfs_num_miscdev_opens);
+ out_unlock_daemon:
+ mutex_unlock(&daemon->mux);
+@@ -152,9 +156,9 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)
+
+ mutex_lock(&ecryptfs_daemon_hash_mux);
+ rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
+- BUG_ON(rc || !daemon);
++ if (rc || !daemon)
++ daemon = file->private_data;
+ mutex_lock(&daemon->mux);
+- BUG_ON(daemon->pid != task_pid(current));
+ BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN));
+ daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN;
+ atomic_dec(&ecryptfs_num_miscdev_opens);
+@@ -191,31 +195,32 @@ int ecryptfs_send_miscdev(char *data, size_t data_size,
+ struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
+ u16 msg_flags, struct ecryptfs_daemon *daemon)
+ {
+- int rc = 0;
++ struct ecryptfs_message *msg;
+
+- mutex_lock(&msg_ctx->mux);
+- msg_ctx->msg = kmalloc((sizeof(*msg_ctx->msg) + data_size),
+- GFP_KERNEL);
+- if (!msg_ctx->msg) {
+- rc = -ENOMEM;
++ msg = kmalloc((sizeof(*msg) + data_size), GFP_KERNEL);
++ if (!msg) {
+ printk(KERN_ERR "%s: Out of memory whilst attempting "
+ "to kmalloc(%zd, GFP_KERNEL)\n", __func__,
+- (sizeof(*msg_ctx->msg) + data_size));
+- goto out_unlock;
++ (sizeof(*msg) + data_size));
++ return -ENOMEM;
+ }
++
++ mutex_lock(&msg_ctx->mux);
++ msg_ctx->msg = msg;
+ msg_ctx->msg->index = msg_ctx->index;
+ msg_ctx->msg->data_len = data_size;
+ msg_ctx->type = msg_type;
+ memcpy(msg_ctx->msg->data, data, data_size);
+ msg_ctx->msg_size = (sizeof(*msg_ctx->msg) + data_size);
+- mutex_lock(&daemon->mux);
+ list_add_tail(&msg_ctx->daemon_out_list, &daemon->msg_ctx_out_queue);
++ mutex_unlock(&msg_ctx->mux);
++
++ mutex_lock(&daemon->mux);
+ daemon->num_queued_msg_ctx++;
+ wake_up_interruptible(&daemon->wait);
+ mutex_unlock(&daemon->mux);
+-out_unlock:
+- mutex_unlock(&msg_ctx->mux);
+- return rc;
++
++ return 0;
+ }
+
+ /**
+@@ -246,8 +251,16 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
+ mutex_lock(&ecryptfs_daemon_hash_mux);
+ /* TODO: Just use file->private_data? */
+ rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
+- BUG_ON(rc || !daemon);
++ if (rc || !daemon) {
++ mutex_unlock(&ecryptfs_daemon_hash_mux);
++ return -EINVAL;
++ }
+ mutex_lock(&daemon->mux);
++ if (task_pid(current) != daemon->pid) {
++ mutex_unlock(&daemon->mux);
++ mutex_unlock(&ecryptfs_daemon_hash_mux);
++ return -EPERM;
++ }
+ if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
+ rc = 0;
+ mutex_unlock(&ecryptfs_daemon_hash_mux);
+@@ -284,9 +297,6 @@ check_list:
+ * message from the queue; try again */
+ goto check_list;
+ }
+- BUG_ON(euid != daemon->euid);
+- BUG_ON(current_user_ns() != daemon->user_ns);
+- BUG_ON(task_pid(current) != daemon->pid);
+ msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue,
+ struct ecryptfs_msg_ctx, daemon_out_list);
+ BUG_ON(!msg_ctx);
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 4d9d3a4..a6f3763 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1629,8 +1629,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
+ if (op == EPOLL_CTL_ADD) {
+ if (is_file_epoll(tfile)) {
+ error = -ELOOP;
+- if (ep_loop_check(ep, tfile) != 0)
++ if (ep_loop_check(ep, tfile) != 0) {
++ clear_tfile_check_list();
+ goto error_tgt_fput;
++ }
+ } else
+ list_add(&tfile->f_tfile_llink, &tfile_check_list);
+ }
+diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
+index 49cf230..24a49d4 100644
+--- a/fs/exofs/ore.c
++++ b/fs/exofs/ore.c
+@@ -735,13 +735,7 @@ static int _prepare_for_striping(struct ore_io_state *ios)
+ out:
+ ios->numdevs = devs_in_group;
+ ios->pages_consumed = cur_pg;
+- if (unlikely(ret)) {
+- if (length == ios->length)
+- return ret;
+- else
+- ios->length -= length;
+- }
+- return 0;
++ return ret;
+ }
+
+ int ore_create(struct ore_io_state *ios)
+diff --git a/fs/exofs/ore_raid.c b/fs/exofs/ore_raid.c
+index d222c77..fff2070 100644
+--- a/fs/exofs/ore_raid.c
++++ b/fs/exofs/ore_raid.c
+@@ -461,16 +461,12 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
+ * ios->sp2d[p][*], xor is calculated the same way. These pages are
+ * allocated/freed and don't go through cache
+ */
+-static int _read_4_write(struct ore_io_state *ios)
++static int _read_4_write_first_stripe(struct ore_io_state *ios)
+ {
+- struct ore_io_state *ios_read;
+ struct ore_striping_info read_si;
+ struct __stripe_pages_2d *sp2d = ios->sp2d;
+ u64 offset = ios->si.first_stripe_start;
+- u64 last_stripe_end;
+- unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
+- unsigned i, c, p, min_p = sp2d->pages_in_unit, max_p = -1;
+- int ret;
++ unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
+
+ if (offset == ios->offset) /* Go to start collect $200 */
+ goto read_last_stripe;
+@@ -478,6 +474,9 @@ static int _read_4_write(struct ore_io_state *ios)
+ min_p = _sp2d_min_pg(sp2d);
+ max_p = _sp2d_max_pg(sp2d);
+
++ ORE_DBGMSG("stripe_start=0x%llx ios->offset=0x%llx min_p=%d max_p=%d\n",
++ offset, ios->offset, min_p, max_p);
++
+ for (c = 0; ; c++) {
+ ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
+ read_si.obj_offset += min_p * PAGE_SIZE;
+@@ -512,6 +511,18 @@ static int _read_4_write(struct ore_io_state *ios)
+ }
+
+ read_last_stripe:
++ return 0;
++}
++
++static int _read_4_write_last_stripe(struct ore_io_state *ios)
++{
++ struct ore_striping_info read_si;
++ struct __stripe_pages_2d *sp2d = ios->sp2d;
++ u64 offset;
++ u64 last_stripe_end;
++ unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
++ unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
++
+ offset = ios->offset + ios->length;
+ if (offset % PAGE_SIZE)
+ _add_to_r4w_last_page(ios, &offset);
+@@ -527,15 +538,15 @@ read_last_stripe:
+ c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
+ ios->layout->mirrors_p1, read_si.par_dev, read_si.dev);
+
+- BUG_ON(ios->si.first_stripe_start + bytes_in_stripe != last_stripe_end);
+- /* unaligned IO must be within a single stripe */
+-
+ if (min_p == sp2d->pages_in_unit) {
+ /* Didn't do it yet */
+ min_p = _sp2d_min_pg(sp2d);
+ max_p = _sp2d_max_pg(sp2d);
+ }
+
++ ORE_DBGMSG("offset=0x%llx stripe_end=0x%llx min_p=%d max_p=%d\n",
++ offset, last_stripe_end, min_p, max_p);
++
+ while (offset < last_stripe_end) {
+ struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
+
+@@ -568,6 +579,15 @@ read_last_stripe:
+ }
+
+ read_it:
++ return 0;
++}
++
++static int _read_4_write_execute(struct ore_io_state *ios)
++{
++ struct ore_io_state *ios_read;
++ unsigned i;
++ int ret;
++
+ ios_read = ios->ios_read_4_write;
+ if (!ios_read)
+ return 0;
+@@ -591,6 +611,8 @@ read_it:
+ }
+
+ _mark_read4write_pages_uptodate(ios_read, ret);
++ ore_put_io_state(ios_read);
++ ios->ios_read_4_write = NULL; /* Might need a reuse at last stripe */
+ return 0;
+ }
+
+@@ -626,8 +648,11 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
+ /* If first stripe, Read in all read4write pages
+ * (if needed) before we calculate the first parity.
+ */
+- _read_4_write(ios);
++ _read_4_write_first_stripe(ios);
+ }
++ if (!cur_len) /* If last stripe r4w pages of last stripe */
++ _read_4_write_last_stripe(ios);
++ _read_4_write_execute(ios);
+
+ for (i = 0; i < num_pages; i++) {
+ pages[i] = _raid_page_alloc();
+@@ -654,34 +679,14 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
+
+ int _ore_post_alloc_raid_stuff(struct ore_io_state *ios)
+ {
+- struct ore_layout *layout = ios->layout;
+-
+ if (ios->parity_pages) {
++ struct ore_layout *layout = ios->layout;
+ unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;
+- unsigned stripe_size = ios->si.bytes_in_stripe;
+- u64 last_stripe, first_stripe;
+
+ if (_sp2d_alloc(pages_in_unit, layout->group_width,
+ layout->parity, &ios->sp2d)) {
+ return -ENOMEM;
+ }
+-
+- /* Round io down to last full strip */
+- first_stripe = div_u64(ios->offset, stripe_size);
+- last_stripe = div_u64(ios->offset + ios->length, stripe_size);
+-
+- /* If an IO spans more then a single stripe it must end at
+- * a stripe boundary. The reminder at the end is pushed into the
+- * next IO.
+- */
+- if (last_stripe != first_stripe) {
+- ios->length = last_stripe * stripe_size - ios->offset;
+-
+- BUG_ON(!ios->length);
+- ios->nr_pages = (ios->length + PAGE_SIZE - 1) /
+- PAGE_SIZE;
+- ios->si.length = ios->length; /*make it consistent */
+- }
+ }
+ return 0;
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index ab7aa3f..a93486e 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1097,7 +1097,7 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
+ }
+ if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) {
+ seq_printf(seq, ",max_batch_time=%u",
+- (unsigned) sbi->s_min_batch_time);
++ (unsigned) sbi->s_max_batch_time);
+ }
+
+ /*
+diff --git a/fs/fifo.c b/fs/fifo.c
+index b1a524d..cf6f434 100644
+--- a/fs/fifo.c
++++ b/fs/fifo.c
+@@ -14,7 +14,7 @@
+ #include <linux/sched.h>
+ #include <linux/pipe_fs_i.h>
+
+-static void wait_for_partner(struct inode* inode, unsigned int *cnt)
++static int wait_for_partner(struct inode* inode, unsigned int *cnt)
+ {
+ int cur = *cnt;
+
+@@ -23,6 +23,7 @@ static void wait_for_partner(struct inode* inode, unsigned int *cnt)
+ if (signal_pending(current))
+ break;
+ }
++ return cur == *cnt ? -ERESTARTSYS : 0;
+ }
+
+ static void wake_up_partner(struct inode* inode)
+@@ -67,8 +68,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
+ * seen a writer */
+ filp->f_version = pipe->w_counter;
+ } else {
+- wait_for_partner(inode, &pipe->w_counter);
+- if(signal_pending(current))
++ if (wait_for_partner(inode, &pipe->w_counter))
+ goto err_rd;
+ }
+ }
+@@ -90,8 +90,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
+ wake_up_partner(inode);
+
+ if (!pipe->readers) {
+- wait_for_partner(inode, &pipe->r_counter);
+- if (signal_pending(current))
++ if (wait_for_partner(inode, &pipe->r_counter))
+ goto err_wr;
+ }
+ break;
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 2d0ca24..ebc2f4d 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -592,9 +592,15 @@ static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ spin_lock(&sbinfo->stat_lock);
+ /* If no limits set, just report 0 for max/free/used
+ * blocks, like simple_statfs() */
+- if (sbinfo->max_blocks >= 0) {
+- buf->f_blocks = sbinfo->max_blocks;
+- buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
++ if (sbinfo->spool) {
++ long free_pages;
++
++ spin_lock(&sbinfo->spool->lock);
++ buf->f_blocks = sbinfo->spool->max_hpages;
++ free_pages = sbinfo->spool->max_hpages
++ - sbinfo->spool->used_hpages;
++ buf->f_bavail = buf->f_bfree = free_pages;
++ spin_unlock(&sbinfo->spool->lock);
+ buf->f_files = sbinfo->max_inodes;
+ buf->f_ffree = sbinfo->free_inodes;
+ }
+@@ -610,6 +616,10 @@ static void hugetlbfs_put_super(struct super_block *sb)
+
+ if (sbi) {
+ sb->s_fs_info = NULL;
++
++ if (sbi->spool)
++ hugepage_put_subpool(sbi->spool);
++
+ kfree(sbi);
+ }
+ }
+@@ -841,10 +851,14 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
+ sb->s_fs_info = sbinfo;
+ sbinfo->hstate = config.hstate;
+ spin_lock_init(&sbinfo->stat_lock);
+- sbinfo->max_blocks = config.nr_blocks;
+- sbinfo->free_blocks = config.nr_blocks;
+ sbinfo->max_inodes = config.nr_inodes;
+ sbinfo->free_inodes = config.nr_inodes;
++ sbinfo->spool = NULL;
++ if (config.nr_blocks != -1) {
++ sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
++ if (!sbinfo->spool)
++ goto out_free;
++ }
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_blocksize = huge_page_size(config.hstate);
+ sb->s_blocksize_bits = huge_page_shift(config.hstate);
+@@ -864,38 +878,12 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
+ sb->s_root = root;
+ return 0;
+ out_free:
++ if (sbinfo->spool)
++ kfree(sbinfo->spool);
+ kfree(sbinfo);
+ return -ENOMEM;
+ }
+
+-int hugetlb_get_quota(struct address_space *mapping, long delta)
+-{
+- int ret = 0;
+- struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
+-
+- if (sbinfo->free_blocks > -1) {
+- spin_lock(&sbinfo->stat_lock);
+- if (sbinfo->free_blocks - delta >= 0)
+- sbinfo->free_blocks -= delta;
+- else
+- ret = -ENOMEM;
+- spin_unlock(&sbinfo->stat_lock);
+- }
+-
+- return ret;
+-}
+-
+-void hugetlb_put_quota(struct address_space *mapping, long delta)
+-{
+- struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
+-
+- if (sbinfo->free_blocks > -1) {
+- spin_lock(&sbinfo->stat_lock);
+- sbinfo->free_blocks += delta;
+- spin_unlock(&sbinfo->stat_lock);
+- }
+-}
+-
+ static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+ {
+diff --git a/fs/locks.c b/fs/locks.c
+index 0d68f1f..6a64f15 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1465,7 +1465,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
+ case F_WRLCK:
+ return generic_add_lease(filp, arg, flp);
+ default:
+- BUG();
++ return -EINVAL;
+ }
+ }
+ EXPORT_SYMBOL(generic_setlease);
+diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
+index 47d1c6f..b122af8 100644
+--- a/fs/nfs/idmap.c
++++ b/fs/nfs/idmap.c
+@@ -318,12 +318,12 @@ struct idmap_hashent {
+ unsigned long ih_expires;
+ __u32 ih_id;
+ size_t ih_namelen;
+- char ih_name[IDMAP_NAMESZ];
++ const char *ih_name;
+ };
+
+ struct idmap_hashtable {
+ __u8 h_type;
+- struct idmap_hashent h_entries[IDMAP_HASH_SZ];
++ struct idmap_hashent *h_entries;
+ };
+
+ struct idmap {
+@@ -378,6 +378,28 @@ nfs_idmap_new(struct nfs_client *clp)
+ return 0;
+ }
+
++static void
++idmap_alloc_hashtable(struct idmap_hashtable *h)
++{
++ if (h->h_entries != NULL)
++ return;
++ h->h_entries = kcalloc(IDMAP_HASH_SZ,
++ sizeof(*h->h_entries),
++ GFP_KERNEL);
++}
++
++static void
++idmap_free_hashtable(struct idmap_hashtable *h)
++{
++ int i;
++
++ if (h->h_entries == NULL)
++ return;
++ for (i = 0; i < IDMAP_HASH_SZ; i++)
++ kfree(h->h_entries[i].ih_name);
++ kfree(h->h_entries);
++}
++
+ void
+ nfs_idmap_delete(struct nfs_client *clp)
+ {
+@@ -387,6 +409,8 @@ nfs_idmap_delete(struct nfs_client *clp)
+ return;
+ rpc_unlink(idmap->idmap_dentry);
+ clp->cl_idmap = NULL;
++ idmap_free_hashtable(&idmap->idmap_user_hash);
++ idmap_free_hashtable(&idmap->idmap_group_hash);
+ kfree(idmap);
+ }
+
+@@ -396,6 +420,8 @@ nfs_idmap_delete(struct nfs_client *clp)
+ static inline struct idmap_hashent *
+ idmap_name_hash(struct idmap_hashtable* h, const char *name, size_t len)
+ {
++ if (h->h_entries == NULL)
++ return NULL;
+ return &h->h_entries[fnvhash32(name, len) % IDMAP_HASH_SZ];
+ }
+
+@@ -404,6 +430,8 @@ idmap_lookup_name(struct idmap_hashtable *h, const char *name, size_t len)
+ {
+ struct idmap_hashent *he = idmap_name_hash(h, name, len);
+
++ if (he == NULL)
++ return NULL;
+ if (he->ih_namelen != len || memcmp(he->ih_name, name, len) != 0)
+ return NULL;
+ if (time_after(jiffies, he->ih_expires))
+@@ -414,6 +442,8 @@ idmap_lookup_name(struct idmap_hashtable *h, const char *name, size_t len)
+ static inline struct idmap_hashent *
+ idmap_id_hash(struct idmap_hashtable* h, __u32 id)
+ {
++ if (h->h_entries == NULL)
++ return NULL;
+ return &h->h_entries[fnvhash32(&id, sizeof(id)) % IDMAP_HASH_SZ];
+ }
+
+@@ -421,6 +451,9 @@ static struct idmap_hashent *
+ idmap_lookup_id(struct idmap_hashtable *h, __u32 id)
+ {
+ struct idmap_hashent *he = idmap_id_hash(h, id);
++
++ if (he == NULL)
++ return NULL;
+ if (he->ih_id != id || he->ih_namelen == 0)
+ return NULL;
+ if (time_after(jiffies, he->ih_expires))
+@@ -436,12 +469,14 @@ idmap_lookup_id(struct idmap_hashtable *h, __u32 id)
+ static inline struct idmap_hashent *
+ idmap_alloc_name(struct idmap_hashtable *h, char *name, size_t len)
+ {
++ idmap_alloc_hashtable(h);
+ return idmap_name_hash(h, name, len);
+ }
+
+ static inline struct idmap_hashent *
+ idmap_alloc_id(struct idmap_hashtable *h, __u32 id)
+ {
++ idmap_alloc_hashtable(h);
+ return idmap_id_hash(h, id);
+ }
+
+@@ -449,9 +484,14 @@ static void
+ idmap_update_entry(struct idmap_hashent *he, const char *name,
+ size_t namelen, __u32 id)
+ {
++ char *str = kmalloc(namelen + 1, GFP_KERNEL);
++ if (str == NULL)
++ return;
++ kfree(he->ih_name);
+ he->ih_id = id;
+- memcpy(he->ih_name, name, namelen);
+- he->ih_name[namelen] = '\0';
++ memcpy(str, name, namelen);
++ str[namelen] = '\0';
++ he->ih_name = str;
+ he->ih_namelen = namelen;
+ he->ih_expires = jiffies + nfs_idmap_cache_timeout;
+ }
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 66020ac..07354b7 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1186,8 +1186,9 @@ restart:
+ spin_lock(&state->state_lock);
+ list_for_each_entry(lock, &state->lock_states, ls_locks) {
+ if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
+- printk("%s: Lock reclaim failed!\n",
+- __func__);
++ pr_warn_ratelimited("NFS: "
++ "%s: Lock reclaim "
++ "failed!\n", __func__);
+ }
+ spin_unlock(&state->state_lock);
+ nfs4_put_open_state(state);
+diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
+index 55d0128..a03ee52 100644
+--- a/fs/nfs/objlayout/objio_osd.c
++++ b/fs/nfs/objlayout/objio_osd.c
+@@ -433,7 +433,10 @@ int objio_read_pagelist(struct nfs_read_data *rdata)
+ objios->ios->done = _read_done;
+ dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
+ rdata->args.offset, rdata->args.count);
+- return ore_read(objios->ios);
++ ret = ore_read(objios->ios);
++ if (unlikely(ret))
++ objio_free_result(&objios->oir);
++ return ret;
+ }
+
+ /*
+@@ -464,8 +467,16 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
+ struct objio_state *objios = priv;
+ struct nfs_write_data *wdata = objios->oir.rpcdata;
+ pgoff_t index = offset / PAGE_SIZE;
+- struct page *page = find_get_page(wdata->inode->i_mapping, index);
++ struct page *page;
++ loff_t i_size = i_size_read(wdata->inode);
++
++ if (offset >= i_size) {
++ *uptodate = true;
++ dprintk("%s: g_zero_page index=0x%lx\n", __func__, index);
++ return ZERO_PAGE(0);
++ }
+
++ page = find_get_page(wdata->inode->i_mapping, index);
+ if (!page) {
+ page = find_or_create_page(wdata->inode->i_mapping,
+ index, GFP_NOFS);
+@@ -486,8 +497,10 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
+
+ static void __r4w_put_page(void *priv, struct page *page)
+ {
+- dprintk("%s: index=0x%lx\n", __func__, page->index);
+- page_cache_release(page);
++ dprintk("%s: index=0x%lx\n", __func__,
++ (page == ZERO_PAGE(0)) ? -1UL : page->index);
++ if (ZERO_PAGE(0) != page)
++ page_cache_release(page);
+ return;
+ }
+
+@@ -517,8 +530,10 @@ int objio_write_pagelist(struct nfs_write_data *wdata, int how)
+ dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
+ wdata->args.offset, wdata->args.count);
+ ret = ore_write(objios->ios);
+- if (unlikely(ret))
++ if (unlikely(ret)) {
++ objio_free_result(&objios->oir);
+ return ret;
++ }
+
+ if (objios->sync)
+ _write_done(objios->ios, objios);
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 07ee5b4..1c7d45e 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1950,7 +1950,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
+ if (ret < 0)
+ mlog_errno(ret);
+
+- if (file->f_flags & O_SYNC)
++ if (file && (file->f_flags & O_SYNC))
+ handle->h_sync = 1;
+
+ ocfs2_commit_trans(osb, handle);
+diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
+index fbb0b47..d5378d0 100644
+--- a/fs/ramfs/file-nommu.c
++++ b/fs/ramfs/file-nommu.c
+@@ -110,6 +110,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
+
+ /* prevent the page from being discarded on memory pressure */
+ SetPageDirty(page);
++ SetPageUptodate(page);
+
+ unlock_page(page);
+ put_page(page);
+diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
+index 6094c5a..b73ecd8 100644
+--- a/fs/ubifs/sb.c
++++ b/fs/ubifs/sb.c
+@@ -715,8 +715,12 @@ static int fixup_free_space(struct ubifs_info *c)
+ lnum = ubifs_next_log_lnum(c, lnum);
+ }
+
+- /* Fixup the current log head */
+- err = fixup_leb(c, c->lhead_lnum, c->lhead_offs);
++ /*
++ * Fixup the log head which contains the only a CS node at the
++ * beginning.
++ */
++ err = fixup_leb(c, c->lhead_lnum,
++ ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size));
+ if (err)
+ goto out;
+
+diff --git a/include/linux/Kbuild b/include/linux/Kbuild
+index bd21ecd..a3ce901 100644
+--- a/include/linux/Kbuild
++++ b/include/linux/Kbuild
+@@ -268,6 +268,7 @@ header-y += netfilter_ipv4.h
+ header-y += netfilter_ipv6.h
+ header-y += netlink.h
+ header-y += netrom.h
++header-y += nfc.h
+ header-y += nfs.h
+ header-y += nfs2.h
+ header-y += nfs3.h
+diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
+index fd0dc30..cc07d27 100644
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -165,6 +165,7 @@ enum hrtimer_base_type {
+ * @lock: lock protecting the base and associated clock bases
+ * and timers
+ * @active_bases: Bitfield to mark bases with active timers
++ * @clock_was_set: Indicates that clock was set from irq context.
+ * @expires_next: absolute time of the next event which was scheduled
+ * via clock_set_next_event()
+ * @hres_active: State of high resolution mode
+@@ -177,7 +178,8 @@ enum hrtimer_base_type {
+ */
+ struct hrtimer_cpu_base {
+ raw_spinlock_t lock;
+- unsigned long active_bases;
++ unsigned int active_bases;
++ unsigned int clock_was_set;
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ ktime_t expires_next;
+ int hres_active;
+@@ -286,6 +288,8 @@ extern void hrtimer_peek_ahead_timers(void);
+ # define MONOTONIC_RES_NSEC HIGH_RES_NSEC
+ # define KTIME_MONOTONIC_RES KTIME_HIGH_RES
+
++extern void clock_was_set_delayed(void);
++
+ #else
+
+ # define MONOTONIC_RES_NSEC LOW_RES_NSEC
+@@ -306,6 +310,9 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+ {
+ return 0;
+ }
++
++static inline void clock_was_set_delayed(void) { }
++
+ #endif
+
+ extern void clock_was_set(void);
+@@ -320,6 +327,7 @@ extern ktime_t ktime_get(void);
+ extern ktime_t ktime_get_real(void);
+ extern ktime_t ktime_get_boottime(void);
+ extern ktime_t ktime_get_monotonic_offset(void);
++extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot);
+
+ DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
+
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index d9d6c86..c5ed2f1 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -14,6 +14,15 @@ struct user_struct;
+ #include <linux/shm.h>
+ #include <asm/tlbflush.h>
+
++struct hugepage_subpool {
++ spinlock_t lock;
++ long count;
++ long max_hpages, used_hpages;
++};
++
++struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
++void hugepage_put_subpool(struct hugepage_subpool *spool);
++
+ int PageHuge(struct page *page);
+
+ void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
+@@ -138,12 +147,11 @@ struct hugetlbfs_config {
+ };
+
+ struct hugetlbfs_sb_info {
+- long max_blocks; /* blocks allowed */
+- long free_blocks; /* blocks free */
+ long max_inodes; /* inodes allowed */
+ long free_inodes; /* inodes free */
+ spinlock_t stat_lock;
+ struct hstate *hstate;
++ struct hugepage_subpool *spool;
+ };
+
+
+@@ -166,8 +174,6 @@ extern const struct file_operations hugetlbfs_file_operations;
+ extern const struct vm_operations_struct hugetlb_vm_ops;
+ struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
+ struct user_struct **user, int creat_flags);
+-int hugetlb_get_quota(struct address_space *mapping, long delta);
+-void hugetlb_put_quota(struct address_space *mapping, long delta);
+
+ static inline int is_file_hugepages(struct file *file)
+ {
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 188cb2f..905b1e1 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -652,7 +652,7 @@ typedef struct pglist_data {
+ range, including holes */
+ int node_id;
+ wait_queue_head_t kswapd_wait;
+- struct task_struct *kswapd;
++ struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */
+ int kswapd_max_order;
+ enum zone_type classzone_idx;
+ } pg_data_t;
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index c0cfa0d..7cda65b 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -176,8 +176,6 @@ enum pci_dev_flags {
+ PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
+ /* Provide indication device is assigned by a Virtual Machine Manager */
+ PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
+- /* Device causes system crash if in D3 during S3 sleep */
+- PCI_DEV_FLAGS_NO_D3_DURING_SLEEP = (__force pci_dev_flags_t) 8,
+ };
+
+ enum pci_irq_reroute_variant {
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 1c4f3e9..5afa2a3 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1892,6 +1892,14 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
+ }
+ #endif
+
++#ifdef CONFIG_NO_HZ
++void calc_load_enter_idle(void);
++void calc_load_exit_idle(void);
++#else
++static inline void calc_load_enter_idle(void) { }
++static inline void calc_load_exit_idle(void) { }
++#endif /* CONFIG_NO_HZ */
++
+ #ifndef CONFIG_CPUMASK_OFFSTACK
+ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+ {
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index bdb4590..53dc7e7 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -213,11 +213,8 @@ enum {
+ /* device driver is going to provide hardware time stamp */
+ SKBTX_IN_PROGRESS = 1 << 2,
+
+- /* ensure the originating sk reference is available on driver level */
+- SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
+-
+ /* device driver supports TX zero-copy buffers */
+- SKBTX_DEV_ZEROCOPY = 1 << 4,
++ SKBTX_DEV_ZEROCOPY = 1 << 3,
+ };
+
+ /*
+diff --git a/include/linux/timex.h b/include/linux/timex.h
+index aa60fe7..08e90fb 100644
+--- a/include/linux/timex.h
++++ b/include/linux/timex.h
+@@ -266,7 +266,7 @@ static inline int ntp_synced(void)
+ /* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
+ extern u64 tick_length;
+
+-extern void second_overflow(void);
++extern int second_overflow(unsigned long secs);
+ extern void update_ntp_one_tick(void);
+ extern int do_adjtimex(struct timex *);
+ extern void hardpps(const struct timespec *, const struct timespec *);
+diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
+index 6a308d4..1e100c6 100644
+--- a/include/scsi/libsas.h
++++ b/include/scsi/libsas.h
+@@ -159,6 +159,8 @@ enum ata_command_set {
+ ATAPI_COMMAND_SET = 1,
+ };
+
++#define ATA_RESP_FIS_SIZE 24
++
+ struct sata_device {
+ enum ata_command_set command_set;
+ struct smp_resp rps_resp; /* report_phy_sata_resp */
+@@ -170,7 +172,7 @@ struct sata_device {
+
+ struct ata_port *ap;
+ struct ata_host ata_host;
+- struct ata_taskfile tf;
++ u8 fis[ATA_RESP_FIS_SIZE];
+ u32 sstatus;
+ u32 serror;
+ u32 scontrol;
+@@ -486,7 +488,7 @@ enum exec_status {
+ */
+ struct ata_task_resp {
+ u16 frame_len;
+- u8 ending_fis[24]; /* dev to host or data-in */
++ u8 ending_fis[ATA_RESP_FIS_SIZE]; /* dev to host or data-in */
+ u32 sstatus;
+ u32 serror;
+ u32 scontrol;
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index ae34bf5..6db7a5e 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -657,6 +657,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+ return 0;
+ }
+
++static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
++{
++ ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
++ ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
++
++ return ktime_get_update_offsets(offs_real, offs_boot);
++}
++
+ /*
+ * Retrigger next event is called after clock was set
+ *
+@@ -665,22 +673,12 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+ static void retrigger_next_event(void *arg)
+ {
+ struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+- struct timespec realtime_offset, xtim, wtm, sleep;
+
+ if (!hrtimer_hres_active())
+ return;
+
+- /* Optimized out for !HIGH_RES */
+- get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
+- set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
+-
+- /* Adjust CLOCK_REALTIME offset */
+ raw_spin_lock(&base->lock);
+- base->clock_base[HRTIMER_BASE_REALTIME].offset =
+- timespec_to_ktime(realtime_offset);
+- base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
+- timespec_to_ktime(sleep);
+-
++ hrtimer_update_base(base);
+ hrtimer_force_reprogram(base, 0);
+ raw_spin_unlock(&base->lock);
+ }
+@@ -710,13 +708,25 @@ static int hrtimer_switch_to_hres(void)
+ base->clock_base[i].resolution = KTIME_HIGH_RES;
+
+ tick_setup_sched_timer();
+-
+ /* "Retrigger" the interrupt to get things going */
+ retrigger_next_event(NULL);
+ local_irq_restore(flags);
+ return 1;
+ }
+
++/*
++ * Called from timekeeping code to reprogramm the hrtimer interrupt
++ * device. If called from the timer interrupt context we defer it to
++ * softirq context.
++ */
++void clock_was_set_delayed(void)
++{
++ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
++
++ cpu_base->clock_was_set = 1;
++ __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++}
++
+ #else
+
+ static inline int hrtimer_hres_active(void) { return 0; }
+@@ -1250,11 +1260,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
+ cpu_base->nr_events++;
+ dev->next_event.tv64 = KTIME_MAX;
+
+- entry_time = now = ktime_get();
++ raw_spin_lock(&cpu_base->lock);
++ entry_time = now = hrtimer_update_base(cpu_base);
+ retry:
+ expires_next.tv64 = KTIME_MAX;
+-
+- raw_spin_lock(&cpu_base->lock);
+ /*
+ * We set expires_next to KTIME_MAX here with cpu_base->lock
+ * held to prevent that a timer is enqueued in our queue via
+@@ -1330,8 +1339,12 @@ retry:
+ * We need to prevent that we loop forever in the hrtimer
+ * interrupt routine. We give it 3 attempts to avoid
+ * overreacting on some spurious event.
++ *
++ * Acquire base lock for updating the offsets and retrieving
++ * the current time.
+ */
+- now = ktime_get();
++ raw_spin_lock(&cpu_base->lock);
++ now = hrtimer_update_base(cpu_base);
+ cpu_base->nr_retries++;
+ if (++retries < 3)
+ goto retry;
+@@ -1343,6 +1356,7 @@ retry:
+ */
+ cpu_base->nr_hangs++;
+ cpu_base->hang_detected = 1;
++ raw_spin_unlock(&cpu_base->lock);
+ delta = ktime_sub(now, entry_time);
+ if (delta.tv64 > cpu_base->max_hang_time.tv64)
+ cpu_base->max_hang_time = delta;
+@@ -1395,6 +1409,13 @@ void hrtimer_peek_ahead_timers(void)
+
+ static void run_hrtimer_softirq(struct softirq_action *h)
+ {
++ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
++
++ if (cpu_base->clock_was_set) {
++ cpu_base->clock_was_set = 0;
++ clock_was_set();
++ }
++
+ hrtimer_peek_ahead_timers();
+ }
+
+diff --git a/kernel/power/swap.c b/kernel/power/swap.c
+index b313086..64f8f97 100644
+--- a/kernel/power/swap.c
++++ b/kernel/power/swap.c
+@@ -6,7 +6,7 @@
+ *
+ * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+- * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
++ * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
+ *
+ * This file is released under the GPLv2.
+ *
+@@ -283,14 +283,17 @@ static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
+ return -ENOSPC;
+
+ if (bio_chain) {
+- src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
++ src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
++ __GFP_NORETRY);
+ if (src) {
+ copy_page(src, buf);
+ } else {
+ ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */
+ if (ret)
+ return ret;
+- src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
++ src = (void *)__get_free_page(__GFP_WAIT |
++ __GFP_NOWARN |
++ __GFP_NORETRY);
+ if (src) {
+ copy_page(src, buf);
+ } else {
+@@ -368,12 +371,17 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
+ clear_page(handle->cur);
+ handle->cur_swap = offset;
+ handle->k = 0;
+- }
+- if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
+- error = hib_wait_on_bio_chain(bio_chain);
+- if (error)
+- goto out;
+- handle->reqd_free_pages = reqd_free_pages();
++
++ if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
++ error = hib_wait_on_bio_chain(bio_chain);
++ if (error)
++ goto out;
++ /*
++ * Recalculate the number of required free pages, to
++ * make sure we never take more than half.
++ */
++ handle->reqd_free_pages = reqd_free_pages();
++ }
+ }
+ out:
+ return error;
+@@ -420,8 +428,9 @@ static int swap_writer_finish(struct swap_map_handle *handle,
+ /* Maximum number of threads for compression/decompression. */
+ #define LZO_THREADS 3
+
+-/* Maximum number of pages for read buffering. */
+-#define LZO_READ_PAGES (MAP_PAGE_ENTRIES * 8)
++/* Minimum/maximum number of pages for read buffering. */
++#define LZO_MIN_RD_PAGES 1024
++#define LZO_MAX_RD_PAGES 8192
+
+
+ /**
+@@ -632,12 +641,6 @@ static int save_image_lzo(struct swap_map_handle *handle,
+ }
+
+ /*
+- * Adjust number of free pages after all allocations have been done.
+- * We don't want to run out of pages when writing.
+- */
+- handle->reqd_free_pages = reqd_free_pages();
+-
+- /*
+ * Start the CRC32 thread.
+ */
+ init_waitqueue_head(&crc->go);
+@@ -658,6 +661,12 @@ static int save_image_lzo(struct swap_map_handle *handle,
+ goto out_clean;
+ }
+
++ /*
++ * Adjust the number of required free pages after all allocations have
++ * been done. We don't want to run out of pages when writing.
++ */
++ handle->reqd_free_pages = reqd_free_pages();
++
+ printk(KERN_INFO
+ "PM: Using %u thread(s) for compression.\n"
+ "PM: Compressing and saving image data (%u pages) ... ",
+@@ -1067,7 +1076,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ unsigned i, thr, run_threads, nr_threads;
+ unsigned ring = 0, pg = 0, ring_size = 0,
+ have = 0, want, need, asked = 0;
+- unsigned long read_pages;
++ unsigned long read_pages = 0;
+ unsigned char **page = NULL;
+ struct dec_data *data = NULL;
+ struct crc_data *crc = NULL;
+@@ -1079,7 +1088,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ nr_threads = num_online_cpus() - 1;
+ nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
+
+- page = vmalloc(sizeof(*page) * LZO_READ_PAGES);
++ page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
+ if (!page) {
+ printk(KERN_ERR "PM: Failed to allocate LZO page\n");
+ ret = -ENOMEM;
+@@ -1144,15 +1153,22 @@ static int load_image_lzo(struct swap_map_handle *handle,
+ }
+
+ /*
+- * Adjust number of pages for read buffering, in case we are short.
++ * Set the number of pages for read buffering.
++ * This is complete guesswork, because we'll only know the real
++ * picture once prepare_image() is called, which is much later on
++ * during the image load phase. We'll assume the worst case and
++ * say that none of the image pages are from high memory.
+ */
+- read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
+- read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);
++ if (low_free_pages() > snapshot_get_image_size())
++ read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
++ read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
+
+ for (i = 0; i < read_pages; i++) {
+ page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
+ __GFP_WAIT | __GFP_HIGH :
+- __GFP_WAIT);
++ __GFP_WAIT | __GFP_NOWARN |
++ __GFP_NORETRY);
++
+ if (!page[i]) {
+ if (i < LZO_CMP_PAGES) {
+ ring_size = i;
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 576a27f..52ac69b 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -1885,7 +1885,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
+
+ #endif
+
+-static void calc_load_account_idle(struct rq *this_rq);
+ static void update_sysctl(void);
+ static int get_update_sysctl_factor(void);
+ static void update_cpu_load(struct rq *this_rq);
+@@ -3401,11 +3400,73 @@ unsigned long this_cpu_load(void)
+ }
+
+
++/*
++ * Global load-average calculations
++ *
++ * We take a distributed and async approach to calculating the global load-avg
++ * in order to minimize overhead.
++ *
++ * The global load average is an exponentially decaying average of nr_running +
++ * nr_uninterruptible.
++ *
++ * Once every LOAD_FREQ:
++ *
++ * nr_active = 0;
++ * for_each_possible_cpu(cpu)
++ * nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
++ *
++ * avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
++ *
++ * Due to a number of reasons the above turns in the mess below:
++ *
++ * - for_each_possible_cpu() is prohibitively expensive on machines with
++ * serious number of cpus, therefore we need to take a distributed approach
++ * to calculating nr_active.
++ *
++ * \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
++ * = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
++ *
++ * So assuming nr_active := 0 when we start out -- true per definition, we
++ * can simply take per-cpu deltas and fold those into a global accumulate
++ * to obtain the same result. See calc_load_fold_active().
++ *
++ * Furthermore, in order to avoid synchronizing all per-cpu delta folding
++ * across the machine, we assume 10 ticks is sufficient time for every
++ * cpu to have completed this task.
++ *
++ * This places an upper-bound on the IRQ-off latency of the machine. Then
++ * again, being late doesn't loose the delta, just wrecks the sample.
++ *
++ * - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
++ * this would add another cross-cpu cacheline miss and atomic operation
++ * to the wakeup path. Instead we increment on whatever cpu the task ran
++ * when it went into uninterruptible state and decrement on whatever cpu
++ * did the wakeup. This means that only the sum of nr_uninterruptible over
++ * all cpus yields the correct result.
++ *
++ * This covers the NO_HZ=n code, for extra head-aches, see the comment below.
++ */
++
+ /* Variables and functions for calc_load */
+ static atomic_long_t calc_load_tasks;
+ static unsigned long calc_load_update;
+ unsigned long avenrun[3];
+-EXPORT_SYMBOL(avenrun);
++EXPORT_SYMBOL(avenrun); /* should be removed */
++
++/**
++ * get_avenrun - get the load average array
++ * @loads: pointer to dest load array
++ * @offset: offset to add
++ * @shift: shift count to shift the result left
++ *
++ * These values are estimates at best, so no need for locking.
++ */
++void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
++{
++ loads[0] = (avenrun[0] + offset) << shift;
++ loads[1] = (avenrun[1] + offset) << shift;
++ loads[2] = (avenrun[2] + offset) << shift;
++}
+
+ static long calc_load_fold_active(struct rq *this_rq)
+ {
+@@ -3422,6 +3483,9 @@ static long calc_load_fold_active(struct rq *this_rq)
+ return delta;
+ }
+
++/*
++ * a1 = a0 * e + a * (1 - e)
++ */
+ static unsigned long
+ calc_load(unsigned long load, unsigned long exp, unsigned long active)
+ {
+@@ -3433,30 +3497,118 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
+
+ #ifdef CONFIG_NO_HZ
+ /*
+- * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
++ * Handle NO_HZ for the global load-average.
++ *
++ * Since the above described distributed algorithm to compute the global
++ * load-average relies on per-cpu sampling from the tick, it is affected by
++ * NO_HZ.
++ *
++ * The basic idea is to fold the nr_active delta into a global idle-delta upon
++ * entering NO_HZ state such that we can include this as an 'extra' cpu delta
++ * when we read the global state.
++ *
++ * Obviously reality has to ruin such a delightfully simple scheme:
++ *
++ * - When we go NO_HZ idle during the window, we can negate our sample
++ * contribution, causing under-accounting.
++ *
++ * We avoid this by keeping two idle-delta counters and flipping them
++ * when the window starts, thus separating old and new NO_HZ load.
++ *
++ * The only trick is the slight shift in index flip for read vs write.
++ *
++ * 0s 5s 10s 15s
++ * +10 +10 +10 +10
++ * |-|-----------|-|-----------|-|-----------|-|
++ * r:0 0 1 1 0 0 1 1 0
++ * w:0 1 1 0 0 1 1 0 0
++ *
++ * This ensures we'll fold the old idle contribution in this window while
++ * accumlating the new one.
++ *
++ * - When we wake up from NO_HZ idle during the window, we push up our
++ * contribution, since we effectively move our sample point to a known
++ * busy state.
++ *
++ * This is solved by pushing the window forward, and thus skipping the
++ * sample, for this cpu (effectively using the idle-delta for this cpu which
++ * was in effect at the time the window opened). This also solves the issue
++ * of having to deal with a cpu having been in NOHZ idle for multiple
++ * LOAD_FREQ intervals.
+ *
+ * When making the ILB scale, we should try to pull this in as well.
+ */
+-static atomic_long_t calc_load_tasks_idle;
++static atomic_long_t calc_load_idle[2];
++static int calc_load_idx;
+
+-static void calc_load_account_idle(struct rq *this_rq)
++static inline int calc_load_write_idx(void)
+ {
++ int idx = calc_load_idx;
++
++ /*
++ * See calc_global_nohz(), if we observe the new index, we also
++ * need to observe the new update time.
++ */
++ smp_rmb();
++
++ /*
++ * If the folding window started, make sure we start writing in the
++ * next idle-delta.
++ */
++ if (!time_before(jiffies, calc_load_update))
++ idx++;
++
++ return idx & 1;
++}
++
++static inline int calc_load_read_idx(void)
++{
++ return calc_load_idx & 1;
++}
++
++void calc_load_enter_idle(void)
++{
++ struct rq *this_rq = this_rq();
+ long delta;
+
++ /*
++ * We're going into NOHZ mode, if there's any pending delta, fold it
++ * into the pending idle delta.
++ */
+ delta = calc_load_fold_active(this_rq);
+- if (delta)
+- atomic_long_add(delta, &calc_load_tasks_idle);
++ if (delta) {
++ int idx = calc_load_write_idx();
++ atomic_long_add(delta, &calc_load_idle[idx]);
++ }
+ }
+
+-static long calc_load_fold_idle(void)
++void calc_load_exit_idle(void)
+ {
+- long delta = 0;
++ struct rq *this_rq = this_rq();
++
++ /*
++ * If we're still before the sample window, we're done.
++ */
++ if (time_before(jiffies, this_rq->calc_load_update))
++ return;
+
+ /*
+- * Its got a race, we don't care...
++ * We woke inside or after the sample window, this means we're already
++ * accounted through the nohz accounting, so skip the entire deal and
++ * sync up for the next window.
+ */
+- if (atomic_long_read(&calc_load_tasks_idle))
+- delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
++ this_rq->calc_load_update = calc_load_update;
++ if (time_before(jiffies, this_rq->calc_load_update + 10))
++ this_rq->calc_load_update += LOAD_FREQ;
++}
++
++static long calc_load_fold_idle(void)
++{
++ int idx = calc_load_read_idx();
++ long delta = 0;
++
++ if (atomic_long_read(&calc_load_idle[idx]))
++ delta = atomic_long_xchg(&calc_load_idle[idx], 0);
+
+ return delta;
+ }
+@@ -3542,66 +3694,39 @@ static void calc_global_nohz(void)
+ {
+ long delta, active, n;
+
+- /*
+- * If we crossed a calc_load_update boundary, make sure to fold
+- * any pending idle changes, the respective CPUs might have
+- * missed the tick driven calc_load_account_active() update
+- * due to NO_HZ.
+- */
+- delta = calc_load_fold_idle();
+- if (delta)
+- atomic_long_add(delta, &calc_load_tasks);
+-
+- /*
+- * It could be the one fold was all it took, we done!
+- */
+- if (time_before(jiffies, calc_load_update + 10))
+- return;
+-
+- /*
+- * Catch-up, fold however many we are behind still
+- */
+- delta = jiffies - calc_load_update - 10;
+- n = 1 + (delta / LOAD_FREQ);
++ if (!time_before(jiffies, calc_load_update + 10)) {
++ /*
++ * Catch-up, fold however many we are behind still
++ */
++ delta = jiffies - calc_load_update - 10;
++ n = 1 + (delta / LOAD_FREQ);
+
+- active = atomic_long_read(&calc_load_tasks);
+- active = active > 0 ? active * FIXED_1 : 0;
++ active = atomic_long_read(&calc_load_tasks);
++ active = active > 0 ? active * FIXED_1 : 0;
+
+- avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
+- avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
+- avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
++ avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
++ avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
++ avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
+
+- calc_load_update += n * LOAD_FREQ;
+-}
+-#else
+-static void calc_load_account_idle(struct rq *this_rq)
+-{
+-}
++ calc_load_update += n * LOAD_FREQ;
++ }
+
+-static inline long calc_load_fold_idle(void)
+-{
+- return 0;
++ /*
++ * Flip the idle index...
++ *
++ * Make sure we first write the new time then flip the index, so that
++ * calc_load_write_idx() will see the new time when it reads the new
++ * index, this avoids a double flip messing things up.
++ */
++ smp_wmb();
++ calc_load_idx++;
+ }
++#else /* !CONFIG_NO_HZ */
+
+-static void calc_global_nohz(void)
+-{
+-}
+-#endif
++static inline long calc_load_fold_idle(void) { return 0; }
++static inline void calc_global_nohz(void) { }
+
+-/**
+- * get_avenrun - get the load average array
+- * @loads: pointer to dest load array
+- * @offset: offset to add
+- * @shift: shift count to shift the result left
+- *
+- * These values are estimates at best, so no need for locking.
+- */
+-void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+-{
+- loads[0] = (avenrun[0] + offset) << shift;
+- loads[1] = (avenrun[1] + offset) << shift;
+- loads[2] = (avenrun[2] + offset) << shift;
+-}
++#endif /* CONFIG_NO_HZ */
+
+ /*
+ * calc_load - update the avenrun load estimates 10 ticks after the
+@@ -3609,11 +3734,18 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+ */
+ void calc_global_load(unsigned long ticks)
+ {
+- long active;
++ long active, delta;
+
+ if (time_before(jiffies, calc_load_update + 10))
+ return;
+
++ /*
++ * Fold the 'old' idle-delta to include all NO_HZ cpus.
++ */
++ delta = calc_load_fold_idle();
++ if (delta)
++ atomic_long_add(delta, &calc_load_tasks);
++
+ active = atomic_long_read(&calc_load_tasks);
+ active = active > 0 ? active * FIXED_1 : 0;
+
+@@ -3624,12 +3756,7 @@ void calc_global_load(unsigned long ticks)
+ calc_load_update += LOAD_FREQ;
+
+ /*
+- * Account one period with whatever state we found before
+- * folding in the nohz state and ageing the entire idle period.
+- *
+- * This avoids loosing a sample when we go idle between
+- * calc_load_account_active() (10 ticks ago) and now and thus
+- * under-accounting.
++ * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
+ */
+ calc_global_nohz();
+ }
+@@ -3646,7 +3773,6 @@ static void calc_load_account_active(struct rq *this_rq)
+ return;
+
+ delta = calc_load_fold_active(this_rq);
+- delta += calc_load_fold_idle();
+ if (delta)
+ atomic_long_add(delta, &calc_load_tasks);
+
+@@ -3654,6 +3780,10 @@ static void calc_load_account_active(struct rq *this_rq)
+ }
+
+ /*
++ * End of global load-average stuff
++ */
++
++/*
+ * The exact cpuload at various idx values, calculated at every tick would be
+ * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
+ *
+diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
+index 0a51882..be92bfe 100644
+--- a/kernel/sched_idletask.c
++++ b/kernel/sched_idletask.c
+@@ -23,7 +23,6 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
+ static struct task_struct *pick_next_task_idle(struct rq *rq)
+ {
+ schedstat_inc(rq, sched_goidle);
+- calc_load_account_idle(rq);
+ return rq->idle;
+ }
+
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index 4b85a7a..f1eb182 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -31,8 +31,6 @@ unsigned long tick_nsec;
+ u64 tick_length;
+ static u64 tick_length_base;
+
+-static struct hrtimer leap_timer;
+-
+ #define MAX_TICKADJ 500LL /* usecs */
+ #define MAX_TICKADJ_SCALED \
+ (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
+@@ -350,60 +348,60 @@ void ntp_clear(void)
+ }
+
+ /*
+- * Leap second processing. If in leap-insert state at the end of the
+- * day, the system clock is set back one second; if in leap-delete
+- * state, the system clock is set ahead one second.
++ * this routine handles the overflow of the microsecond field
++ *
++ * The tricky bits of code to handle the accurate clock support
++ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
++ * They were originally developed for SUN and DEC kernels.
++ * All the kudos should go to Dave for this stuff.
++ *
++ * Also handles leap second processing, and returns leap offset
+ */
+-static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
++int second_overflow(unsigned long secs)
+ {
+- enum hrtimer_restart res = HRTIMER_NORESTART;
+-
+- write_seqlock(&xtime_lock);
++ int leap = 0;
++ s64 delta;
+
++ /*
++ * Leap second processing. If in leap-insert state at the end of the
++ * day, the system clock is set back one second; if in leap-delete
++ * state, the system clock is set ahead one second.
++ */
+ switch (time_state) {
+ case TIME_OK:
++ if (time_status & STA_INS)
++ time_state = TIME_INS;
++ else if (time_status & STA_DEL)
++ time_state = TIME_DEL;
+ break;
+ case TIME_INS:
+- timekeeping_leap_insert(-1);
+- time_state = TIME_OOP;
+- printk(KERN_NOTICE
+- "Clock: inserting leap second 23:59:60 UTC\n");
+- hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
+- res = HRTIMER_RESTART;
++ if (secs % 86400 == 0) {
++ leap = -1;
++ time_state = TIME_OOP;
++ time_tai++;
++ printk(KERN_NOTICE
++ "Clock: inserting leap second 23:59:60 UTC\n");
++ }
+ break;
+ case TIME_DEL:
+- timekeeping_leap_insert(1);
+- time_tai--;
+- time_state = TIME_WAIT;
+- printk(KERN_NOTICE
+- "Clock: deleting leap second 23:59:59 UTC\n");
++ if ((secs + 1) % 86400 == 0) {
++ leap = 1;
++ time_tai--;
++ time_state = TIME_WAIT;
++ printk(KERN_NOTICE
++ "Clock: deleting leap second 23:59:59 UTC\n");
++ }
+ break;
+ case TIME_OOP:
+- time_tai++;
+ time_state = TIME_WAIT;
+- /* fall through */
++ break;
++
+ case TIME_WAIT:
+ if (!(time_status & (STA_INS | STA_DEL)))
+ time_state = TIME_OK;
+ break;
+ }
+
+- write_sequnlock(&xtime_lock);
+-
+- return res;
+-}
+-
+-/*
+- * this routine handles the overflow of the microsecond field
+- *
+- * The tricky bits of code to handle the accurate clock support
+- * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+- * They were originally developed for SUN and DEC kernels.
+- * All the kudos should go to Dave for this stuff.
+- */
+-void second_overflow(void)
+-{
+- s64 delta;
+
+ /* Bump the maxerror field */
+ time_maxerror += MAXFREQ / NSEC_PER_USEC;
+@@ -423,23 +421,25 @@ void second_overflow(void)
+ pps_dec_valid();
+
+ if (!time_adjust)
+- return;
++ goto out;
+
+ if (time_adjust > MAX_TICKADJ) {
+ time_adjust -= MAX_TICKADJ;
+ tick_length += MAX_TICKADJ_SCALED;
+- return;
++ goto out;
+ }
+
+ if (time_adjust < -MAX_TICKADJ) {
+ time_adjust += MAX_TICKADJ;
+ tick_length -= MAX_TICKADJ_SCALED;
+- return;
++ goto out;
+ }
+
+ tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
+ << NTP_SCALE_SHIFT;
+ time_adjust = 0;
++out:
++ return leap;
+ }
+
+ #ifdef CONFIG_GENERIC_CMOS_UPDATE
+@@ -501,27 +501,6 @@ static void notify_cmos_timer(void)
+ static inline void notify_cmos_timer(void) { }
+ #endif
+
+-/*
+- * Start the leap seconds timer:
+- */
+-static inline void ntp_start_leap_timer(struct timespec *ts)
+-{
+- long now = ts->tv_sec;
+-
+- if (time_status & STA_INS) {
+- time_state = TIME_INS;
+- now += 86400 - now % 86400;
+- hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
+-
+- return;
+- }
+-
+- if (time_status & STA_DEL) {
+- time_state = TIME_DEL;
+- now += 86400 - (now + 1) % 86400;
+- hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
+- }
+-}
+
+ /*
+ * Propagate a new txc->status value into the NTP state:
+@@ -546,22 +525,6 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
+ time_status &= STA_RONLY;
+ time_status |= txc->status & ~STA_RONLY;
+
+- switch (time_state) {
+- case TIME_OK:
+- ntp_start_leap_timer(ts);
+- break;
+- case TIME_INS:
+- case TIME_DEL:
+- time_state = TIME_OK;
+- ntp_start_leap_timer(ts);
+- case TIME_WAIT:
+- if (!(time_status & (STA_INS | STA_DEL)))
+- time_state = TIME_OK;
+- break;
+- case TIME_OOP:
+- hrtimer_restart(&leap_timer);
+- break;
+- }
+ }
+ /*
+ * Called with the xtime lock held, so we can access and modify
+@@ -643,9 +606,6 @@ int do_adjtimex(struct timex *txc)
+ (txc->tick < 900000/USER_HZ ||
+ txc->tick > 1100000/USER_HZ))
+ return -EINVAL;
+-
+- if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
+- hrtimer_cancel(&leap_timer);
+ }
+
+ if (txc->modes & ADJ_SETOFFSET) {
+@@ -967,6 +927,4 @@ __setup("ntp_tick_adj=", ntp_tick_adj_setup);
+ void __init ntp_init(void)
+ {
+ ntp_clear();
+- hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+- leap_timer.function = ntp_leap_second;
+ }
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index c923640..9955ebd 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -430,6 +430,7 @@ void tick_nohz_stop_sched_tick(int inidle)
+ */
+ if (!ts->tick_stopped) {
+ select_nohz_load_balancer(1);
++ calc_load_enter_idle();
+
+ ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
+ ts->tick_stopped = 1;
+@@ -563,6 +564,7 @@ void tick_nohz_restart_sched_tick(void)
+ account_idle_ticks(ticks);
+ #endif
+
++ calc_load_exit_idle();
+ touch_softlockup_watchdog();
+ /*
+ * Cancel the scheduled timer and restore the tick
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 2378413..03e67d4 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -161,23 +161,43 @@ static struct timespec xtime __attribute__ ((aligned (16)));
+ static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
+ static struct timespec total_sleep_time;
+
++/* Offset clock monotonic -> clock realtime */
++static ktime_t offs_real;
++
++/* Offset clock monotonic -> clock boottime */
++static ktime_t offs_boot;
++
+ /*
+ * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
+ */
+ static struct timespec raw_time;
+
+-/* flag for if timekeeping is suspended */
+-int __read_mostly timekeeping_suspended;
++/* must hold write on xtime_lock */
++static void update_rt_offset(void)
++{
++ struct timespec tmp, *wtm = &wall_to_monotonic;
+
+-/* must hold xtime_lock */
+-void timekeeping_leap_insert(int leapsecond)
++ set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
++ offs_real = timespec_to_ktime(tmp);
++}
++
++/* must hold write on xtime_lock */
++static void timekeeping_update(bool clearntp)
+ {
+- xtime.tv_sec += leapsecond;
+- wall_to_monotonic.tv_sec -= leapsecond;
+- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+- timekeeper.mult);
++ if (clearntp) {
++ timekeeper.ntp_error = 0;
++ ntp_clear();
++ }
++ update_rt_offset();
++ update_vsyscall(&xtime, &wall_to_monotonic,
++ timekeeper.clock, timekeeper.mult);
+ }
+
++
++
++/* flag for if timekeeping is suspended */
++int __read_mostly timekeeping_suspended;
++
+ /**
+ * timekeeping_forward_now - update clock to the current time
+ *
+@@ -375,11 +395,7 @@ int do_settimeofday(const struct timespec *tv)
+
+ xtime = *tv;
+
+- timekeeper.ntp_error = 0;
+- ntp_clear();
+-
+- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+- timekeeper.mult);
++ timekeeping_update(true);
+
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+
+@@ -412,11 +428,7 @@ int timekeeping_inject_offset(struct timespec *ts)
+ xtime = timespec_add(xtime, *ts);
+ wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);
+
+- timekeeper.ntp_error = 0;
+- ntp_clear();
+-
+- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+- timekeeper.mult);
++ timekeeping_update(true);
+
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+
+@@ -591,6 +603,7 @@ void __init timekeeping_init(void)
+ }
+ set_normalized_timespec(&wall_to_monotonic,
+ -boot.tv_sec, -boot.tv_nsec);
++ update_rt_offset();
+ total_sleep_time.tv_sec = 0;
+ total_sleep_time.tv_nsec = 0;
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+@@ -599,6 +612,12 @@ void __init timekeeping_init(void)
+ /* time in seconds when suspend began */
+ static struct timespec timekeeping_suspend_time;
+
++static void update_sleep_time(struct timespec t)
++{
++ total_sleep_time = t;
++ offs_boot = timespec_to_ktime(t);
++}
++
+ /**
+ * __timekeeping_inject_sleeptime - Internal function to add sleep interval
+ * @delta: pointer to a timespec delta value
+@@ -616,7 +635,7 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
+
+ xtime = timespec_add(xtime, *delta);
+ wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta);
+- total_sleep_time = timespec_add(total_sleep_time, *delta);
++ update_sleep_time(timespec_add(total_sleep_time, *delta));
+ }
+
+
+@@ -645,10 +664,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
+
+ __timekeeping_inject_sleeptime(delta);
+
+- timekeeper.ntp_error = 0;
+- ntp_clear();
+- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+- timekeeper.mult);
++ timekeeping_update(true);
+
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+
+@@ -683,6 +699,7 @@ static void timekeeping_resume(void)
+ timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
+ timekeeper.ntp_error = 0;
+ timekeeping_suspended = 0;
++ timekeeping_update(false);
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+
+ touch_softlockup_watchdog();
+@@ -942,9 +959,14 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
+
+ timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
+ while (timekeeper.xtime_nsec >= nsecps) {
++ int leap;
+ timekeeper.xtime_nsec -= nsecps;
+ xtime.tv_sec++;
+- second_overflow();
++ leap = second_overflow(xtime.tv_sec);
++ xtime.tv_sec += leap;
++ wall_to_monotonic.tv_sec -= leap;
++ if (leap)
++ clock_was_set_delayed();
+ }
+
+ /* Accumulate raw time */
+@@ -1050,14 +1072,17 @@ static void update_wall_time(void)
+ * xtime.tv_nsec isn't larger then NSEC_PER_SEC
+ */
+ if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
++ int leap;
+ xtime.tv_nsec -= NSEC_PER_SEC;
+ xtime.tv_sec++;
+- second_overflow();
++ leap = second_overflow(xtime.tv_sec);
++ xtime.tv_sec += leap;
++ wall_to_monotonic.tv_sec -= leap;
++ if (leap)
++ clock_was_set_delayed();
+ }
+
+- /* check to see if there is a new clocksource to use */
+- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+- timekeeper.mult);
++ timekeeping_update(false);
+ }
+
+ /**
+@@ -1216,6 +1241,40 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
+ } while (read_seqretry(&xtime_lock, seq));
+ }
+
++#ifdef CONFIG_HIGH_RES_TIMERS
++/**
++ * ktime_get_update_offsets - hrtimer helper
++ * @real: pointer to storage for monotonic -> realtime offset
++ * @_boot: pointer to storage for monotonic -> boottime offset
++ *
++ * Returns current monotonic time and updates the offsets
++ * Called from hrtimer_interupt() or retrigger_next_event()
++ */
++ktime_t ktime_get_update_offsets(ktime_t *real, ktime_t *boot)
++{
++ ktime_t now;
++ unsigned int seq;
++ u64 secs, nsecs;
++
++ do {
++ seq = read_seqbegin(&xtime_lock);
++
++ secs = xtime.tv_sec;
++ nsecs = xtime.tv_nsec;
++ nsecs += timekeeping_get_ns();
++ /* If arch requires, add in gettimeoffset() */
++ nsecs += arch_gettimeoffset();
++
++ *real = offs_real;
++ *boot = offs_boot;
++ } while (read_seqretry(&xtime_lock, seq));
++
++ now = ktime_add_ns(ktime_set(secs, 0), nsecs);
++ now = ktime_sub(now, *real);
++ return now;
++}
++#endif
++
+ /**
+ * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
+ */
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 8fb8a40..50f1c60 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -592,8 +592,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
+ if (err) {
+ putback_lru_pages(&cc->migratepages);
+ cc->nr_migratepages = 0;
++ if (err == -ENOMEM) {
++ ret = COMPACT_PARTIAL;
++ goto out;
++ }
+ }
+-
+ }
+
+ out:
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 5f5c545..7c535b0 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -53,6 +53,84 @@ static unsigned long __initdata default_hstate_size;
+ */
+ static DEFINE_SPINLOCK(hugetlb_lock);
+
++static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
++{
++ bool free = (spool->count == 0) && (spool->used_hpages == 0);
++
++ spin_unlock(&spool->lock);
++
++ /* If no pages are used, and no other handles to the subpool
++ * remain, free the subpool the subpool remain */
++ if (free)
++ kfree(spool);
++}
++
++struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
++{
++ struct hugepage_subpool *spool;
++
++ spool = kmalloc(sizeof(*spool), GFP_KERNEL);
++ if (!spool)
++ return NULL;
++
++ spin_lock_init(&spool->lock);
++ spool->count = 1;
++ spool->max_hpages = nr_blocks;
++ spool->used_hpages = 0;
++
++ return spool;
++}
++
++void hugepage_put_subpool(struct hugepage_subpool *spool)
++{
++ spin_lock(&spool->lock);
++ BUG_ON(!spool->count);
++ spool->count--;
++ unlock_or_release_subpool(spool);
++}
++
++static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
++ long delta)
++{
++ int ret = 0;
++
++ if (!spool)
++ return 0;
++
++ spin_lock(&spool->lock);
++ if ((spool->used_hpages + delta) <= spool->max_hpages) {
++ spool->used_hpages += delta;
++ } else {
++ ret = -ENOMEM;
++ }
++ spin_unlock(&spool->lock);
++
++ return ret;
++}
++
++static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
++ long delta)
++{
++ if (!spool)
++ return;
++
++ spin_lock(&spool->lock);
++ spool->used_hpages -= delta;
++ /* If hugetlbfs_put_super couldn't free spool due to
++ * an outstanding quota reference, free it now. */
++ unlock_or_release_subpool(spool);
++}
++
++static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
++{
++ return HUGETLBFS_SB(inode->i_sb)->spool;
++}
++
++static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
++{
++ return subpool_inode(vma->vm_file->f_dentry->d_inode);
++}
++
+ /*
+ * Region tracking -- allows tracking of reservations and instantiated pages
+ * across the pages in a mapping.
+@@ -533,9 +611,9 @@ static void free_huge_page(struct page *page)
+ */
+ struct hstate *h = page_hstate(page);
+ int nid = page_to_nid(page);
+- struct address_space *mapping;
++ struct hugepage_subpool *spool =
++ (struct hugepage_subpool *)page_private(page);
+
+- mapping = (struct address_space *) page_private(page);
+ set_page_private(page, 0);
+ page->mapping = NULL;
+ BUG_ON(page_count(page));
+@@ -551,8 +629,7 @@ static void free_huge_page(struct page *page)
+ enqueue_huge_page(h, page);
+ }
+ spin_unlock(&hugetlb_lock);
+- if (mapping)
+- hugetlb_put_quota(mapping, 1);
++ hugepage_subpool_put_pages(spool, 1);
+ }
+
+ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
+@@ -966,11 +1043,12 @@ static void return_unused_surplus_pages(struct hstate *h,
+ /*
+ * Determine if the huge page at addr within the vma has an associated
+ * reservation. Where it does not we will need to logically increase
+- * reservation and actually increase quota before an allocation can occur.
+- * Where any new reservation would be required the reservation change is
+- * prepared, but not committed. Once the page has been quota'd allocated
+- * an instantiated the change should be committed via vma_commit_reservation.
+- * No action is required on failure.
++ * reservation and actually increase subpool usage before an allocation
++ * can occur. Where any new reservation would be required the
++ * reservation change is prepared, but not committed. Once the page
++ * has been allocated from the subpool and instantiated the change should
++ * be committed via vma_commit_reservation. No action is required on
++ * failure.
+ */
+ static long vma_needs_reservation(struct hstate *h,
+ struct vm_area_struct *vma, unsigned long addr)
+@@ -1019,24 +1097,24 @@ static void vma_commit_reservation(struct hstate *h,
+ static struct page *alloc_huge_page(struct vm_area_struct *vma,
+ unsigned long addr, int avoid_reserve)
+ {
++ struct hugepage_subpool *spool = subpool_vma(vma);
+ struct hstate *h = hstate_vma(vma);
+ struct page *page;
+- struct address_space *mapping = vma->vm_file->f_mapping;
+- struct inode *inode = mapping->host;
+ long chg;
+
+ /*
+- * Processes that did not create the mapping will have no reserves and
+- * will not have accounted against quota. Check that the quota can be
+- * made before satisfying the allocation
+- * MAP_NORESERVE mappings may also need pages and quota allocated
+- * if no reserve mapping overlaps.
++ * Processes that did not create the mapping will have no
++ * reserves and will not have accounted against subpool
++ * limit. Check that the subpool limit can be made before
++ * satisfying the allocation MAP_NORESERVE mappings may also
++ * need pages and subpool limit allocated allocated if no reserve
++ * mapping overlaps.
+ */
+ chg = vma_needs_reservation(h, vma, addr);
+ if (chg < 0)
+ return ERR_PTR(-VM_FAULT_OOM);
+ if (chg)
+- if (hugetlb_get_quota(inode->i_mapping, chg))
++ if (hugepage_subpool_get_pages(spool, chg))
+ return ERR_PTR(-VM_FAULT_SIGBUS);
+
+ spin_lock(&hugetlb_lock);
+@@ -1046,12 +1124,12 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
+ if (!page) {
+ page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
+ if (!page) {
+- hugetlb_put_quota(inode->i_mapping, chg);
++ hugepage_subpool_put_pages(spool, chg);
+ return ERR_PTR(-VM_FAULT_SIGBUS);
+ }
+ }
+
+- set_page_private(page, (unsigned long) mapping);
++ set_page_private(page, (unsigned long)spool);
+
+ vma_commit_reservation(h, vma, addr);
+
+@@ -2081,6 +2159,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
+ {
+ struct hstate *h = hstate_vma(vma);
+ struct resv_map *reservations = vma_resv_map(vma);
++ struct hugepage_subpool *spool = subpool_vma(vma);
+ unsigned long reserve;
+ unsigned long start;
+ unsigned long end;
+@@ -2096,7 +2175,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
+
+ if (reserve) {
+ hugetlb_acct_memory(h, -reserve);
+- hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
++ hugepage_subpool_put_pages(spool, reserve);
+ }
+ }
+ }
+@@ -2326,7 +2405,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+ address = address & huge_page_mask(h);
+ pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
+ + (vma->vm_pgoff >> PAGE_SHIFT);
+- mapping = (struct address_space *)page_private(page);
++ mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
+
+ /*
+ * Take the mapping lock for the duration of the table walk. As
+@@ -2865,11 +2944,12 @@ int hugetlb_reserve_pages(struct inode *inode,
+ {
+ long ret, chg;
+ struct hstate *h = hstate_inode(inode);
++ struct hugepage_subpool *spool = subpool_inode(inode);
+
+ /*
+ * Only apply hugepage reservation if asked. At fault time, an
+ * attempt will be made for VM_NORESERVE to allocate a page
+- * and filesystem quota without using reserves
++ * without using reserves
+ */
+ if (vm_flags & VM_NORESERVE)
+ return 0;
+@@ -2898,19 +2978,19 @@ int hugetlb_reserve_pages(struct inode *inode,
+ goto out_err;
+ }
+
+- /* There must be enough filesystem quota for the mapping */
+- if (hugetlb_get_quota(inode->i_mapping, chg)) {
++ /* There must be enough pages in the subpool for the mapping */
++ if (hugepage_subpool_get_pages(spool, chg)) {
+ ret = -ENOSPC;
+ goto out_err;
+ }
+
+ /*
+ * Check enough hugepages are available for the reservation.
+- * Hand back the quota if there are not
++ * Hand the pages back to the subpool if there are not
+ */
+ ret = hugetlb_acct_memory(h, chg);
+ if (ret < 0) {
+- hugetlb_put_quota(inode->i_mapping, chg);
++ hugepage_subpool_put_pages(spool, chg);
+ goto out_err;
+ }
+
+@@ -2938,12 +3018,13 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
+ {
+ struct hstate *h = hstate_inode(inode);
+ long chg = region_truncate(&inode->i_mapping->private_list, offset);
++ struct hugepage_subpool *spool = subpool_inode(inode);
+
+ spin_lock(&inode->i_lock);
+ inode->i_blocks -= (blocks_per_huge_page(h) * freed);
+ spin_unlock(&inode->i_lock);
+
+- hugetlb_put_quota(inode->i_mapping, (chg - freed));
++ hugepage_subpool_put_pages(spool, (chg - freed));
+ hugetlb_acct_memory(h, -(chg - freed));
+ }
+
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index fbe2d2c..8342119 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2824,7 +2824,10 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
+ * them before going back to sleep.
+ */
+ set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
+- schedule();
++
++ if (!kthread_should_stop())
++ schedule();
++
+ set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
+ } else {
+ if (remaining)
+@@ -3090,14 +3093,17 @@ int kswapd_run(int nid)
+ }
+
+ /*
+- * Called by memory hotplug when all memory in a node is offlined.
++ * Called by memory hotplug when all memory in a node is offlined. Caller must
++ * hold lock_memory_hotplug().
+ */
+ void kswapd_stop(int nid)
+ {
+ struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
+
+- if (kswapd)
++ if (kswapd) {
+ kthread_stop(kswapd);
++ NODE_DATA(nid)->kswapd = NULL;
++ }
+ }
+
+ static int __init kswapd_init(void)
+diff --git a/net/can/raw.c b/net/can/raw.c
+index cde1b4a..46cca3a 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -681,9 +681,6 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
+ if (err < 0)
+ goto free_skb;
+
+- /* to be able to check the received tx sock reference in raw_rcv() */
+- skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
+-
+ skb->dev = dev;
+ skb->sk = sk;
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 1cbddc9..5738654 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2079,25 +2079,6 @@ static int dev_gso_segment(struct sk_buff *skb, int features)
+ return 0;
+ }
+
+-/*
+- * Try to orphan skb early, right before transmission by the device.
+- * We cannot orphan skb if tx timestamp is requested or the sk-reference
+- * is needed on driver level for other reasons, e.g. see net/can/raw.c
+- */
+-static inline void skb_orphan_try(struct sk_buff *skb)
+-{
+- struct sock *sk = skb->sk;
+-
+- if (sk && !skb_shinfo(skb)->tx_flags) {
+- /* skb_tx_hash() wont be able to get sk.
+- * We copy sk_hash into skb->rxhash
+- */
+- if (!skb->rxhash)
+- skb->rxhash = sk->sk_hash;
+- skb_orphan(skb);
+- }
+-}
+-
+ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+ {
+ return ((features & NETIF_F_GEN_CSUM) ||
+@@ -2182,8 +2163,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ if (!list_empty(&ptype_all))
+ dev_queue_xmit_nit(skb, dev);
+
+- skb_orphan_try(skb);
+-
+ features = netif_skb_features(skb);
+
+ if (vlan_tx_tag_present(skb) &&
+@@ -2293,7 +2272,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+ if (skb->sk && skb->sk->sk_hash)
+ hash = skb->sk->sk_hash;
+ else
+- hash = (__force u16) skb->protocol ^ skb->rxhash;
++ hash = (__force u16) skb->protocol;
+ hash = jhash_1word(hash, hashrnd);
+
+ return (u16) (((u64) hash * qcount) >> 32) + qoffset;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 9726927..32e6ca2 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5836,6 +5836,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ goto discard;
+
+ if (th->syn) {
++ if (th->fin)
++ goto discard;
+ if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
+ return 1;
+
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index 274d150..cf98d62 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -380,7 +380,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
+ skb_trim(skb, skb->dev->mtu);
+ }
+ skb->protocol = ETH_P_AF_IUCV;
+- skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index d38815d..74d5292 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -813,7 +813,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ ntype == NL80211_IFTYPE_P2P_CLIENT))
+ return -EBUSY;
+
+- if (ntype != otype) {
++ if (ntype != otype && netif_running(dev)) {
+ err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
+ ntype);
+ if (err)
+diff --git a/scripts/depmod.sh b/scripts/depmod.sh
+index a272356..2ae4817 100755
+--- a/scripts/depmod.sh
++++ b/scripts/depmod.sh
+@@ -9,12 +9,6 @@ fi
+ DEPMOD=$1
+ KERNELRELEASE=$2
+
+-if ! "$DEPMOD" -V 2>/dev/null | grep -q module-init-tools; then
+- echo "Warning: you may need to install module-init-tools" >&2
+- echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt" >&2
+- sleep 1
+-fi
+-
+ if ! test -r System.map -a -x "$DEPMOD"; then
+ exit 0
+ fi
+diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
+index 9f614b4..272407c 100644
+--- a/virt/kvm/irq_comm.c
++++ b/virt/kvm/irq_comm.c
+@@ -318,6 +318,7 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
+ */
+ hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
+ if (ei->type == KVM_IRQ_ROUTING_MSI ||
++ ue->type == KVM_IRQ_ROUTING_MSI ||
+ ue->u.irqchip.irqchip == ei->irqchip.irqchip)
+ return r;
+
diff --git a/1024_linux-3.2.25.patch b/1024_linux-3.2.25.patch
new file mode 100644
index 00000000..e95c213e
--- /dev/null
+++ b/1024_linux-3.2.25.patch
@@ -0,0 +1,4503 @@
+diff --git a/Makefile b/Makefile
+index 80bb4fd..e13e4e7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 24
++SUBLEVEL = 25
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 559da19..578e5a0 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -1016,7 +1016,8 @@
+ /* Macros for setting and retrieving special purpose registers */
+ #ifndef __ASSEMBLY__
+ #define mfmsr() ({unsigned long rval; \
+- asm volatile("mfmsr %0" : "=r" (rval)); rval;})
++ asm volatile("mfmsr %0" : "=r" (rval) : \
++ : "memory"); rval;})
+ #ifdef CONFIG_PPC_BOOK3S_64
+ #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
+ : : "r" (v) : "memory")
+diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
+index bf99cfa..6324008 100644
+--- a/arch/powerpc/kernel/ftrace.c
++++ b/arch/powerpc/kernel/ftrace.c
+@@ -245,9 +245,9 @@ __ftrace_make_nop(struct module *mod,
+
+ /*
+ * On PPC32 the trampoline looks like:
+- * 0x3d, 0x60, 0x00, 0x00 lis r11,sym@ha
+- * 0x39, 0x6b, 0x00, 0x00 addi r11,r11,sym@l
+- * 0x7d, 0x69, 0x03, 0xa6 mtctr r11
++ * 0x3d, 0x80, 0x00, 0x00 lis r12,sym@ha
++ * 0x39, 0x8c, 0x00, 0x00 addi r12,r12,sym@l
++ * 0x7d, 0x89, 0x03, 0xa6 mtctr r12
+ * 0x4e, 0x80, 0x04, 0x20 bctr
+ */
+
+@@ -262,9 +262,9 @@ __ftrace_make_nop(struct module *mod,
+ pr_devel(" %08x %08x ", jmp[0], jmp[1]);
+
+ /* verify that this is what we expect it to be */
+- if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
+- ((jmp[1] & 0xffff0000) != 0x396b0000) ||
+- (jmp[2] != 0x7d6903a6) ||
++ if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
++ ((jmp[1] & 0xffff0000) != 0x398c0000) ||
++ (jmp[2] != 0x7d8903a6) ||
+ (jmp[3] != 0x4e800420)) {
+ printk(KERN_ERR "Not a trampoline\n");
+ return -EINVAL;
+diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
+index 6e0073e..07c7bf4 100644
+--- a/arch/s390/kernel/processor.c
++++ b/arch/s390/kernel/processor.c
+@@ -26,12 +26,14 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
+ void __cpuinit cpu_init(void)
+ {
+ struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
++ struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
+
+ get_cpu_id(id);
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ BUG_ON(current->mm);
+ enter_lazy_tlb(&init_mm, current);
++ memset(idle, 0, sizeof(*idle));
+ }
+
+ /*
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 3ea8728..1df64a8 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -1020,14 +1020,11 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
+ unsigned int cpu = (unsigned int)(long)hcpu;
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+ struct sys_device *s = &c->sysdev;
+- struct s390_idle_data *idle;
+ int err = 0;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+- idle = &per_cpu(s390_idle, cpu);
+- memset(idle, 0, sizeof(struct s390_idle_data));
+ err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
+ break;
+ case CPU_DEAD:
+diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
+index 563a09d..29c95d7 100644
+--- a/arch/x86/kernel/microcode_core.c
++++ b/arch/x86/kernel/microcode_core.c
+@@ -297,20 +297,31 @@ static ssize_t reload_store(struct sys_device *dev,
+ const char *buf, size_t size)
+ {
+ unsigned long val;
+- int cpu = dev->id;
+- int ret = 0;
+- char *end;
++ int cpu;
++ ssize_t ret = 0, tmp_ret;
+
+- val = simple_strtoul(buf, &end, 0);
+- if (end == buf)
++ /* allow reload only from the BSP */
++ if (boot_cpu_data.cpu_index != dev->id)
+ return -EINVAL;
+
+- if (val == 1) {
+- get_online_cpus();
+- if (cpu_online(cpu))
+- ret = reload_for_cpu(cpu);
+- put_online_cpus();
++ ret = kstrtoul(buf, 0, &val);
++ if (ret)
++ return ret;
++
++ if (val != 1)
++ return size;
++
++ get_online_cpus();
++ for_each_online_cpu(cpu) {
++ tmp_ret = reload_for_cpu(cpu);
++ if (tmp_ret != 0)
++ pr_warn("Error reloading microcode on CPU %d\n", cpu);
++
++ /* save retval of the first encountered reload error */
++ if (!ret)
++ ret = tmp_ret;
+ }
++ put_online_cpus();
+
+ if (!ret)
+ ret = size;
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index 6dd8955..0951b81 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -521,3 +521,20 @@ static void sb600_disable_hpet_bar(struct pci_dev *dev)
+ }
+ }
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar);
++
++/*
++ * Twinhead H12Y needs us to block out a region otherwise we map devices
++ * there and any access kills the box.
++ *
++ * See: https://bugzilla.kernel.org/show_bug.cgi?id=10231
++ *
++ * Match off the LPC and svid/sdid (older kernels lose the bridge subvendor)
++ */
++static void __devinit twinhead_reserve_killing_zone(struct pci_dev *dev)
++{
++ if (dev->subsystem_vendor == 0x14FF && dev->subsystem_device == 0xA003) {
++ pr_info("Reserving memory on Twinhead H12Y\n");
++ request_mem_region(0xFFB00000, 0x100000, "twinhead");
++ }
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 15de223..49d9e91 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -607,7 +607,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);
+
+ int blk_get_queue(struct request_queue *q)
+ {
+- if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
++ if (likely(!blk_queue_dead(q))) {
+ kobject_get(&q->kobj);
+ return 0;
+ }
+@@ -754,7 +754,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
+ const bool is_sync = rw_is_sync(rw_flags) != 0;
+ int may_queue;
+
+- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
++ if (unlikely(blk_queue_dead(q)))
+ return NULL;
+
+ may_queue = elv_may_queue(q, rw_flags);
+@@ -874,7 +874,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
+ struct io_context *ioc;
+ struct request_list *rl = &q->rq;
+
+- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
++ if (unlikely(blk_queue_dead(q)))
+ return NULL;
+
+ prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
+diff --git a/block/blk-exec.c b/block/blk-exec.c
+index a1ebceb..6053285 100644
+--- a/block/blk-exec.c
++++ b/block/blk-exec.c
+@@ -50,7 +50,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
+ {
+ int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+
+- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
++ if (unlikely(blk_queue_dead(q))) {
+ rq->errors = -ENXIO;
+ if (rq->end_io)
+ rq->end_io(rq, rq->errors);
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index e7f9f65..f0b2ca8 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -425,7 +425,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+ if (!entry->show)
+ return -EIO;
+ mutex_lock(&q->sysfs_lock);
+- if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
++ if (blk_queue_dead(q)) {
+ mutex_unlock(&q->sysfs_lock);
+ return -ENOENT;
+ }
+@@ -447,7 +447,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
+
+ q = container_of(kobj, struct request_queue, kobj);
+ mutex_lock(&q->sysfs_lock);
+- if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
++ if (blk_queue_dead(q)) {
+ mutex_unlock(&q->sysfs_lock);
+ return -ENOENT;
+ }
+diff --git a/block/blk-throttle.c b/block/blk-throttle.c
+index 4553245..5eed6a7 100644
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -310,7 +310,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+ struct request_queue *q = td->queue;
+
+ /* no throttling for dead queue */
+- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
++ if (unlikely(blk_queue_dead(q)))
+ return NULL;
+
+ rcu_read_lock();
+@@ -335,7 +335,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+ spin_lock_irq(q->queue_lock);
+
+ /* Make sure @q is still alive */
+- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
++ if (unlikely(blk_queue_dead(q))) {
+ kfree(tg);
+ return NULL;
+ }
+diff --git a/block/blk.h b/block/blk.h
+index 3f6551b..e38691d 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -85,7 +85,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
+ q->flush_queue_delayed = 1;
+ return NULL;
+ }
+- if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) ||
++ if (unlikely(blk_queue_dead(q)) ||
+ !q->elevator->ops->elevator_dispatch_fn(q, 0))
+ return NULL;
+ }
+diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
+index 6512b20..d1fcbc0 100644
+--- a/drivers/acpi/ac.c
++++ b/drivers/acpi/ac.c
+@@ -292,7 +292,9 @@ static int acpi_ac_add(struct acpi_device *device)
+ ac->charger.properties = ac_props;
+ ac->charger.num_properties = ARRAY_SIZE(ac_props);
+ ac->charger.get_property = get_ac_property;
+- power_supply_register(&ac->device->dev, &ac->charger);
++ result = power_supply_register(&ac->device->dev, &ac->charger);
++ if (result)
++ goto end;
+
+ printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
+ acpi_device_name(device), acpi_device_bid(device),
+diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/nva3_copy.fuc
+index eaf35f8..d894731 100644
+--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc
++++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc
+@@ -118,9 +118,9 @@ dispatch_dma:
+ // mthd 0x030c-0x0340, various stuff
+ .b16 0xc3 14
+ .b32 ctx_src_address_high ~0x000000ff
+-.b32 ctx_src_address_low ~0xfffffff0
++.b32 ctx_src_address_low ~0xffffffff
+ .b32 ctx_dst_address_high ~0x000000ff
+-.b32 ctx_dst_address_low ~0xfffffff0
++.b32 ctx_dst_address_low ~0xffffffff
+ .b32 ctx_src_pitch ~0x0007ffff
+ .b32 ctx_dst_pitch ~0x0007ffff
+ .b32 ctx_xcnt ~0x0000ffff
+diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+index 2731de2..e2a0e88 100644
+--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
++++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+@@ -1,37 +1,72 @@
+-uint32_t nva3_pcopy_data[] = {
++u32 nva3_pcopy_data[] = {
++/* 0x0000: ctx_object */
+ 0x00000000,
++/* 0x0004: ctx_dma */
++/* 0x0004: ctx_dma_query */
+ 0x00000000,
++/* 0x0008: ctx_dma_src */
+ 0x00000000,
++/* 0x000c: ctx_dma_dst */
+ 0x00000000,
++/* 0x0010: ctx_query_address_high */
+ 0x00000000,
++/* 0x0014: ctx_query_address_low */
+ 0x00000000,
++/* 0x0018: ctx_query_counter */
+ 0x00000000,
++/* 0x001c: ctx_src_address_high */
+ 0x00000000,
++/* 0x0020: ctx_src_address_low */
+ 0x00000000,
++/* 0x0024: ctx_src_pitch */
+ 0x00000000,
++/* 0x0028: ctx_src_tile_mode */
+ 0x00000000,
++/* 0x002c: ctx_src_xsize */
+ 0x00000000,
++/* 0x0030: ctx_src_ysize */
+ 0x00000000,
++/* 0x0034: ctx_src_zsize */
+ 0x00000000,
++/* 0x0038: ctx_src_zoff */
+ 0x00000000,
++/* 0x003c: ctx_src_xoff */
+ 0x00000000,
++/* 0x0040: ctx_src_yoff */
+ 0x00000000,
++/* 0x0044: ctx_src_cpp */
+ 0x00000000,
++/* 0x0048: ctx_dst_address_high */
+ 0x00000000,
++/* 0x004c: ctx_dst_address_low */
+ 0x00000000,
++/* 0x0050: ctx_dst_pitch */
+ 0x00000000,
++/* 0x0054: ctx_dst_tile_mode */
+ 0x00000000,
++/* 0x0058: ctx_dst_xsize */
+ 0x00000000,
++/* 0x005c: ctx_dst_ysize */
+ 0x00000000,
++/* 0x0060: ctx_dst_zsize */
+ 0x00000000,
++/* 0x0064: ctx_dst_zoff */
+ 0x00000000,
++/* 0x0068: ctx_dst_xoff */
+ 0x00000000,
++/* 0x006c: ctx_dst_yoff */
+ 0x00000000,
++/* 0x0070: ctx_dst_cpp */
+ 0x00000000,
++/* 0x0074: ctx_format */
+ 0x00000000,
++/* 0x0078: ctx_swz_const0 */
+ 0x00000000,
++/* 0x007c: ctx_swz_const1 */
+ 0x00000000,
++/* 0x0080: ctx_xcnt */
+ 0x00000000,
++/* 0x0084: ctx_ycnt */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+@@ -63,6 +98,7 @@ uint32_t nva3_pcopy_data[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
++/* 0x0100: dispatch_table */
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+@@ -73,6 +109,7 @@ uint32_t nva3_pcopy_data[] = {
+ 0x00010162,
+ 0x00000000,
+ 0x00030060,
++/* 0x0128: dispatch_dma */
+ 0x00010170,
+ 0x00000000,
+ 0x00010170,
+@@ -118,11 +155,11 @@ uint32_t nva3_pcopy_data[] = {
+ 0x0000001c,
+ 0xffffff00,
+ 0x00000020,
+- 0x0000000f,
++ 0x00000000,
+ 0x00000048,
+ 0xffffff00,
+ 0x0000004c,
+- 0x0000000f,
++ 0x00000000,
+ 0x00000024,
+ 0xfff80000,
+ 0x00000050,
+@@ -146,7 +183,8 @@ uint32_t nva3_pcopy_data[] = {
+ 0x00000800,
+ };
+
+-uint32_t nva3_pcopy_code[] = {
++u32 nva3_pcopy_code[] = {
++/* 0x0000: main */
+ 0x04fe04bd,
+ 0x3517f000,
+ 0xf10010fe,
+@@ -158,23 +196,31 @@ uint32_t nva3_pcopy_code[] = {
+ 0x17f11031,
+ 0x27f01200,
+ 0x0012d003,
++/* 0x002f: spin */
+ 0xf40031f4,
+ 0x0ef40028,
++/* 0x0035: ih */
+ 0x8001cffd,
+ 0xf40812c4,
+ 0x21f4060b,
++/* 0x0041: ih_no_chsw */
+ 0x0412c472,
+ 0xf4060bf4,
++/* 0x004a: ih_no_cmd */
+ 0x11c4c321,
+ 0x4001d00c,
++/* 0x0052: swctx */
+ 0x47f101f8,
+ 0x4bfe7700,
+ 0x0007fe00,
+ 0xf00204b9,
+ 0x01f40643,
+ 0x0604fa09,
++/* 0x006b: swctx_load */
+ 0xfa060ef4,
++/* 0x006e: swctx_done */
+ 0x03f80504,
++/* 0x0072: chsw */
+ 0x27f100f8,
+ 0x23cf1400,
+ 0x1e3fc800,
+@@ -183,18 +229,22 @@ uint32_t nva3_pcopy_code[] = {
+ 0x1e3af052,
+ 0xf00023d0,
+ 0x24d00147,
++/* 0x0093: chsw_no_unload */
+ 0xcf00f880,
+ 0x3dc84023,
+ 0x220bf41e,
+ 0xf40131f4,
+ 0x57f05221,
+ 0x0367f004,
++/* 0x00a8: chsw_load_ctx_dma */
+ 0xa07856bc,
+ 0xb6018068,
+ 0x87d00884,
+ 0x0162b600,
++/* 0x00bb: chsw_finish_load */
+ 0xf0f018f4,
+ 0x23d00237,
++/* 0x00c3: dispatch */
+ 0xf100f880,
+ 0xcf190037,
+ 0x33cf4032,
+@@ -202,6 +252,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0x1024b607,
+ 0x010057f1,
+ 0x74bd64bd,
++/* 0x00dc: dispatch_loop */
+ 0x58005658,
+ 0x50b60157,
+ 0x0446b804,
+@@ -211,6 +262,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0xb60276bb,
+ 0x57bb0374,
+ 0xdf0ef400,
++/* 0x0100: dispatch_valid_mthd */
+ 0xb60246bb,
+ 0x45bb0344,
+ 0x01459800,
+@@ -220,31 +272,41 @@ uint32_t nva3_pcopy_code[] = {
+ 0xb0014658,
+ 0x1bf40064,
+ 0x00538009,
++/* 0x0127: dispatch_cmd */
+ 0xf4300ef4,
+ 0x55f90132,
+ 0xf40c01f4,
++/* 0x0132: dispatch_invalid_bitfield */
+ 0x25f0250e,
++/* 0x0135: dispatch_illegal_mthd */
+ 0x0125f002,
++/* 0x0138: dispatch_error */
+ 0x100047f1,
+ 0xd00042d0,
+ 0x27f04043,
+ 0x0002d040,
++/* 0x0148: hostirq_wait */
+ 0xf08002cf,
+ 0x24b04024,
+ 0xf71bf400,
++/* 0x0154: dispatch_done */
+ 0x1d0027f1,
+ 0xd00137f0,
+ 0x00f80023,
++/* 0x0160: cmd_nop */
++/* 0x0162: cmd_pm_trigger */
+ 0x27f100f8,
+ 0x34bd2200,
+ 0xd00233f0,
+ 0x00f80023,
++/* 0x0170: cmd_dma */
+ 0x012842b7,
+ 0xf00145b6,
+ 0x43801e39,
+ 0x0040b701,
+ 0x0644b606,
+ 0xf80043d0,
++/* 0x0189: cmd_exec_set_format */
+ 0xf030f400,
+ 0xb00001b0,
+ 0x01b00101,
+@@ -256,20 +318,26 @@ uint32_t nva3_pcopy_code[] = {
+ 0x70b63847,
+ 0x0232f401,
+ 0x94bd84bd,
++/* 0x01b4: ncomp_loop */
+ 0xb60f4ac4,
+ 0xb4bd0445,
++/* 0x01bc: bpc_loop */
+ 0xf404a430,
+ 0xa5ff0f18,
+ 0x00cbbbc0,
+ 0xf40231f4,
++/* 0x01ce: cmp_c0 */
+ 0x1bf4220e,
+ 0x10c7f00c,
+ 0xf400cbbb,
++/* 0x01da: cmp_c1 */
+ 0xa430160e,
+ 0x0c18f406,
+ 0xbb14c7f0,
+ 0x0ef400cb,
++/* 0x01e9: cmp_zero */
+ 0x80c7f107,
++/* 0x01ed: bpc_next */
+ 0x01c83800,
+ 0xb60180b6,
+ 0xb5b801b0,
+@@ -280,6 +348,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0x98110680,
+ 0x68fd2008,
+ 0x0502f400,
++/* 0x0216: dst_xcnt */
+ 0x75fd64bd,
+ 0x1c078000,
+ 0xf10078fd,
+@@ -304,6 +373,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0x980056d0,
+ 0x56d01f06,
+ 0x1030f440,
++/* 0x0276: cmd_exec_set_surface_tiled */
+ 0x579800f8,
+ 0x6879c70a,
+ 0xb66478c7,
+@@ -311,9 +381,11 @@ uint32_t nva3_pcopy_code[] = {
+ 0x0e76b060,
+ 0xf0091bf4,
+ 0x0ef40477,
++/* 0x0291: xtile64 */
+ 0x027cf00f,
+ 0xfd1170b6,
+ 0x77f00947,
++/* 0x029d: xtileok */
+ 0x0f5a9806,
+ 0xfd115b98,
+ 0xb7f000ab,
+@@ -371,6 +443,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0x67d00600,
+ 0x0060b700,
+ 0x0068d004,
++/* 0x0382: cmd_exec_set_surface_linear */
+ 0x6cf000f8,
+ 0x0260b702,
+ 0x0864b602,
+@@ -381,13 +454,16 @@ uint32_t nva3_pcopy_code[] = {
+ 0xb70067d0,
+ 0x98040060,
+ 0x67d00957,
++/* 0x03ab: cmd_exec_wait */
+ 0xf900f800,
+ 0xf110f900,
+ 0xb6080007,
++/* 0x03b6: loop */
+ 0x01cf0604,
+ 0x0114f000,
+ 0xfcfa1bf4,
+ 0xf800fc10,
++/* 0x03c5: cmd_exec_query */
+ 0x0d34c800,
+ 0xf5701bf4,
+ 0xf103ab21,
+@@ -417,6 +493,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0x47f10153,
+ 0x44b60800,
+ 0x0045d006,
++/* 0x0438: query_counter */
+ 0x03ab21f5,
+ 0x080c47f1,
+ 0x980644b6,
+@@ -439,11 +516,13 @@ uint32_t nva3_pcopy_code[] = {
+ 0x47f10153,
+ 0x44b60800,
+ 0x0045d006,
++/* 0x0492: cmd_exec */
+ 0x21f500f8,
+ 0x3fc803ab,
+ 0x0e0bf400,
+ 0x018921f5,
+ 0x020047f1,
++/* 0x04a7: cmd_exec_no_format */
+ 0xf11e0ef4,
+ 0xb6081067,
+ 0x77f00664,
+@@ -451,19 +530,24 @@ uint32_t nva3_pcopy_code[] = {
+ 0x981c0780,
+ 0x67d02007,
+ 0x4067d000,
++/* 0x04c2: cmd_exec_init_src_surface */
+ 0x32f444bd,
+ 0xc854bd02,
+ 0x0bf4043f,
+ 0x8221f50a,
+ 0x0a0ef403,
++/* 0x04d4: src_tiled */
+ 0x027621f5,
++/* 0x04db: cmd_exec_init_dst_surface */
+ 0xf40749f0,
+ 0x57f00231,
+ 0x083fc82c,
+ 0xf50a0bf4,
+ 0xf4038221,
++/* 0x04ee: dst_tiled */
+ 0x21f50a0e,
+ 0x49f00276,
++/* 0x04f5: cmd_exec_kick */
+ 0x0057f108,
+ 0x0654b608,
+ 0xd0210698,
+@@ -473,6 +557,8 @@ uint32_t nva3_pcopy_code[] = {
+ 0xc80054d0,
+ 0x0bf40c3f,
+ 0xc521f507,
++/* 0x0519: cmd_exec_done */
++/* 0x051b: cmd_wrcache_flush */
+ 0xf100f803,
+ 0xbd220027,
+ 0x0133f034,
+diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+index 4199038..9e87036 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
++++ b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+@@ -1,34 +1,65 @@
+-uint32_t nvc0_pcopy_data[] = {
++u32 nvc0_pcopy_data[] = {
++/* 0x0000: ctx_object */
+ 0x00000000,
++/* 0x0004: ctx_query_address_high */
+ 0x00000000,
++/* 0x0008: ctx_query_address_low */
+ 0x00000000,
++/* 0x000c: ctx_query_counter */
+ 0x00000000,
++/* 0x0010: ctx_src_address_high */
+ 0x00000000,
++/* 0x0014: ctx_src_address_low */
+ 0x00000000,
++/* 0x0018: ctx_src_pitch */
+ 0x00000000,
++/* 0x001c: ctx_src_tile_mode */
+ 0x00000000,
++/* 0x0020: ctx_src_xsize */
+ 0x00000000,
++/* 0x0024: ctx_src_ysize */
+ 0x00000000,
++/* 0x0028: ctx_src_zsize */
+ 0x00000000,
++/* 0x002c: ctx_src_zoff */
+ 0x00000000,
++/* 0x0030: ctx_src_xoff */
+ 0x00000000,
++/* 0x0034: ctx_src_yoff */
+ 0x00000000,
++/* 0x0038: ctx_src_cpp */
+ 0x00000000,
++/* 0x003c: ctx_dst_address_high */
+ 0x00000000,
++/* 0x0040: ctx_dst_address_low */
+ 0x00000000,
++/* 0x0044: ctx_dst_pitch */
+ 0x00000000,
++/* 0x0048: ctx_dst_tile_mode */
+ 0x00000000,
++/* 0x004c: ctx_dst_xsize */
+ 0x00000000,
++/* 0x0050: ctx_dst_ysize */
+ 0x00000000,
++/* 0x0054: ctx_dst_zsize */
+ 0x00000000,
++/* 0x0058: ctx_dst_zoff */
+ 0x00000000,
++/* 0x005c: ctx_dst_xoff */
+ 0x00000000,
++/* 0x0060: ctx_dst_yoff */
+ 0x00000000,
++/* 0x0064: ctx_dst_cpp */
+ 0x00000000,
++/* 0x0068: ctx_format */
+ 0x00000000,
++/* 0x006c: ctx_swz_const0 */
+ 0x00000000,
++/* 0x0070: ctx_swz_const1 */
+ 0x00000000,
++/* 0x0074: ctx_xcnt */
+ 0x00000000,
++/* 0x0078: ctx_ycnt */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+@@ -63,6 +94,7 @@ uint32_t nvc0_pcopy_data[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
++/* 0x0100: dispatch_table */
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+@@ -111,11 +143,11 @@ uint32_t nvc0_pcopy_data[] = {
+ 0x00000010,
+ 0xffffff00,
+ 0x00000014,
+- 0x0000000f,
++ 0x00000000,
+ 0x0000003c,
+ 0xffffff00,
+ 0x00000040,
+- 0x0000000f,
++ 0x00000000,
+ 0x00000018,
+ 0xfff80000,
+ 0x00000044,
+@@ -139,7 +171,8 @@ uint32_t nvc0_pcopy_data[] = {
+ 0x00000800,
+ };
+
+-uint32_t nvc0_pcopy_code[] = {
++u32 nvc0_pcopy_code[] = {
++/* 0x0000: main */
+ 0x04fe04bd,
+ 0x3517f000,
+ 0xf10010fe,
+@@ -151,15 +184,20 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x17f11031,
+ 0x27f01200,
+ 0x0012d003,
++/* 0x002f: spin */
+ 0xf40031f4,
+ 0x0ef40028,
++/* 0x0035: ih */
+ 0x8001cffd,
+ 0xf40812c4,
+ 0x21f4060b,
++/* 0x0041: ih_no_chsw */
+ 0x0412c4ca,
+ 0xf5070bf4,
++/* 0x004b: ih_no_cmd */
+ 0xc4010221,
+ 0x01d00c11,
++/* 0x0053: swctx */
+ 0xf101f840,
+ 0xfe770047,
+ 0x47f1004b,
+@@ -188,8 +226,11 @@ uint32_t nvc0_pcopy_code[] = {
+ 0xf00204b9,
+ 0x01f40643,
+ 0x0604fa09,
++/* 0x00c3: swctx_load */
+ 0xfa060ef4,
++/* 0x00c6: swctx_done */
+ 0x03f80504,
++/* 0x00ca: chsw */
+ 0x27f100f8,
+ 0x23cf1400,
+ 0x1e3fc800,
+@@ -198,18 +239,22 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x1e3af053,
+ 0xf00023d0,
+ 0x24d00147,
++/* 0x00eb: chsw_no_unload */
+ 0xcf00f880,
+ 0x3dc84023,
+ 0x090bf41e,
+ 0xf40131f4,
++/* 0x00fa: chsw_finish_load */
+ 0x37f05321,
+ 0x8023d002,
++/* 0x0102: dispatch */
+ 0x37f100f8,
+ 0x32cf1900,
+ 0x0033cf40,
+ 0x07ff24e4,
+ 0xf11024b6,
+ 0xbd010057,
++/* 0x011b: dispatch_loop */
+ 0x5874bd64,
+ 0x57580056,
+ 0x0450b601,
+@@ -219,6 +264,7 @@ uint32_t nvc0_pcopy_code[] = {
+ 0xbb0f08f4,
+ 0x74b60276,
+ 0x0057bb03,
++/* 0x013f: dispatch_valid_mthd */
+ 0xbbdf0ef4,
+ 0x44b60246,
+ 0x0045bb03,
+@@ -229,24 +275,33 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x64b00146,
+ 0x091bf400,
+ 0xf4005380,
++/* 0x0166: dispatch_cmd */
+ 0x32f4300e,
+ 0xf455f901,
+ 0x0ef40c01,
++/* 0x0171: dispatch_invalid_bitfield */
+ 0x0225f025,
++/* 0x0174: dispatch_illegal_mthd */
++/* 0x0177: dispatch_error */
+ 0xf10125f0,
+ 0xd0100047,
+ 0x43d00042,
+ 0x4027f040,
++/* 0x0187: hostirq_wait */
+ 0xcf0002d0,
+ 0x24f08002,
+ 0x0024b040,
++/* 0x0193: dispatch_done */
+ 0xf1f71bf4,
+ 0xf01d0027,
+ 0x23d00137,
++/* 0x019f: cmd_nop */
+ 0xf800f800,
++/* 0x01a1: cmd_pm_trigger */
+ 0x0027f100,
+ 0xf034bd22,
+ 0x23d00233,
++/* 0x01af: cmd_exec_set_format */
+ 0xf400f800,
+ 0x01b0f030,
+ 0x0101b000,
+@@ -258,20 +313,26 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x3847c701,
+ 0xf40170b6,
+ 0x84bd0232,
++/* 0x01da: ncomp_loop */
+ 0x4ac494bd,
+ 0x0445b60f,
++/* 0x01e2: bpc_loop */
+ 0xa430b4bd,
+ 0x0f18f404,
+ 0xbbc0a5ff,
+ 0x31f400cb,
+ 0x220ef402,
++/* 0x01f4: cmp_c0 */
+ 0xf00c1bf4,
+ 0xcbbb10c7,
+ 0x160ef400,
++/* 0x0200: cmp_c1 */
+ 0xf406a430,
+ 0xc7f00c18,
+ 0x00cbbb14,
++/* 0x020f: cmp_zero */
+ 0xf1070ef4,
++/* 0x0213: bpc_next */
+ 0x380080c7,
+ 0x80b601c8,
+ 0x01b0b601,
+@@ -283,6 +344,7 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x1d08980e,
+ 0xf40068fd,
+ 0x64bd0502,
++/* 0x023c: dst_xcnt */
+ 0x800075fd,
+ 0x78fd1907,
+ 0x1057f100,
+@@ -307,15 +369,18 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x1c069800,
+ 0xf44056d0,
+ 0x00f81030,
++/* 0x029c: cmd_exec_set_surface_tiled */
+ 0xc7075798,
+ 0x78c76879,
+ 0x0380b664,
+ 0xb06077c7,
+ 0x1bf40e76,
+ 0x0477f009,
++/* 0x02b7: xtile64 */
+ 0xf00f0ef4,
+ 0x70b6027c,
+ 0x0947fd11,
++/* 0x02c3: xtileok */
+ 0x980677f0,
+ 0x5b980c5a,
+ 0x00abfd0e,
+@@ -374,6 +439,7 @@ uint32_t nvc0_pcopy_code[] = {
+ 0xb70067d0,
+ 0xd0040060,
+ 0x00f80068,
++/* 0x03a8: cmd_exec_set_surface_linear */
+ 0xb7026cf0,
+ 0xb6020260,
+ 0x57980864,
+@@ -384,12 +450,15 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x0060b700,
+ 0x06579804,
+ 0xf80067d0,
++/* 0x03d1: cmd_exec_wait */
+ 0xf900f900,
+ 0x0007f110,
+ 0x0604b608,
++/* 0x03dc: loop */
+ 0xf00001cf,
+ 0x1bf40114,
+ 0xfc10fcfa,
++/* 0x03eb: cmd_exec_query */
+ 0xc800f800,
+ 0x1bf40d34,
+ 0xd121f570,
+@@ -419,6 +488,7 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x0153f026,
+ 0x080047f1,
+ 0xd00644b6,
++/* 0x045e: query_counter */
+ 0x21f50045,
+ 0x47f103d1,
+ 0x44b6080c,
+@@ -442,11 +512,13 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x080047f1,
+ 0xd00644b6,
+ 0x00f80045,
++/* 0x04b8: cmd_exec */
+ 0x03d121f5,
+ 0xf4003fc8,
+ 0x21f50e0b,
+ 0x47f101af,
+ 0x0ef40200,
++/* 0x04cd: cmd_exec_no_format */
+ 0x1067f11e,
+ 0x0664b608,
+ 0x800177f0,
+@@ -454,18 +526,23 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x1d079819,
+ 0xd00067d0,
+ 0x44bd4067,
++/* 0x04e8: cmd_exec_init_src_surface */
+ 0xbd0232f4,
+ 0x043fc854,
+ 0xf50a0bf4,
+ 0xf403a821,
++/* 0x04fa: src_tiled */
+ 0x21f50a0e,
+ 0x49f0029c,
++/* 0x0501: cmd_exec_init_dst_surface */
+ 0x0231f407,
+ 0xc82c57f0,
+ 0x0bf4083f,
+ 0xa821f50a,
+ 0x0a0ef403,
++/* 0x0514: dst_tiled */
+ 0x029c21f5,
++/* 0x051b: cmd_exec_kick */
+ 0xf10849f0,
+ 0xb6080057,
+ 0x06980654,
+@@ -475,7 +552,9 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x54d00546,
+ 0x0c3fc800,
+ 0xf5070bf4,
++/* 0x053f: cmd_exec_done */
+ 0xf803eb21,
++/* 0x0541: cmd_wrcache_flush */
+ 0x0027f100,
+ 0xf034bd22,
+ 0x23d00133,
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index 552b436..3254d51 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -22,6 +22,7 @@
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
++ * Jerome Glisse
+ */
+ #include "drmP.h"
+ #include "radeon_drm.h"
+@@ -634,7 +635,6 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
+ ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
+ link_status, DP_LINK_STATUS_SIZE, 100);
+ if (ret <= 0) {
+- DRM_ERROR("displayport link status failed\n");
+ return false;
+ }
+
+@@ -812,8 +812,10 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
+ else
+ mdelay(dp_info->rd_interval * 4);
+
+- if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
++ if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
++ DRM_ERROR("displayport link status failed\n");
+ break;
++ }
+
+ if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+ clock_recovery = true;
+@@ -875,8 +877,10 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
+ else
+ mdelay(dp_info->rd_interval * 4);
+
+- if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
++ if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
++ DRM_ERROR("displayport link status failed\n");
+ break;
++ }
+
+ if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+ channel_eq = true;
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 4a4493f..87d494d 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -64,14 +64,33 @@ void radeon_connector_hotplug(struct drm_connector *connector)
+
+ /* just deal with DP (not eDP) here. */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+- int saved_dpms = connector->dpms;
+-
+- /* Only turn off the display it it's physically disconnected */
+- if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+- else if (radeon_dp_needs_link_train(radeon_connector))
+- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+- connector->dpms = saved_dpms;
++ struct radeon_connector_atom_dig *dig_connector =
++ radeon_connector->con_priv;
++
++ /* if existing sink type was not DP no need to retrain */
++ if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
++ return;
++
++ /* first get sink type as it may be reset after (un)plug */
++ dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
++ /* don't do anything if sink is not display port, i.e.,
++ * passive dp->(dvi|hdmi) adaptor
++ */
++ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
++ int saved_dpms = connector->dpms;
++ /* Only turn off the display if it's physically disconnected */
++ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
++ } else if (radeon_dp_needs_link_train(radeon_connector)) {
++ /* set it to OFF so that drm_helper_connector_dpms()
++ * won't return immediately since the current state
++ * is ON at this point.
++ */
++ connector->dpms = DRM_MODE_DPMS_OFF;
++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
++ }
++ connector->dpms = saved_dpms;
++ }
+ }
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
+index 986d608..2132109 100644
+--- a/drivers/gpu/drm/radeon/radeon_cursor.c
++++ b/drivers/gpu/drm/radeon/radeon_cursor.c
+@@ -257,8 +257,14 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+ if (!(cursor_end & 0x7f))
+ w--;
+ }
+- if (w <= 0)
++ if (w <= 0) {
+ w = 1;
++ cursor_end = x - xorigin + w;
++ if (!(cursor_end & 0x7f)) {
++ x--;
++ WARN_ON_ONCE(x < 0);
++ }
++ }
+ }
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index f3ae607..39497c7 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -117,7 +117,6 @@ int radeon_bo_create(struct radeon_device *rdev,
+ return -ENOMEM;
+ }
+
+-retry:
+ bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+ if (bo == NULL)
+ return -ENOMEM;
+@@ -130,6 +129,8 @@ retry:
+ bo->gem_base.driver_private = NULL;
+ bo->surface_reg = -1;
+ INIT_LIST_HEAD(&bo->list);
++
++retry:
+ radeon_ttm_placement_from_domain(bo, domain);
+ /* Kernel allocation are uninterruptible */
+ mutex_lock(&rdev->vram_mutex);
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index a1b8caa..0f074e0 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1865,6 +1865,11 @@ static int device_change_notifier(struct notifier_block *nb,
+
+ iommu_init_device(dev);
+
++ if (iommu_pass_through) {
++ attach_device(dev, pt_domain);
++ break;
++ }
++
+ domain = domain_for_device(dev);
+
+ /* allocate a protection domain if a device is added */
+@@ -1880,10 +1885,7 @@ static int device_change_notifier(struct notifier_block *nb,
+ list_add_tail(&dma_domain->list, &iommu_pd_list);
+ spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+
+- if (!iommu_pass_through)
+- dev->archdata.dma_ops = &amd_iommu_dma_ops;
+- else
+- dev->archdata.dma_ops = &nommu_dma_ops;
++ dev->archdata.dma_ops = &amd_iommu_dma_ops;
+
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+diff --git a/drivers/media/video/cx25821/cx25821-core.c b/drivers/media/video/cx25821/cx25821-core.c
+index a7fa38f..e572ce5 100644
+--- a/drivers/media/video/cx25821/cx25821-core.c
++++ b/drivers/media/video/cx25821/cx25821-core.c
+@@ -914,9 +914,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
+ list_add_tail(&dev->devlist, &cx25821_devlist);
+ mutex_unlock(&cx25821_devlist_mutex);
+
+- strcpy(cx25821_boards[UNKNOWN_BOARD].name, "unknown");
+- strcpy(cx25821_boards[CX25821_BOARD].name, "cx25821");
+-
+ if (dev->pci->device != 0x8210) {
+ pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
+ __func__, dev->pci->device);
+diff --git a/drivers/media/video/cx25821/cx25821.h b/drivers/media/video/cx25821/cx25821.h
+index 2d2d009..bf54360 100644
+--- a/drivers/media/video/cx25821/cx25821.h
++++ b/drivers/media/video/cx25821/cx25821.h
+@@ -187,7 +187,7 @@ enum port {
+ };
+
+ struct cx25821_board {
+- char *name;
++ const char *name;
+ enum port porta;
+ enum port portb;
+ enum port portc;
+diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
+index 6878a94..83b51b5 100644
+--- a/drivers/mmc/host/sdhci-pci.c
++++ b/drivers/mmc/host/sdhci-pci.c
+@@ -148,6 +148,7 @@ static const struct sdhci_pci_fixes sdhci_ene_714 = {
+ static const struct sdhci_pci_fixes sdhci_cafe = {
+ .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
+ SDHCI_QUIRK_NO_BUSY_IRQ |
++ SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+ };
+
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 9e61d6b..ed1be8a 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -3770,6 +3770,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
++ case RTL_GIGA_MAC_VER_34:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
+ break;
+ default:
+diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
+index 01dcb1a..727c129 100644
+--- a/drivers/net/wireless/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/mwifiex/cfg80211.c
+@@ -545,9 +545,9 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
+
+ /*
+ * Bit 0 in tx_htinfo indicates that current Tx rate is 11n rate. Valid
+- * MCS index values for us are 0 to 7.
++ * MCS index values for us are 0 to 15.
+ */
+- if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 8)) {
++ if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) {
+ sinfo->txrate.mcs = priv->tx_rate;
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
+ /* 40MHz rate */
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index 0ffa111..bdf960b 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -876,6 +876,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x1482, 0x3c09) },
+ /* AirTies */
+ { USB_DEVICE(0x1eda, 0x2012) },
++ { USB_DEVICE(0x1eda, 0x2210) },
+ { USB_DEVICE(0x1eda, 0x2310) },
+ /* Allwin */
+ { USB_DEVICE(0x8516, 0x2070) },
+@@ -945,6 +946,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ /* DVICO */
+ { USB_DEVICE(0x0fe9, 0xb307) },
+ /* Edimax */
++ { USB_DEVICE(0x7392, 0x4085) },
+ { USB_DEVICE(0x7392, 0x7711) },
+ { USB_DEVICE(0x7392, 0x7717) },
+ { USB_DEVICE(0x7392, 0x7718) },
+@@ -1020,6 +1022,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ /* Philips */
+ { USB_DEVICE(0x0471, 0x200f) },
+ /* Planex */
++ { USB_DEVICE(0x2019, 0x5201) },
+ { USB_DEVICE(0x2019, 0xab25) },
+ { USB_DEVICE(0x2019, 0xed06) },
+ /* Quanta */
+@@ -1088,6 +1091,12 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ #ifdef CONFIG_RT2800USB_RT33XX
+ /* Belkin */
+ { USB_DEVICE(0x050d, 0x945b) },
++ /* D-Link */
++ { USB_DEVICE(0x2001, 0x3c17) },
++ /* Panasonic */
++ { USB_DEVICE(0x083a, 0xb511) },
++ /* Philips */
++ { USB_DEVICE(0x0471, 0x20dd) },
+ /* Ralink */
+ { USB_DEVICE(0x148f, 0x3370) },
+ { USB_DEVICE(0x148f, 0x8070) },
+@@ -1099,6 +1108,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x8516, 0x3572) },
+ /* Askey */
+ { USB_DEVICE(0x1690, 0x0744) },
++ { USB_DEVICE(0x1690, 0x0761) },
++ { USB_DEVICE(0x1690, 0x0764) },
+ /* Cisco */
+ { USB_DEVICE(0x167b, 0x4001) },
+ /* EnGenius */
+@@ -1113,6 +1124,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ /* Sitecom */
+ { USB_DEVICE(0x0df6, 0x0041) },
+ { USB_DEVICE(0x0df6, 0x0062) },
++ { USB_DEVICE(0x0df6, 0x0065) },
++ { USB_DEVICE(0x0df6, 0x0066) },
++ { USB_DEVICE(0x0df6, 0x0068) },
+ /* Toshiba */
+ { USB_DEVICE(0x0930, 0x0a07) },
+ /* Zinwell */
+@@ -1122,6 +1136,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ /* Azurewave */
+ { USB_DEVICE(0x13d3, 0x3329) },
+ { USB_DEVICE(0x13d3, 0x3365) },
++ /* D-Link */
++ { USB_DEVICE(0x2001, 0x3c1c) },
++ { USB_DEVICE(0x2001, 0x3c1d) },
+ /* Ralink */
+ { USB_DEVICE(0x148f, 0x5370) },
+ { USB_DEVICE(0x148f, 0x5372) },
+@@ -1163,13 +1180,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ /* D-Link */
+ { USB_DEVICE(0x07d1, 0x3c0b) },
+ { USB_DEVICE(0x07d1, 0x3c17) },
+- { USB_DEVICE(0x2001, 0x3c17) },
+- /* Edimax */
+- { USB_DEVICE(0x7392, 0x4085) },
+ /* Encore */
+ { USB_DEVICE(0x203d, 0x14a1) },
+- /* Fujitsu Stylistic 550 */
+- { USB_DEVICE(0x1690, 0x0761) },
+ /* Gemtek */
+ { USB_DEVICE(0x15a9, 0x0010) },
+ /* Gigabyte */
+@@ -1190,7 +1202,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x05a6, 0x0101) },
+ { USB_DEVICE(0x1d4d, 0x0010) },
+ /* Planex */
+- { USB_DEVICE(0x2019, 0x5201) },
+ { USB_DEVICE(0x2019, 0xab24) },
+ /* Qcom */
+ { USB_DEVICE(0x18e8, 0x6259) },
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+index 2cf4c5f..de9faa9 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+@@ -3462,21 +3462,21 @@ void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw)
+ switch (rtlhal->macphymode) {
+ case DUALMAC_SINGLEPHY:
+ rtlphy->rf_type = RF_2T2R;
+- rtlhal->version |= CHIP_92D_SINGLEPHY;
++ rtlhal->version |= RF_TYPE_2T2R;
+ rtlhal->bandset = BAND_ON_BOTH;
+ rtlhal->current_bandtype = BAND_ON_2_4G;
+ break;
+
+ case SINGLEMAC_SINGLEPHY:
+ rtlphy->rf_type = RF_2T2R;
+- rtlhal->version |= CHIP_92D_SINGLEPHY;
++ rtlhal->version |= RF_TYPE_2T2R;
+ rtlhal->bandset = BAND_ON_BOTH;
+ rtlhal->current_bandtype = BAND_ON_2_4G;
+ break;
+
+ case DUALMAC_DUALPHY:
+ rtlphy->rf_type = RF_1T1R;
+- rtlhal->version &= (~CHIP_92D_SINGLEPHY);
++ rtlhal->version &= RF_TYPE_1T1R;
+ /* Now we let MAC0 run on 5G band. */
+ if (rtlhal->interfaceindex == 0) {
+ rtlhal->bandset = BAND_ON_5G;
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index 351dc0b..ee77a58 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -287,6 +287,7 @@ static void scsi_host_dev_release(struct device *dev)
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct device *parent = dev->parent;
+ struct request_queue *q;
++ void *queuedata;
+
+ scsi_proc_hostdir_rm(shost->hostt);
+
+@@ -296,9 +297,9 @@ static void scsi_host_dev_release(struct device *dev)
+ destroy_workqueue(shost->work_q);
+ q = shost->uspace_req_q;
+ if (q) {
+- kfree(q->queuedata);
+- q->queuedata = NULL;
+- scsi_free_queue(q);
++ queuedata = q->queuedata;
++ blk_cleanup_queue(q);
++ kfree(queuedata);
+ }
+
+ scsi_destroy_command_freelist(shost);
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index e48ba4b..dbe3568 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -774,7 +774,7 @@ static struct domain_device *sas_ex_discover_end_dev(
+ }
+
+ /* See if this phy is part of a wide port */
+-static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
++static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
+ {
+ struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
+ int i;
+@@ -790,11 +790,11 @@ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
+ sas_port_add_phy(ephy->port, phy->phy);
+ phy->port = ephy->port;
+ phy->phy_state = PHY_DEVICE_DISCOVERED;
+- return 0;
++ return true;
+ }
+ }
+
+- return -ENODEV;
++ return false;
+ }
+
+ static struct domain_device *sas_ex_discover_expander(
+@@ -932,8 +932,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
+ return res;
+ }
+
+- res = sas_ex_join_wide_port(dev, phy_id);
+- if (!res) {
++ if (sas_ex_join_wide_port(dev, phy_id)) {
+ SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
+ phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
+ return res;
+@@ -978,8 +977,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
+ if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
+ SAS_ADDR(child->sas_addr)) {
+ ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
+- res = sas_ex_join_wide_port(dev, i);
+- if (!res)
++ if (sas_ex_join_wide_port(dev, i))
+ SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
+ i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
+
+@@ -1849,32 +1847,20 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
+ {
+ struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id];
+ struct domain_device *child;
+- bool found = false;
+- int res, i;
++ int res;
+
+ SAS_DPRINTK("ex %016llx phy%d new device attached\n",
+ SAS_ADDR(dev->sas_addr), phy_id);
+ res = sas_ex_phy_discover(dev, phy_id);
+ if (res)
+- goto out;
+- /* to support the wide port inserted */
+- for (i = 0; i < dev->ex_dev.num_phys; i++) {
+- struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i];
+- if (i == phy_id)
+- continue;
+- if (SAS_ADDR(ex_phy_temp->attached_sas_addr) ==
+- SAS_ADDR(ex_phy->attached_sas_addr)) {
+- found = true;
+- break;
+- }
+- }
+- if (found) {
+- sas_ex_join_wide_port(dev, phy_id);
++ return res;
++
++ if (sas_ex_join_wide_port(dev, phy_id))
+ return 0;
+- }
++
+ res = sas_ex_discover_devices(dev, phy_id);
+- if (!res)
+- goto out;
++ if (res)
++ return res;
+ list_for_each_entry(child, &dev->ex_dev.children, siblings) {
+ if (SAS_ADDR(child->sas_addr) ==
+ SAS_ADDR(ex_phy->attached_sas_addr)) {
+@@ -1884,7 +1870,6 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
+ break;
+ }
+ }
+-out:
+ return res;
+ }
+
+@@ -1983,9 +1968,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
+ struct domain_device *dev = NULL;
+
+ res = sas_find_bcast_dev(port_dev, &dev);
+- if (res)
+- goto out;
+- if (dev) {
++ while (res == 0 && dev) {
+ struct expander_device *ex = &dev->ex_dev;
+ int i = 0, phy_id;
+
+@@ -1997,8 +1980,10 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
+ res = sas_rediscover(dev, phy_id);
+ i = phy_id + 1;
+ } while (i < ex->num_phys);
++
++ dev = NULL;
++ res = sas_find_bcast_dev(port_dev, &dev);
+ }
+-out:
+ return res;
+ }
+
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index 2aeb2e9..831db24 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -785,7 +785,13 @@ static void scsi_done(struct scsi_cmnd *cmd)
+ /* Move this to a header if it becomes more generally useful */
+ static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
+ {
+- return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
++ struct scsi_driver **sdp;
++
++ sdp = (struct scsi_driver **)cmd->request->rq_disk->private_data;
++ if (!sdp)
++ return NULL;
++
++ return *sdp;
+ }
+
+ /**
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index dc6131e..456b131 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -1673,6 +1673,20 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
+ * requests are started.
+ */
+ scsi_run_host_queues(shost);
++
++ /*
++ * if eh is active and host_eh_scheduled is pending we need to re-run
++ * recovery. we do this check after scsi_run_host_queues() to allow
++ * everything pent up since the last eh run a chance to make forward
++ * progress before we sync again. Either we'll immediately re-run
++ * recovery or scsi_device_unbusy() will wake us again when these
++ * pending commands complete.
++ */
++ spin_lock_irqsave(shost->host_lock, flags);
++ if (shost->host_eh_scheduled)
++ if (scsi_host_set_state(shost, SHOST_RECOVERY))
++ WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
++ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+
+ /**
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index f0ab58e..6c4b620 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -406,10 +406,6 @@ static void scsi_run_queue(struct request_queue *q)
+ LIST_HEAD(starved_list);
+ unsigned long flags;
+
+- /* if the device is dead, sdev will be NULL, so no queue to run */
+- if (!sdev)
+- return;
+-
+ shost = sdev->host;
+ if (scsi_target(sdev)->single_lun)
+ scsi_single_lun_run(sdev);
+@@ -483,15 +479,26 @@ void scsi_requeue_run_queue(struct work_struct *work)
+ */
+ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
+ {
++ struct scsi_device *sdev = cmd->device;
+ struct request *req = cmd->request;
+ unsigned long flags;
+
++ /*
++ * We need to hold a reference on the device to avoid the queue being
++ * killed after the unlock and before scsi_run_queue is invoked which
++ * may happen because scsi_unprep_request() puts the command which
++ * releases its reference on the device.
++ */
++ get_device(&sdev->sdev_gendev);
++
+ spin_lock_irqsave(q->queue_lock, flags);
+ scsi_unprep_request(req);
+ blk_requeue_request(q, req);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ scsi_run_queue(q);
++
++ put_device(&sdev->sdev_gendev);
+ }
+
+ void scsi_next_command(struct scsi_cmnd *cmd)
+@@ -1374,16 +1381,16 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
+ * may be changed after request stacking drivers call the function,
+ * regardless of taking lock or not.
+ *
+- * When scsi can't dispatch I/Os anymore and needs to kill I/Os
+- * (e.g. !sdev), scsi needs to return 'not busy'.
+- * Otherwise, request stacking drivers may hold requests forever.
++ * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
++ * needs to return 'not busy'. Otherwise, request stacking drivers
++ * may hold requests forever.
+ */
+ static int scsi_lld_busy(struct request_queue *q)
+ {
+ struct scsi_device *sdev = q->queuedata;
+ struct Scsi_Host *shost;
+
+- if (!sdev)
++ if (blk_queue_dead(q))
+ return 0;
+
+ shost = sdev->host;
+@@ -1494,12 +1501,6 @@ static void scsi_request_fn(struct request_queue *q)
+ struct scsi_cmnd *cmd;
+ struct request *req;
+
+- if (!sdev) {
+- while ((req = blk_peek_request(q)) != NULL)
+- scsi_kill_request(req, q);
+- return;
+- }
+-
+ if(!get_device(&sdev->sdev_gendev))
+ /* We must be tearing the block queue down already */
+ return;
+@@ -1701,20 +1702,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+ return q;
+ }
+
+-void scsi_free_queue(struct request_queue *q)
+-{
+- unsigned long flags;
+-
+- WARN_ON(q->queuedata);
+-
+- /* cause scsi_request_fn() to kill all non-finished requests */
+- spin_lock_irqsave(q->queue_lock, flags);
+- q->request_fn(q);
+- spin_unlock_irqrestore(q->queue_lock, flags);
+-
+- blk_cleanup_queue(q);
+-}
+-
+ /*
+ * Function: scsi_block_requests()
+ *
+diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
+index 5b475d0..d58adca 100644
+--- a/drivers/scsi/scsi_priv.h
++++ b/drivers/scsi/scsi_priv.h
+@@ -85,7 +85,6 @@ extern void scsi_next_command(struct scsi_cmnd *cmd);
+ extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
+ extern void scsi_run_host_queues(struct Scsi_Host *shost);
+ extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
+-extern void scsi_free_queue(struct request_queue *q);
+ extern int scsi_init_queue(void);
+ extern void scsi_exit_queue(void);
+ struct request_queue;
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 6e7ea4a..a48b59c 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1710,6 +1710,9 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
+ {
+ struct scsi_device *sdev;
+ shost_for_each_device(sdev, shost) {
++ /* target removed before the device could be added */
++ if (sdev->sdev_state == SDEV_DEL)
++ continue;
+ if (!scsi_host_scan_allowed(shost) ||
+ scsi_sysfs_add_sdev(sdev) != 0)
+ __scsi_remove_device(sdev);
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 04c2a27..bb7c482 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -971,11 +971,8 @@ void __scsi_remove_device(struct scsi_device *sdev)
+ sdev->host->hostt->slave_destroy(sdev);
+ transport_destroy_device(dev);
+
+- /* cause the request function to reject all I/O requests */
+- sdev->request_queue->queuedata = NULL;
+-
+ /* Freeing the queue signals to block that we're done */
+- scsi_free_queue(sdev->request_queue);
++ blk_cleanup_queue(sdev->request_queue);
+ put_device(dev);
+ }
+
+@@ -1000,7 +997,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
+ struct scsi_device *sdev;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+- starget->reap_ref++;
+ restart:
+ list_for_each_entry(sdev, &shost->__devices, siblings) {
+ if (sdev->channel != starget->channel ||
+@@ -1014,14 +1010,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
+ goto restart;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+- scsi_target_reap(starget);
+-}
+-
+-static int __remove_child (struct device * dev, void * data)
+-{
+- if (scsi_is_target_device(dev))
+- __scsi_remove_target(to_scsi_target(dev));
+- return 0;
+ }
+
+ /**
+@@ -1034,14 +1022,34 @@ static int __remove_child (struct device * dev, void * data)
+ */
+ void scsi_remove_target(struct device *dev)
+ {
+- if (scsi_is_target_device(dev)) {
+- __scsi_remove_target(to_scsi_target(dev));
+- return;
++ struct Scsi_Host *shost = dev_to_shost(dev->parent);
++ struct scsi_target *starget, *found;
++ unsigned long flags;
++
++ restart:
++ found = NULL;
++ spin_lock_irqsave(shost->host_lock, flags);
++ list_for_each_entry(starget, &shost->__targets, siblings) {
++ if (starget->state == STARGET_DEL)
++ continue;
++ if (starget->dev.parent == dev || &starget->dev == dev) {
++ found = starget;
++ found->reap_ref++;
++ break;
++ }
+ }
++ spin_unlock_irqrestore(shost->host_lock, flags);
+
+- get_device(dev);
+- device_for_each_child(dev, NULL, __remove_child);
+- put_device(dev);
++ if (found) {
++ __scsi_remove_target(found);
++ scsi_target_reap(found);
++ /* in the case where @dev has multiple starget children,
++ * continue removing.
++ *
++ * FIXME: does such a case exist?
++ */
++ goto restart;
++ }
+ }
+ EXPORT_SYMBOL(scsi_remove_target);
+
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 0842cc7..2ff1255 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -427,19 +427,8 @@ int iscsit_reset_np_thread(
+
+ int iscsit_del_np_comm(struct iscsi_np *np)
+ {
+- if (!np->np_socket)
+- return 0;
+-
+- /*
+- * Some network transports allocate their own struct sock->file,
+- * see if we need to free any additional allocated resources.
+- */
+- if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
+- kfree(np->np_socket->file);
+- np->np_socket->file = NULL;
+- }
+-
+- sock_release(np->np_socket);
++ if (np->np_socket)
++ sock_release(np->np_socket);
+ return 0;
+ }
+
+@@ -4105,13 +4094,8 @@ int iscsit_close_connection(
+ kfree(conn->conn_ops);
+ conn->conn_ops = NULL;
+
+- if (conn->sock) {
+- if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
+- kfree(conn->sock->file);
+- conn->sock->file = NULL;
+- }
++ if (conn->sock)
+ sock_release(conn->sock);
+- }
+ conn->thread_set = NULL;
+
+ pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
+index 7da2d6a..0f68197 100644
+--- a/drivers/target/iscsi/iscsi_target_core.h
++++ b/drivers/target/iscsi/iscsi_target_core.h
+@@ -224,7 +224,6 @@ enum iscsi_timer_flags_table {
+ /* Used for struct iscsi_np->np_flags */
+ enum np_flags_table {
+ NPF_IP_NETWORK = 0x00,
+- NPF_SCTP_STRUCT_FILE = 0x01 /* Bugfix */
+ };
+
+ /* Used for struct iscsi_np->np_thread_state */
+@@ -511,7 +510,6 @@ struct iscsi_conn {
+ u16 local_port;
+ int net_size;
+ u32 auth_id;
+-#define CONNFLAG_SCTP_STRUCT_FILE 0x01
+ u32 conn_flags;
+ /* Used for iscsi_tx_login_rsp() */
+ u32 login_itt;
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index bd2adec..2ec5339 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -793,22 +793,6 @@ int iscsi_target_setup_login_socket(
+ }
+ np->np_socket = sock;
+ /*
+- * The SCTP stack needs struct socket->file.
+- */
+- if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
+- (np->np_network_transport == ISCSI_SCTP_UDP)) {
+- if (!sock->file) {
+- sock->file = kzalloc(sizeof(struct file), GFP_KERNEL);
+- if (!sock->file) {
+- pr_err("Unable to allocate struct"
+- " file for SCTP\n");
+- ret = -ENOMEM;
+- goto fail;
+- }
+- np->np_flags |= NPF_SCTP_STRUCT_FILE;
+- }
+- }
+- /*
+ * Setup the np->np_sockaddr from the passed sockaddr setup
+ * in iscsi_target_configfs.c code..
+ */
+@@ -857,21 +841,15 @@ int iscsi_target_setup_login_socket(
+
+ fail:
+ np->np_socket = NULL;
+- if (sock) {
+- if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
+- kfree(sock->file);
+- sock->file = NULL;
+- }
+-
++ if (sock)
+ sock_release(sock);
+- }
+ return ret;
+ }
+
+ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ {
+ u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
+- int err, ret = 0, ip_proto, sock_type, set_sctp_conn_flag, stop;
++ int err, ret = 0, ip_proto, sock_type, stop;
+ struct iscsi_conn *conn = NULL;
+ struct iscsi_login *login;
+ struct iscsi_portal_group *tpg = NULL;
+@@ -882,7 +860,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ struct sockaddr_in6 sock_in6;
+
+ flush_signals(current);
+- set_sctp_conn_flag = 0;
+ sock = np->np_socket;
+ ip_proto = np->np_ip_proto;
+ sock_type = np->np_sock_type;
+@@ -907,35 +884,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ spin_unlock_bh(&np->np_thread_lock);
+ goto out;
+ }
+- /*
+- * The SCTP stack needs struct socket->file.
+- */
+- if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
+- (np->np_network_transport == ISCSI_SCTP_UDP)) {
+- if (!new_sock->file) {
+- new_sock->file = kzalloc(
+- sizeof(struct file), GFP_KERNEL);
+- if (!new_sock->file) {
+- pr_err("Unable to allocate struct"
+- " file for SCTP\n");
+- sock_release(new_sock);
+- /* Get another socket */
+- return 1;
+- }
+- set_sctp_conn_flag = 1;
+- }
+- }
+-
+ iscsi_start_login_thread_timer(np);
+
+ conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+ if (!conn) {
+ pr_err("Could not allocate memory for"
+ " new connection\n");
+- if (set_sctp_conn_flag) {
+- kfree(new_sock->file);
+- new_sock->file = NULL;
+- }
+ sock_release(new_sock);
+ /* Get another socket */
+ return 1;
+@@ -945,9 +899,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ conn->conn_state = TARG_CONN_STATE_FREE;
+ conn->sock = new_sock;
+
+- if (set_sctp_conn_flag)
+- conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE;
+-
+ pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
+ conn->conn_state = TARG_CONN_STATE_XPT_UP;
+
+@@ -1195,13 +1146,8 @@ old_sess_out:
+ iscsi_release_param_list(conn->param_list);
+ conn->param_list = NULL;
+ }
+- if (conn->sock) {
+- if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
+- kfree(conn->sock->file);
+- conn->sock->file = NULL;
+- }
++ if (conn->sock)
+ sock_release(conn->sock);
+- }
+ kfree(conn);
+
+ if (tpg) {
+diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
+index 93b9406..717a8d4 100644
+--- a/drivers/target/target_core_cdb.c
++++ b/drivers/target/target_core_cdb.c
+@@ -1114,11 +1114,11 @@ int target_emulate_unmap(struct se_task *task)
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf, *ptr = NULL;
+- unsigned char *cdb = &cmd->t_task_cdb[0];
+ sector_t lba;
+- unsigned int size = cmd->data_length, range;
+- int ret = 0, offset;
+- unsigned short dl, bd_dl;
++ int size = cmd->data_length;
++ u32 range;
++ int ret = 0;
++ int dl, bd_dl;
+
+ if (!dev->transport->do_discard) {
+ pr_err("UNMAP emulation not supported for: %s\n",
+@@ -1127,24 +1127,41 @@ int target_emulate_unmap(struct se_task *task)
+ return -ENOSYS;
+ }
+
+- /* First UNMAP block descriptor starts at 8 byte offset */
+- offset = 8;
+- size -= 8;
+- dl = get_unaligned_be16(&cdb[0]);
+- bd_dl = get_unaligned_be16(&cdb[2]);
+-
+ buf = transport_kmap_data_sg(cmd);
+
+- ptr = &buf[offset];
+- pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
++ dl = get_unaligned_be16(&buf[0]);
++ bd_dl = get_unaligned_be16(&buf[2]);
++
++ size = min(size - 8, bd_dl);
++ if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
++ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
++ ret = -EINVAL;
++ goto err;
++ }
++
++ /* First UNMAP block descriptor starts at 8 byte offset */
++ ptr = &buf[8];
++ pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
+ " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
+
+- while (size) {
++ while (size >= 16) {
+ lba = get_unaligned_be64(&ptr[0]);
+ range = get_unaligned_be32(&ptr[8]);
+ pr_debug("UNMAP: Using lba: %llu and range: %u\n",
+ (unsigned long long)lba, range);
+
++ if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
++ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
++ ret = -EINVAL;
++ goto err;
++ }
++
++ if (lba + range > dev->transport->get_blocks(dev) + 1) {
++ cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
++ ret = -EINVAL;
++ goto err;
++ }
++
+ ret = dev->transport->do_discard(dev, lba, range);
+ if (ret < 0) {
+ pr_err("blkdev_issue_discard() failed: %d\n",
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 5660916..94c03d2 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1820,6 +1820,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
+ case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+ case TCM_UNKNOWN_MODE_PAGE:
+ case TCM_WRITE_PROTECTED:
++ case TCM_ADDRESS_OUT_OF_RANGE:
+ case TCM_CHECK_CONDITION_ABORT_CMD:
+ case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ case TCM_CHECK_CONDITION_NOT_READY:
+@@ -4496,6 +4497,15 @@ int transport_send_check_condition_and_sense(
+ /* WRITE PROTECTED */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
+ break;
++ case TCM_ADDRESS_OUT_OF_RANGE:
++ /* CURRENT ERROR */
++ buffer[offset] = 0x70;
++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
++ /* ILLEGAL REQUEST */
++ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
++ /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
++ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21;
++ break;
+ case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index f6ff837..a9df218 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -1555,10 +1555,14 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
+ void __user *addr = as->userurb;
+ unsigned int i;
+
+- if (as->userbuffer && urb->actual_length)
+- if (copy_to_user(as->userbuffer, urb->transfer_buffer,
+- urb->actual_length))
++ if (as->userbuffer && urb->actual_length) {
++ if (urb->number_of_packets > 0) /* Isochronous */
++ i = urb->transfer_buffer_length;
++ else /* Non-Isoc */
++ i = urb->actual_length;
++ if (copy_to_user(as->userbuffer, urb->transfer_buffer, i))
+ return -EFAULT;
++ }
+ if (put_user(as->status, &userurb->status))
+ return -EFAULT;
+ if (put_user(urb->actual_length, &userurb->actual_length))
+diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
+index 29c854b..4e1f0aa 100644
+--- a/drivers/usb/gadget/u_ether.c
++++ b/drivers/usb/gadget/u_ether.c
+@@ -796,12 +796,6 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
+
+ SET_ETHTOOL_OPS(net, &ops);
+
+- /* two kinds of host-initiated state changes:
+- * - iff DATA transfer is active, carrier is "on"
+- * - tx queueing enabled if open *and* carrier is "on"
+- */
+- netif_carrier_off(net);
+-
+ dev->gadget = g;
+ SET_NETDEV_DEV(net, &g->dev);
+ SET_NETDEV_DEVTYPE(net, &gadget_type);
+@@ -815,6 +809,12 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
+ INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+
+ the_dev = dev;
++
++ /* two kinds of host-initiated state changes:
++ * - iff DATA transfer is active, carrier is "on"
++ * - tx queueing enabled if open *and* carrier is "on"
++ */
++ netif_carrier_off(net);
+ }
+
+ return status;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 5971c95..d89aac1 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -932,8 +932,12 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
+diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
+index 0b39458..03321e5 100644
+--- a/fs/btrfs/async-thread.c
++++ b/fs/btrfs/async-thread.c
+@@ -206,10 +206,17 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
+
+ work->ordered_func(work);
+
+- /* now take the lock again and call the freeing code */
++ /* now take the lock again and drop our item from the list */
+ spin_lock(&workers->order_lock);
+ list_del(&work->order_list);
++ spin_unlock(&workers->order_lock);
++
++ /*
++ * we don't want to call the ordered free functions
++ * with the lock held though
++ */
+ work->ordered_free(work);
++ spin_lock(&workers->order_lock);
+ }
+
+ spin_unlock(&workers->order_lock);
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index f44b392..6b2a724 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -872,7 +872,8 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
+
+ #ifdef CONFIG_MIGRATION
+ static int btree_migratepage(struct address_space *mapping,
+- struct page *newpage, struct page *page)
++ struct page *newpage, struct page *page,
++ enum migrate_mode mode)
+ {
+ /*
+ * we can't safely write a btree page from here,
+@@ -887,7 +888,7 @@ static int btree_migratepage(struct address_space *mapping,
+ if (page_has_private(page) &&
+ !try_to_release_page(page, GFP_KERNEL))
+ return -EAGAIN;
+- return migrate_page(mapping, newpage, page);
++ return migrate_page(mapping, newpage, page, mode);
+ }
+ #endif
+
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 6aa7457..c858a29 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -89,6 +89,32 @@ static struct {
+ /* Forward declarations */
+ static void cifs_readv_complete(struct work_struct *work);
+
++#ifdef CONFIG_HIGHMEM
++/*
++ * On arches that have high memory, kmap address space is limited. By
++ * serializing the kmap operations on those arches, we ensure that we don't
++ * end up with a bunch of threads in writeback with partially mapped page
++ * arrays, stuck waiting for kmap to come back. That situation prevents
++ * progress and can deadlock.
++ */
++static DEFINE_MUTEX(cifs_kmap_mutex);
++
++static inline void
++cifs_kmap_lock(void)
++{
++ mutex_lock(&cifs_kmap_mutex);
++}
++
++static inline void
++cifs_kmap_unlock(void)
++{
++ mutex_unlock(&cifs_kmap_mutex);
++}
++#else /* !CONFIG_HIGHMEM */
++#define cifs_kmap_lock() do { ; } while(0)
++#define cifs_kmap_unlock() do { ; } while(0)
++#endif /* CONFIG_HIGHMEM */
++
+ /* Mark as invalid, all open files on tree connections since they
+ were closed when session to server was lost */
+ static void mark_open_files_invalid(struct cifs_tcon *pTcon)
+@@ -1540,6 +1566,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
+ cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
+
++ cifs_kmap_lock();
+ list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+ if (remaining >= PAGE_CACHE_SIZE) {
+ /* enough data to fill the page */
+@@ -1589,6 +1616,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ page_cache_release(page);
+ }
+ }
++ cifs_kmap_unlock();
+
+ /* issue the read if we have any iovecs left to fill */
+ if (rdata->nr_iov > 1) {
+@@ -2171,6 +2199,7 @@ cifs_async_writev(struct cifs_writedata *wdata)
+ iov[0].iov_base = smb;
+
+ /* marshal up the pages into iov array */
++ cifs_kmap_lock();
+ wdata->bytes = 0;
+ for (i = 0; i < wdata->nr_pages; i++) {
+ iov[i + 1].iov_len = min(inode->i_size -
+@@ -2179,6 +2208,7 @@ cifs_async_writev(struct cifs_writedata *wdata)
+ iov[i + 1].iov_base = kmap(wdata->pages[i]);
+ wdata->bytes += iov[i + 1].iov_len;
+ }
++ cifs_kmap_unlock();
+
+ cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
+
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 914bf9e..d6970f7 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -557,7 +557,8 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
+ if (bitmap_bh == NULL)
+ continue;
+
+- x = ext4_count_free(bitmap_bh, sb->s_blocksize);
++ x = ext4_count_free(bitmap_bh->b_data,
++ EXT4_BLOCKS_PER_GROUP(sb) / 8);
+ printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
+ i, ext4_free_group_clusters(sb, gdp), x);
+ bitmap_count += x;
+diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
+index fa3af81..bbde5d5 100644
+--- a/fs/ext4/bitmap.c
++++ b/fs/ext4/bitmap.c
+@@ -11,21 +11,15 @@
+ #include <linux/jbd2.h>
+ #include "ext4.h"
+
+-#ifdef EXT4FS_DEBUG
+-
+ static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
+
+-unsigned int ext4_count_free(struct buffer_head *map, unsigned int numchars)
++unsigned int ext4_count_free(char *bitmap, unsigned int numchars)
+ {
+ unsigned int i, sum = 0;
+
+- if (!map)
+- return 0;
+ for (i = 0; i < numchars; i++)
+- sum += nibblemap[map->b_data[i] & 0xf] +
+- nibblemap[(map->b_data[i] >> 4) & 0xf];
++ sum += nibblemap[bitmap[i] & 0xf] +
++ nibblemap[(bitmap[i] >> 4) & 0xf];
+ return sum;
+ }
+
+-#endif /* EXT4FS_DEBUG */
+-
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 7b1cd5c..8cb184c 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1123,8 +1123,7 @@ struct ext4_sb_info {
+ unsigned long s_desc_per_block; /* Number of group descriptors per block */
+ ext4_group_t s_groups_count; /* Number of groups in the fs */
+ ext4_group_t s_blockfile_groups;/* Groups acceptable for non-extent files */
+- unsigned long s_overhead_last; /* Last calculated overhead */
+- unsigned long s_blocks_last; /* Last seen block count */
++ unsigned long s_overhead; /* # of fs overhead clusters */
+ unsigned int s_cluster_ratio; /* Number of blocks per cluster */
+ unsigned int s_cluster_bits; /* log2 of s_cluster_ratio */
+ loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */
+@@ -1757,7 +1756,7 @@ struct mmpd_data {
+ # define NORET_AND noreturn,
+
+ /* bitmap.c */
+-extern unsigned int ext4_count_free(struct buffer_head *, unsigned);
++extern unsigned int ext4_count_free(char *bitmap, unsigned numchars);
+
+ /* balloc.c */
+ extern unsigned int ext4_block_group(struct super_block *sb,
+@@ -1925,6 +1924,7 @@ extern int ext4_group_extend(struct super_block *sb,
+ ext4_fsblk_t n_blocks_count);
+
+ /* super.c */
++extern int ext4_calculate_overhead(struct super_block *sb);
+ extern void *ext4_kvmalloc(size_t size, gfp_t flags);
+ extern void *ext4_kvzalloc(size_t size, gfp_t flags);
+ extern void ext4_kvfree(void *ptr);
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 8fb6844..6266799 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -1057,7 +1057,8 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
+ if (!bitmap_bh)
+ continue;
+
+- x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
++ x = ext4_count_free(bitmap_bh->b_data,
++ EXT4_INODES_PER_GROUP(sb) / 8);
+ printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
+ (unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
+ bitmap_count += x;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 3ce7613..8b01f9f 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -277,6 +277,15 @@ void ext4_da_update_reserve_space(struct inode *inode,
+ used = ei->i_reserved_data_blocks;
+ }
+
++ if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
++ ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
++ "with only %d reserved metadata blocks\n", __func__,
++ inode->i_ino, ei->i_allocated_meta_blocks,
++ ei->i_reserved_meta_blocks);
++ WARN_ON(1);
++ ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
++ }
++
+ /* Update per-inode reservations */
+ ei->i_reserved_data_blocks -= used;
+ ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
+@@ -1102,6 +1111,17 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned int md_needed;
+ int ret;
++ ext4_lblk_t save_last_lblock;
++ int save_len;
++
++ /*
++ * We will charge metadata quota at writeout time; this saves
++ * us from metadata over-estimation, though we may go over by
++ * a small amount in the end. Here we just reserve for data.
++ */
++ ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
++ if (ret)
++ return ret;
+
+ /*
+ * recalculate the amount of metadata blocks to reserve
+@@ -1110,32 +1130,31 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
+ */
+ repeat:
+ spin_lock(&ei->i_block_reservation_lock);
++ /*
++ * ext4_calc_metadata_amount() has side effects, which we have
++ * to be prepared undo if we fail to claim space.
++ */
++ save_len = ei->i_da_metadata_calc_len;
++ save_last_lblock = ei->i_da_metadata_calc_last_lblock;
+ md_needed = EXT4_NUM_B2C(sbi,
+ ext4_calc_metadata_amount(inode, lblock));
+ trace_ext4_da_reserve_space(inode, md_needed);
+- spin_unlock(&ei->i_block_reservation_lock);
+
+ /*
+- * We will charge metadata quota at writeout time; this saves
+- * us from metadata over-estimation, though we may go over by
+- * a small amount in the end. Here we just reserve for data.
+- */
+- ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
+- if (ret)
+- return ret;
+- /*
+ * We do still charge estimated metadata to the sb though;
+ * we cannot afford to run out of free blocks.
+ */
+ if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
+- dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
++ ei->i_da_metadata_calc_len = save_len;
++ ei->i_da_metadata_calc_last_lblock = save_last_lblock;
++ spin_unlock(&ei->i_block_reservation_lock);
+ if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
+ yield();
+ goto repeat;
+ }
++ dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
+ return -ENOSPC;
+ }
+- spin_lock(&ei->i_block_reservation_lock);
+ ei->i_reserved_data_blocks++;
+ ei->i_reserved_meta_blocks += md_needed;
+ spin_unlock(&ei->i_block_reservation_lock);
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 996780a..4eac337 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -952,6 +952,11 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
+ &sbi->s_flex_groups[flex_group].free_inodes);
+ }
+
++ /*
++ * Update the fs overhead information
++ */
++ ext4_calculate_overhead(sb);
++
+ ext4_handle_dirty_super(handle, sb);
+
+ exit_journal:
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index a93486e..a071348 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3083,6 +3083,114 @@ static void ext4_destroy_lazyinit_thread(void)
+ kthread_stop(ext4_lazyinit_task);
+ }
+
++/*
++ * Note: calculating the overhead so we can be compatible with
++ * historical BSD practice is quite difficult in the face of
++ * clusters/bigalloc. This is because multiple metadata blocks from
++ * different block group can end up in the same allocation cluster.
++ * Calculating the exact overhead in the face of clustered allocation
++ * requires either O(all block bitmaps) in memory or O(number of block
++ * groups**2) in time. We will still calculate the superblock for
++ * older file systems --- and if we come across with a bigalloc file
++ * system with zero in s_overhead_clusters the estimate will be close to
++ * correct especially for very large cluster sizes --- but for newer
++ * file systems, it's better to calculate this figure once at mkfs
++ * time, and store it in the superblock. If the superblock value is
++ * present (even for non-bigalloc file systems), we will use it.
++ */
++static int count_overhead(struct super_block *sb, ext4_group_t grp,
++ char *buf)
++{
++ struct ext4_sb_info *sbi = EXT4_SB(sb);
++ struct ext4_group_desc *gdp;
++ ext4_fsblk_t first_block, last_block, b;
++ ext4_group_t i, ngroups = ext4_get_groups_count(sb);
++ int s, j, count = 0;
++
++ first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
++ (grp * EXT4_BLOCKS_PER_GROUP(sb));
++ last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
++ for (i = 0; i < ngroups; i++) {
++ gdp = ext4_get_group_desc(sb, i, NULL);
++ b = ext4_block_bitmap(sb, gdp);
++ if (b >= first_block && b <= last_block) {
++ ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
++ count++;
++ }
++ b = ext4_inode_bitmap(sb, gdp);
++ if (b >= first_block && b <= last_block) {
++ ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
++ count++;
++ }
++ b = ext4_inode_table(sb, gdp);
++ if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
++ for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
++ int c = EXT4_B2C(sbi, b - first_block);
++ ext4_set_bit(c, buf);
++ count++;
++ }
++ if (i != grp)
++ continue;
++ s = 0;
++ if (ext4_bg_has_super(sb, grp)) {
++ ext4_set_bit(s++, buf);
++ count++;
++ }
++ for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
++ ext4_set_bit(EXT4_B2C(sbi, s++), buf);
++ count++;
++ }
++ }
++ if (!count)
++ return 0;
++ return EXT4_CLUSTERS_PER_GROUP(sb) -
++ ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
++}
++
++/*
++ * Compute the overhead and stash it in sbi->s_overhead
++ */
++int ext4_calculate_overhead(struct super_block *sb)
++{
++ struct ext4_sb_info *sbi = EXT4_SB(sb);
++ struct ext4_super_block *es = sbi->s_es;
++ ext4_group_t i, ngroups = ext4_get_groups_count(sb);
++ ext4_fsblk_t overhead = 0;
++ char *buf = (char *) get_zeroed_page(GFP_KERNEL);
++
++ memset(buf, 0, PAGE_SIZE);
++ if (!buf)
++ return -ENOMEM;
++
++ /*
++ * Compute the overhead (FS structures). This is constant
++ * for a given filesystem unless the number of block groups
++ * changes so we cache the previous value until it does.
++ */
++
++ /*
++ * All of the blocks before first_data_block are overhead
++ */
++ overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
++
++ /*
++ * Add the overhead found in each block group
++ */
++ for (i = 0; i < ngroups; i++) {
++ int blks;
++
++ blks = count_overhead(sb, i, buf);
++ overhead += blks;
++ if (blks)
++ memset(buf, 0, PAGE_SIZE);
++ cond_resched();
++ }
++ sbi->s_overhead = overhead;
++ smp_wmb();
++ free_page((unsigned long) buf);
++ return 0;
++}
++
+ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ {
+ char *orig_data = kstrdup(data, GFP_KERNEL);
+@@ -3695,6 +3803,18 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+
+ no_journal:
+ /*
++ * Get the # of file system overhead blocks from the
++ * superblock if present.
++ */
++ if (es->s_overhead_clusters)
++ sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
++ else {
++ ret = ext4_calculate_overhead(sb);
++ if (ret)
++ goto failed_mount_wq;
++ }
++
++ /*
+ * The maximum number of concurrent works can be high and
+ * concurrency isn't really necessary. Limit it to 1.
+ */
+@@ -4568,67 +4688,21 @@ restore_opts:
+ return err;
+ }
+
+-/*
+- * Note: calculating the overhead so we can be compatible with
+- * historical BSD practice is quite difficult in the face of
+- * clusters/bigalloc. This is because multiple metadata blocks from
+- * different block group can end up in the same allocation cluster.
+- * Calculating the exact overhead in the face of clustered allocation
+- * requires either O(all block bitmaps) in memory or O(number of block
+- * groups**2) in time. We will still calculate the superblock for
+- * older file systems --- and if we come across with a bigalloc file
+- * system with zero in s_overhead_clusters the estimate will be close to
+- * correct especially for very large cluster sizes --- but for newer
+- * file systems, it's better to calculate this figure once at mkfs
+- * time, and store it in the superblock. If the superblock value is
+- * present (even for non-bigalloc file systems), we will use it.
+- */
+ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
+ {
+ struct super_block *sb = dentry->d_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+- struct ext4_group_desc *gdp;
++ ext4_fsblk_t overhead = 0;
+ u64 fsid;
+ s64 bfree;
+
+- if (test_opt(sb, MINIX_DF)) {
+- sbi->s_overhead_last = 0;
+- } else if (es->s_overhead_clusters) {
+- sbi->s_overhead_last = le32_to_cpu(es->s_overhead_clusters);
+- } else if (sbi->s_blocks_last != ext4_blocks_count(es)) {
+- ext4_group_t i, ngroups = ext4_get_groups_count(sb);
+- ext4_fsblk_t overhead = 0;
+-
+- /*
+- * Compute the overhead (FS structures). This is constant
+- * for a given filesystem unless the number of block groups
+- * changes so we cache the previous value until it does.
+- */
+-
+- /*
+- * All of the blocks before first_data_block are
+- * overhead
+- */
+- overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
+-
+- /*
+- * Add the overhead found in each block group
+- */
+- for (i = 0; i < ngroups; i++) {
+- gdp = ext4_get_group_desc(sb, i, NULL);
+- overhead += ext4_num_overhead_clusters(sb, i, gdp);
+- cond_resched();
+- }
+- sbi->s_overhead_last = overhead;
+- smp_wmb();
+- sbi->s_blocks_last = ext4_blocks_count(es);
+- }
++ if (!test_opt(sb, MINIX_DF))
++ overhead = sbi->s_overhead;
+
+ buf->f_type = EXT4_SUPER_MAGIC;
+ buf->f_bsize = sb->s_blocksize;
+- buf->f_blocks = (ext4_blocks_count(es) -
+- EXT4_C2B(sbi, sbi->s_overhead_last));
++ buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, sbi->s_overhead);
+ bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
+ percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
+ /* prevent underflow in case that few free space is available */
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index ebc2f4d..0aa424a 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -569,7 +569,8 @@ static int hugetlbfs_set_page_dirty(struct page *page)
+ }
+
+ static int hugetlbfs_migrate_page(struct address_space *mapping,
+- struct page *newpage, struct page *page)
++ struct page *newpage, struct page *page,
++ enum migrate_mode mode)
+ {
+ int rc;
+
+diff --git a/fs/locks.c b/fs/locks.c
+index 6a64f15..fcc50ab 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -308,7 +308,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock,
+ return 0;
+ }
+
+-static int assign_type(struct file_lock *fl, int type)
++static int assign_type(struct file_lock *fl, long type)
+ {
+ switch (type) {
+ case F_RDLCK:
+@@ -445,7 +445,7 @@ static const struct lock_manager_operations lease_manager_ops = {
+ /*
+ * Initialize a lease, use the default lock manager operations
+ */
+-static int lease_init(struct file *filp, int type, struct file_lock *fl)
++static int lease_init(struct file *filp, long type, struct file_lock *fl)
+ {
+ if (assign_type(fl, type) != 0)
+ return -EINVAL;
+@@ -463,7 +463,7 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl)
+ }
+
+ /* Allocate a file_lock initialised to this type of lease */
+-static struct file_lock *lease_alloc(struct file *filp, int type)
++static struct file_lock *lease_alloc(struct file *filp, long type)
+ {
+ struct file_lock *fl = locks_alloc_lock();
+ int error = -ENOMEM;
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 3f4d957..68b3f20 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -330,7 +330,7 @@ void nfs_commit_release_pages(struct nfs_write_data *data);
+
+ #ifdef CONFIG_MIGRATION
+ extern int nfs_migrate_page(struct address_space *,
+- struct page *, struct page *);
++ struct page *, struct page *, enum migrate_mode);
+ #else
+ #define nfs_migrate_page NULL
+ #endif
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 4efd421..c6e523a 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1711,7 +1711,7 @@ out_error:
+
+ #ifdef CONFIG_MIGRATION
+ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
+- struct page *page)
++ struct page *page, enum migrate_mode mode)
+ {
+ /*
+ * If PagePrivate is set, then the page is currently associated with
+@@ -1726,7 +1726,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
+
+ nfs_fscache_release_page(page, GFP_KERNEL);
+
+- return migrate_page(mapping, newpage, page);
++ return migrate_page(mapping, newpage, page, mode);
+ }
+ #endif
+
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 270e135..516b7f0 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1285,7 +1285,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
+ BUG_ON(ident != TAG_IDENT_LVD);
+ lvd = (struct logicalVolDesc *)bh->b_data;
+ table_len = le32_to_cpu(lvd->mapTableLength);
+- if (sizeof(*lvd) + table_len > sb->s_blocksize) {
++ if (table_len > sb->s_blocksize - sizeof(*lvd)) {
+ udf_err(sb, "error loading logical volume descriptor: "
+ "Partition table too long (%u > %lu)\n", table_len,
+ sb->s_blocksize - sizeof(*lvd));
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 0ed1eb0..ff039f0 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -481,6 +481,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
+
+ #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
+ #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
++#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+ #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
+ #define blk_queue_noxmerges(q) \
+ test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index 6cb60fd..c692acc 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -66,8 +66,9 @@ enum {
+ /* migration should happen before other stuff but after perf */
+ CPU_PRI_PERF = 20,
+ CPU_PRI_MIGRATION = 10,
+- /* prepare workqueues for other notifiers */
+- CPU_PRI_WORKQUEUE = 5,
++ /* bring up workqueues before normal notifiers and down after */
++ CPU_PRI_WORKQUEUE_UP = 5,
++ CPU_PRI_WORKQUEUE_DOWN = -5,
+ };
+
+ #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index e9eaec5..7a7e5fd 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -89,42 +89,33 @@ extern void rebuild_sched_domains(void);
+ extern void cpuset_print_task_mems_allowed(struct task_struct *p);
+
+ /*
+- * reading current mems_allowed and mempolicy in the fastpath must protected
+- * by get_mems_allowed()
++ * get_mems_allowed is required when making decisions involving mems_allowed
++ * such as during page allocation. mems_allowed can be updated in parallel
++ * and depending on the new value an operation can fail potentially causing
++ * process failure. A retry loop with get_mems_allowed and put_mems_allowed
++ * prevents these artificial failures.
+ */
+-static inline void get_mems_allowed(void)
++static inline unsigned int get_mems_allowed(void)
+ {
+- current->mems_allowed_change_disable++;
+-
+- /*
+- * ensure that reading mems_allowed and mempolicy happens after the
+- * update of ->mems_allowed_change_disable.
+- *
+- * the write-side task finds ->mems_allowed_change_disable is not 0,
+- * and knows the read-side task is reading mems_allowed or mempolicy,
+- * so it will clear old bits lazily.
+- */
+- smp_mb();
++ return read_seqcount_begin(&current->mems_allowed_seq);
+ }
+
+-static inline void put_mems_allowed(void)
++/*
++ * If this returns false, the operation that took place after get_mems_allowed
++ * may have failed. It is up to the caller to retry the operation if
++ * appropriate.
++ */
++static inline bool put_mems_allowed(unsigned int seq)
+ {
+- /*
+- * ensure that reading mems_allowed and mempolicy before reducing
+- * mems_allowed_change_disable.
+- *
+- * the write-side task will know that the read-side task is still
+- * reading mems_allowed or mempolicy, don't clears old bits in the
+- * nodemask.
+- */
+- smp_mb();
+- --ACCESS_ONCE(current->mems_allowed_change_disable);
++ return !read_seqcount_retry(&current->mems_allowed_seq, seq);
+ }
+
+ static inline void set_mems_allowed(nodemask_t nodemask)
+ {
+ task_lock(current);
++ write_seqcount_begin(&current->mems_allowed_seq);
+ current->mems_allowed = nodemask;
++ write_seqcount_end(&current->mems_allowed_seq);
+ task_unlock(current);
+ }
+
+@@ -234,12 +225,14 @@ static inline void set_mems_allowed(nodemask_t nodemask)
+ {
+ }
+
+-static inline void get_mems_allowed(void)
++static inline unsigned int get_mems_allowed(void)
+ {
++ return 0;
+ }
+
+-static inline void put_mems_allowed(void)
++static inline bool put_mems_allowed(unsigned int seq)
+ {
++ return true;
+ }
+
+ #endif /* !CONFIG_CPUSETS */
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 43d36b7..29b6353 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -525,6 +525,7 @@ enum positive_aop_returns {
+ struct page;
+ struct address_space;
+ struct writeback_control;
++enum migrate_mode;
+
+ struct iov_iter {
+ const struct iovec *iov;
+@@ -609,9 +610,12 @@ struct address_space_operations {
+ loff_t offset, unsigned long nr_segs);
+ int (*get_xip_mem)(struct address_space *, pgoff_t, int,
+ void **, unsigned long *);
+- /* migrate the contents of a page to the specified target */
++ /*
++ * migrate the contents of a page to the specified target. If sync
++ * is false, it must not block.
++ */
+ int (*migratepage) (struct address_space *,
+- struct page *, struct page *);
++ struct page *, struct page *, enum migrate_mode);
+ int (*launder_page) (struct page *);
+ int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
+ unsigned long);
+@@ -2586,7 +2590,8 @@ extern int generic_check_addressable(unsigned, u64);
+
+ #ifdef CONFIG_MIGRATION
+ extern int buffer_migrate_page(struct address_space *,
+- struct page *, struct page *);
++ struct page *, struct page *,
++ enum migrate_mode);
+ #else
+ #define buffer_migrate_page NULL
+ #endif
+diff --git a/include/linux/init_task.h b/include/linux/init_task.h
+index 32574ee..df53fdf 100644
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -30,6 +30,13 @@ extern struct fs_struct init_fs;
+ #define INIT_THREADGROUP_FORK_LOCK(sig)
+ #endif
+
++#ifdef CONFIG_CPUSETS
++#define INIT_CPUSET_SEQ \
++ .mems_allowed_seq = SEQCNT_ZERO,
++#else
++#define INIT_CPUSET_SEQ
++#endif
++
+ #define INIT_SIGNALS(sig) { \
+ .nr_threads = 1, \
+ .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
+@@ -193,6 +200,7 @@ extern struct cred init_cred;
+ INIT_FTRACE_GRAPH \
+ INIT_TRACE_RECURSION \
+ INIT_TASK_RCU_PREEMPT(tsk) \
++ INIT_CPUSET_SEQ \
+ }
+
+
+diff --git a/include/linux/migrate.h b/include/linux/migrate.h
+index e39aeec..eaf8674 100644
+--- a/include/linux/migrate.h
++++ b/include/linux/migrate.h
+@@ -6,18 +6,31 @@
+
+ typedef struct page *new_page_t(struct page *, unsigned long private, int **);
+
++/*
++ * MIGRATE_ASYNC means never block
++ * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
++ * on most operations but not ->writepage as the potential stall time
++ * is too significant
++ * MIGRATE_SYNC will block when migrating pages
++ */
++enum migrate_mode {
++ MIGRATE_ASYNC,
++ MIGRATE_SYNC_LIGHT,
++ MIGRATE_SYNC,
++};
++
+ #ifdef CONFIG_MIGRATION
+ #define PAGE_MIGRATION 1
+
+ extern void putback_lru_pages(struct list_head *l);
+ extern int migrate_page(struct address_space *,
+- struct page *, struct page *);
++ struct page *, struct page *, enum migrate_mode);
+ extern int migrate_pages(struct list_head *l, new_page_t x,
+ unsigned long private, bool offlining,
+- bool sync);
++ enum migrate_mode mode);
+ extern int migrate_huge_pages(struct list_head *l, new_page_t x,
+ unsigned long private, bool offlining,
+- bool sync);
++ enum migrate_mode mode);
+
+ extern int fail_migrate_page(struct address_space *,
+ struct page *, struct page *);
+@@ -36,10 +49,10 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
+ static inline void putback_lru_pages(struct list_head *l) {}
+ static inline int migrate_pages(struct list_head *l, new_page_t x,
+ unsigned long private, bool offlining,
+- bool sync) { return -ENOSYS; }
++ enum migrate_mode mode) { return -ENOSYS; }
+ static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
+ unsigned long private, bool offlining,
+- bool sync) { return -ENOSYS; }
++ enum migrate_mode mode) { return -ENOSYS; }
+
+ static inline int migrate_prep(void) { return -ENOSYS; }
+ static inline int migrate_prep_local(void) { return -ENOSYS; }
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 905b1e1..25842b6 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -173,6 +173,8 @@ static inline int is_unevictable_lru(enum lru_list l)
+ #define ISOLATE_CLEAN ((__force isolate_mode_t)0x4)
+ /* Isolate unmapped file */
+ #define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x8)
++/* Isolate for asynchronous migration */
++#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x10)
+
+ /* LRU Isolation modes. */
+ typedef unsigned __bitwise__ isolate_mode_t;
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 5afa2a3..d336c35 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -145,6 +145,7 @@ extern unsigned long this_cpu_load(void);
+
+
+ extern void calc_global_load(unsigned long ticks);
++extern void update_cpu_load_nohz(void);
+
+ extern unsigned long get_parent_ip(unsigned long addr);
+
+@@ -1481,7 +1482,7 @@ struct task_struct {
+ #endif
+ #ifdef CONFIG_CPUSETS
+ nodemask_t mems_allowed; /* Protected by alloc_lock */
+- int mems_allowed_change_disable;
++ seqcount_t mems_allowed_seq; /* Seqence no to catch updates */
+ int cpuset_mem_spread_rotor;
+ int cpuset_slab_spread_rotor;
+ #endif
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 94bbec3..6ee550e 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -157,6 +157,7 @@ enum tcm_sense_reason_table {
+ TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
+ TCM_CHECK_CONDITION_NOT_READY = 0x0f,
+ TCM_RESERVATION_CONFLICT = 0x10,
++ TCM_ADDRESS_OUT_OF_RANGE = 0x11,
+ };
+
+ struct se_obj {
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 0b1712d..46a1d3c 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -964,7 +964,6 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
+ {
+ bool need_loop;
+
+-repeat:
+ /*
+ * Allow tasks that have access to memory reserves because they have
+ * been OOM killed to get memory anywhere.
+@@ -983,45 +982,19 @@ repeat:
+ */
+ need_loop = task_has_mempolicy(tsk) ||
+ !nodes_intersects(*newmems, tsk->mems_allowed);
+- nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
+- mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
+
+- /*
+- * ensure checking ->mems_allowed_change_disable after setting all new
+- * allowed nodes.
+- *
+- * the read-side task can see an nodemask with new allowed nodes and
+- * old allowed nodes. and if it allocates page when cpuset clears newly
+- * disallowed ones continuous, it can see the new allowed bits.
+- *
+- * And if setting all new allowed nodes is after the checking, setting
+- * all new allowed nodes and clearing newly disallowed ones will be done
+- * continuous, and the read-side task may find no node to alloc page.
+- */
+- smp_mb();
++ if (need_loop)
++ write_seqcount_begin(&tsk->mems_allowed_seq);
+
+- /*
+- * Allocation of memory is very fast, we needn't sleep when waiting
+- * for the read-side.
+- */
+- while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+- task_unlock(tsk);
+- if (!task_curr(tsk))
+- yield();
+- goto repeat;
+- }
+-
+- /*
+- * ensure checking ->mems_allowed_change_disable before clearing all new
+- * disallowed nodes.
+- *
+- * if clearing newly disallowed bits before the checking, the read-side
+- * task may find no node to alloc page.
+- */
+- smp_mb();
++ nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
++ mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
+
+ mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
+ tsk->mems_allowed = *newmems;
++
++ if (need_loop)
++ write_seqcount_end(&tsk->mems_allowed_seq);
++
+ task_unlock(tsk);
+ }
+
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 79ee71f..222457a 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -979,6 +979,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
+ #ifdef CONFIG_CGROUPS
+ init_rwsem(&sig->threadgroup_fork_lock);
+ #endif
++#ifdef CONFIG_CPUSETS
++ seqcount_init(&tsk->mems_allowed_seq);
++#endif
+
+ sig->oom_adj = current->signal->oom_adj;
+ sig->oom_score_adj = current->signal->oom_score_adj;
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 7c0d578..013bd2e 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -367,6 +367,7 @@ int hibernation_snapshot(int platform_mode)
+ }
+
+ suspend_console();
++ ftrace_stop();
+ pm_restrict_gfp_mask();
+ error = dpm_suspend(PMSG_FREEZE);
+ if (error)
+@@ -392,6 +393,7 @@ int hibernation_snapshot(int platform_mode)
+ if (error || !in_suspend)
+ pm_restore_gfp_mask();
+
++ ftrace_start();
+ resume_console();
+ dpm_complete(msg);
+
+@@ -496,6 +498,7 @@ int hibernation_restore(int platform_mode)
+
+ pm_prepare_console();
+ suspend_console();
++ ftrace_stop();
+ pm_restrict_gfp_mask();
+ error = dpm_suspend_start(PMSG_QUIESCE);
+ if (!error) {
+@@ -503,6 +506,7 @@ int hibernation_restore(int platform_mode)
+ dpm_resume_end(PMSG_RECOVER);
+ }
+ pm_restore_gfp_mask();
++ ftrace_start();
+ resume_console();
+ pm_restore_console();
+ return error;
+@@ -529,6 +533,7 @@ int hibernation_platform_enter(void)
+
+ entering_platform_hibernation = true;
+ suspend_console();
++ ftrace_stop();
+ error = dpm_suspend_start(PMSG_HIBERNATE);
+ if (error) {
+ if (hibernation_ops->recover)
+@@ -572,6 +577,7 @@ int hibernation_platform_enter(void)
+ Resume_devices:
+ entering_platform_hibernation = false;
+ dpm_resume_end(PMSG_RESTORE);
++ ftrace_start();
+ resume_console();
+
+ Close:
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index 4953dc0..af48faa 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -25,6 +25,7 @@
+ #include <linux/export.h>
+ #include <linux/suspend.h>
+ #include <linux/syscore_ops.h>
++#include <linux/ftrace.h>
+ #include <trace/events/power.h>
+
+ #include "power.h"
+@@ -220,6 +221,7 @@ int suspend_devices_and_enter(suspend_state_t state)
+ goto Close;
+ }
+ suspend_console();
++ ftrace_stop();
+ suspend_test_start();
+ error = dpm_suspend_start(PMSG_SUSPEND);
+ if (error) {
+@@ -239,6 +241,7 @@ int suspend_devices_and_enter(suspend_state_t state)
+ suspend_test_start();
+ dpm_resume_end(PMSG_RESUME);
+ suspend_test_finish("resume devices");
++ ftrace_start();
+ resume_console();
+ Close:
+ if (suspend_ops->end)
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 52ac69b..9cd8ca7 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -1887,7 +1887,7 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
+
+ static void update_sysctl(void);
+ static int get_update_sysctl_factor(void);
+-static void update_cpu_load(struct rq *this_rq);
++static void update_idle_cpu_load(struct rq *this_rq);
+
+ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+ {
+@@ -3855,22 +3855,13 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
+ * scheduler tick (TICK_NSEC). With tickless idle this will not be called
+ * every tick. We fix it up based on jiffies.
+ */
+-static void update_cpu_load(struct rq *this_rq)
++static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
++ unsigned long pending_updates)
+ {
+- unsigned long this_load = this_rq->load.weight;
+- unsigned long curr_jiffies = jiffies;
+- unsigned long pending_updates;
+ int i, scale;
+
+ this_rq->nr_load_updates++;
+
+- /* Avoid repeated calls on same jiffy, when moving in and out of idle */
+- if (curr_jiffies == this_rq->last_load_update_tick)
+- return;
+-
+- pending_updates = curr_jiffies - this_rq->last_load_update_tick;
+- this_rq->last_load_update_tick = curr_jiffies;
+-
+ /* Update our load: */
+ this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
+ for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
+@@ -3895,9 +3886,78 @@ static void update_cpu_load(struct rq *this_rq)
+ sched_avg_update(this_rq);
+ }
+
++#ifdef CONFIG_NO_HZ
++/*
++ * There is no sane way to deal with nohz on smp when using jiffies because the
++ * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
++ * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
++ *
++ * Therefore we cannot use the delta approach from the regular tick since that
++ * would seriously skew the load calculation. However we'll make do for those
++ * updates happening while idle (nohz_idle_balance) or coming out of idle
++ * (tick_nohz_idle_exit).
++ *
++ * This means we might still be one tick off for nohz periods.
++ */
++
++/*
++ * Called from nohz_idle_balance() to update the load ratings before doing the
++ * idle balance.
++ */
++static void update_idle_cpu_load(struct rq *this_rq)
++{
++ unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
++ unsigned long load = this_rq->load.weight;
++ unsigned long pending_updates;
++
++ /*
++ * bail if there's load or we're actually up-to-date.
++ */
++ if (load || curr_jiffies == this_rq->last_load_update_tick)
++ return;
++
++ pending_updates = curr_jiffies - this_rq->last_load_update_tick;
++ this_rq->last_load_update_tick = curr_jiffies;
++
++ __update_cpu_load(this_rq, load, pending_updates);
++}
++
++/*
++ * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
++ */
++void update_cpu_load_nohz(void)
++{
++ struct rq *this_rq = this_rq();
++ unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
++ unsigned long pending_updates;
++
++ if (curr_jiffies == this_rq->last_load_update_tick)
++ return;
++
++ raw_spin_lock(&this_rq->lock);
++ pending_updates = curr_jiffies - this_rq->last_load_update_tick;
++ if (pending_updates) {
++ this_rq->last_load_update_tick = curr_jiffies;
++ /*
++ * We were idle, this means load 0, the current load might be
++ * !0 due to remote wakeups and the sort.
++ */
++ __update_cpu_load(this_rq, 0, pending_updates);
++ }
++ raw_spin_unlock(&this_rq->lock);
++}
++#endif /* CONFIG_NO_HZ */
++
++/*
++ * Called from scheduler_tick()
++ */
+ static void update_cpu_load_active(struct rq *this_rq)
+ {
+- update_cpu_load(this_rq);
++ /*
++ * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
++ */
++ this_rq->last_load_update_tick = jiffies;
++ __update_cpu_load(this_rq, this_rq->load.weight, 1);
+
+ calc_load_account_active(this_rq);
+ }
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index 8a39fa3..66e4576 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -4735,7 +4735,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
+
+ raw_spin_lock_irq(&this_rq->lock);
+ update_rq_clock(this_rq);
+- update_cpu_load(this_rq);
++ update_idle_cpu_load(this_rq);
+ raw_spin_unlock_irq(&this_rq->lock);
+
+ rebalance_domains(balance_cpu, CPU_IDLE);
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 9955ebd..793548c 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -549,6 +549,7 @@ void tick_nohz_restart_sched_tick(void)
+ /* Update jiffies first */
+ select_nohz_load_balancer(0);
+ tick_do_update_jiffies64(now);
++ update_cpu_load_nohz();
+
+ #ifndef CONFIG_VIRT_CPU_ACCOUNTING
+ /*
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 7947e16..a650bee 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3586,6 +3586,41 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
+ return notifier_from_errno(0);
+ }
+
++/*
++ * Workqueues should be brought up before normal priority CPU notifiers.
++ * This will be registered high priority CPU notifier.
++ */
++static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
++ unsigned long action,
++ void *hcpu)
++{
++ switch (action & ~CPU_TASKS_FROZEN) {
++ case CPU_UP_PREPARE:
++ case CPU_UP_CANCELED:
++ case CPU_DOWN_FAILED:
++ case CPU_ONLINE:
++ return workqueue_cpu_callback(nfb, action, hcpu);
++ }
++ return NOTIFY_OK;
++}
++
++/*
++ * Workqueues should be brought down after normal priority CPU notifiers.
++ * This will be registered as low priority CPU notifier.
++ */
++static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
++ unsigned long action,
++ void *hcpu)
++{
++ switch (action & ~CPU_TASKS_FROZEN) {
++ case CPU_DOWN_PREPARE:
++ case CPU_DYING:
++ case CPU_POST_DEAD:
++ return workqueue_cpu_callback(nfb, action, hcpu);
++ }
++ return NOTIFY_OK;
++}
++
+ #ifdef CONFIG_SMP
+
+ struct work_for_cpu {
+@@ -3779,7 +3814,8 @@ static int __init init_workqueues(void)
+ unsigned int cpu;
+ int i;
+
+- cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
++ cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
++ cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
+
+ /* initialize gcwqs */
+ for_each_gcwq_cpu(cpu) {
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 50f1c60..46973fb 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -372,7 +372,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
+ }
+
+ if (!cc->sync)
+- mode |= ISOLATE_CLEAN;
++ mode |= ISOLATE_ASYNC_MIGRATE;
+
+ /* Try isolate the page */
+ if (__isolate_lru_page(page, mode, 0) != 0)
+@@ -577,7 +577,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
+ nr_migrate = cc->nr_migratepages;
+ err = migrate_pages(&cc->migratepages, compaction_alloc,
+ (unsigned long)cc, false,
+- cc->sync);
++ cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
+ update_nr_listpages(cc);
+ nr_remaining = cc->nr_migratepages;
+
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 03c5b0e..556858c 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -500,10 +500,13 @@ struct page *__page_cache_alloc(gfp_t gfp)
+ struct page *page;
+
+ if (cpuset_do_page_mem_spread()) {
+- get_mems_allowed();
+- n = cpuset_mem_spread_node();
+- page = alloc_pages_exact_node(n, gfp, 0);
+- put_mems_allowed();
++ unsigned int cpuset_mems_cookie;
++ do {
++ cpuset_mems_cookie = get_mems_allowed();
++ n = cpuset_mem_spread_node();
++ page = alloc_pages_exact_node(n, gfp, 0);
++ } while (!put_mems_allowed(cpuset_mems_cookie) && !page);
++
+ return page;
+ }
+ return alloc_pages(gfp, 0);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 7c535b0..b1e1bad 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -538,8 +538,10 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
+ struct zonelist *zonelist;
+ struct zone *zone;
+ struct zoneref *z;
++ unsigned int cpuset_mems_cookie;
+
+- get_mems_allowed();
++retry_cpuset:
++ cpuset_mems_cookie = get_mems_allowed();
+ zonelist = huge_zonelist(vma, address,
+ htlb_alloc_mask, &mpol, &nodemask);
+ /*
+@@ -566,10 +568,15 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
+ }
+ }
+ }
+-err:
++
+ mpol_cond_put(mpol);
+- put_mems_allowed();
++ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
++ goto retry_cpuset;
+ return page;
++
++err:
++ mpol_cond_put(mpol);
++ return NULL;
+ }
+
+ static void update_and_free_page(struct hstate *h, struct page *page)
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 06d3479..5bd5bb1 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1427,8 +1427,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
+ /* Keep page count to indicate a given hugepage is isolated. */
+
+ list_add(&hpage->lru, &pagelist);
+- ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
+- true);
++ ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, false,
++ MIGRATE_SYNC);
+ if (ret) {
+ struct page *page1, *page2;
+ list_for_each_entry_safe(page1, page2, &pagelist, lru)
+@@ -1557,7 +1557,7 @@ int soft_offline_page(struct page *page, int flags)
+ page_is_file_cache(page));
+ list_add(&page->lru, &pagelist);
+ ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
+- 0, true);
++ false, MIGRATE_SYNC);
+ if (ret) {
+ putback_lru_pages(&pagelist);
+ pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 2168489..6629faf 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -809,7 +809,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
+ }
+ /* this function returns # of failed pages */
+ ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
+- true, true);
++ true, MIGRATE_SYNC);
+ if (ret)
+ putback_lru_pages(&source);
+ }
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index b26aae2..c0007f9 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -942,7 +942,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
+
+ if (!list_empty(&pagelist)) {
+ err = migrate_pages(&pagelist, new_node_page, dest,
+- false, true);
++ false, MIGRATE_SYNC);
+ if (err)
+ putback_lru_pages(&pagelist);
+ }
+@@ -1843,18 +1843,24 @@ struct page *
+ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
+ unsigned long addr, int node)
+ {
+- struct mempolicy *pol = get_vma_policy(current, vma, addr);
++ struct mempolicy *pol;
+ struct zonelist *zl;
+ struct page *page;
++ unsigned int cpuset_mems_cookie;
++
++retry_cpuset:
++ pol = get_vma_policy(current, vma, addr);
++ cpuset_mems_cookie = get_mems_allowed();
+
+- get_mems_allowed();
+ if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
+ unsigned nid;
+
+ nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
+ mpol_cond_put(pol);
+ page = alloc_page_interleave(gfp, order, nid);
+- put_mems_allowed();
++ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
++ goto retry_cpuset;
++
+ return page;
+ }
+ zl = policy_zonelist(gfp, pol, node);
+@@ -1865,7 +1871,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
+ struct page *page = __alloc_pages_nodemask(gfp, order,
+ zl, policy_nodemask(gfp, pol));
+ __mpol_put(pol);
+- put_mems_allowed();
++ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
++ goto retry_cpuset;
+ return page;
+ }
+ /*
+@@ -1873,7 +1880,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
+ */
+ page = __alloc_pages_nodemask(gfp, order, zl,
+ policy_nodemask(gfp, pol));
+- put_mems_allowed();
++ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
++ goto retry_cpuset;
+ return page;
+ }
+
+@@ -1900,11 +1908,14 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
+ {
+ struct mempolicy *pol = current->mempolicy;
+ struct page *page;
++ unsigned int cpuset_mems_cookie;
+
+ if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
+ pol = &default_policy;
+
+- get_mems_allowed();
++retry_cpuset:
++ cpuset_mems_cookie = get_mems_allowed();
++
+ /*
+ * No reference counting needed for current->mempolicy
+ * nor system default_policy
+@@ -1915,7 +1926,10 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
+ page = __alloc_pages_nodemask(gfp, order,
+ policy_zonelist(gfp, pol, numa_node_id()),
+ policy_nodemask(gfp, pol));
+- put_mems_allowed();
++
++ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
++ goto retry_cpuset;
++
+ return page;
+ }
+ EXPORT_SYMBOL(alloc_pages_current);
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 177aca4..180d97f 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -220,6 +220,56 @@ out:
+ pte_unmap_unlock(ptep, ptl);
+ }
+
++#ifdef CONFIG_BLOCK
++/* Returns true if all buffers are successfully locked */
++static bool buffer_migrate_lock_buffers(struct buffer_head *head,
++ enum migrate_mode mode)
++{
++ struct buffer_head *bh = head;
++
++ /* Simple case, sync compaction */
++ if (mode != MIGRATE_ASYNC) {
++ do {
++ get_bh(bh);
++ lock_buffer(bh);
++ bh = bh->b_this_page;
++
++ } while (bh != head);
++
++ return true;
++ }
++
++ /* async case, we cannot block on lock_buffer so use trylock_buffer */
++ do {
++ get_bh(bh);
++ if (!trylock_buffer(bh)) {
++ /*
++ * We failed to lock the buffer and cannot stall in
++ * async migration. Release the taken locks
++ */
++ struct buffer_head *failed_bh = bh;
++ put_bh(failed_bh);
++ bh = head;
++ while (bh != failed_bh) {
++ unlock_buffer(bh);
++ put_bh(bh);
++ bh = bh->b_this_page;
++ }
++ return false;
++ }
++
++ bh = bh->b_this_page;
++ } while (bh != head);
++ return true;
++}
++#else
++static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
++ enum migrate_mode mode)
++{
++ return true;
++}
++#endif /* CONFIG_BLOCK */
++
+ /*
+ * Replace the page in the mapping.
+ *
+@@ -229,7 +279,8 @@ out:
+ * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
+ */
+ static int migrate_page_move_mapping(struct address_space *mapping,
+- struct page *newpage, struct page *page)
++ struct page *newpage, struct page *page,
++ struct buffer_head *head, enum migrate_mode mode)
+ {
+ int expected_count;
+ void **pslot;
+@@ -259,6 +310,20 @@ static int migrate_page_move_mapping(struct address_space *mapping,
+ }
+
+ /*
++ * In the async migration case of moving a page with buffers, lock the
++ * buffers using trylock before the mapping is moved. If the mapping
++ * was moved, we later failed to lock the buffers and could not move
++ * the mapping back due to an elevated page count, we would have to
++ * block waiting on other references to be dropped.
++ */
++ if (mode == MIGRATE_ASYNC && head &&
++ !buffer_migrate_lock_buffers(head, mode)) {
++ page_unfreeze_refs(page, expected_count);
++ spin_unlock_irq(&mapping->tree_lock);
++ return -EAGAIN;
++ }
++
++ /*
+ * Now we know that no one else is looking at the page.
+ */
+ get_page(newpage); /* add cache reference */
+@@ -415,13 +480,14 @@ EXPORT_SYMBOL(fail_migrate_page);
+ * Pages are locked upon entry and exit.
+ */
+ int migrate_page(struct address_space *mapping,
+- struct page *newpage, struct page *page)
++ struct page *newpage, struct page *page,
++ enum migrate_mode mode)
+ {
+ int rc;
+
+ BUG_ON(PageWriteback(page)); /* Writeback must be complete */
+
+- rc = migrate_page_move_mapping(mapping, newpage, page);
++ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
+
+ if (rc)
+ return rc;
+@@ -438,28 +504,28 @@ EXPORT_SYMBOL(migrate_page);
+ * exist.
+ */
+ int buffer_migrate_page(struct address_space *mapping,
+- struct page *newpage, struct page *page)
++ struct page *newpage, struct page *page, enum migrate_mode mode)
+ {
+ struct buffer_head *bh, *head;
+ int rc;
+
+ if (!page_has_buffers(page))
+- return migrate_page(mapping, newpage, page);
++ return migrate_page(mapping, newpage, page, mode);
+
+ head = page_buffers(page);
+
+- rc = migrate_page_move_mapping(mapping, newpage, page);
++ rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
+
+ if (rc)
+ return rc;
+
+- bh = head;
+- do {
+- get_bh(bh);
+- lock_buffer(bh);
+- bh = bh->b_this_page;
+-
+- } while (bh != head);
++ /*
++ * In the async case, migrate_page_move_mapping locked the buffers
++ * with an IRQ-safe spinlock held. In the sync case, the buffers
++ * need to be locked now
++ */
++ if (mode != MIGRATE_ASYNC)
++ BUG_ON(!buffer_migrate_lock_buffers(head, mode));
+
+ ClearPagePrivate(page);
+ set_page_private(newpage, page_private(page));
+@@ -536,10 +602,14 @@ static int writeout(struct address_space *mapping, struct page *page)
+ * Default handling if a filesystem does not provide a migration function.
+ */
+ static int fallback_migrate_page(struct address_space *mapping,
+- struct page *newpage, struct page *page)
++ struct page *newpage, struct page *page, enum migrate_mode mode)
+ {
+- if (PageDirty(page))
++ if (PageDirty(page)) {
++ /* Only writeback pages in full synchronous migration */
++ if (mode != MIGRATE_SYNC)
++ return -EBUSY;
+ return writeout(mapping, page);
++ }
+
+ /*
+ * Buffers may be managed in a filesystem specific way.
+@@ -549,7 +619,7 @@ static int fallback_migrate_page(struct address_space *mapping,
+ !try_to_release_page(page, GFP_KERNEL))
+ return -EAGAIN;
+
+- return migrate_page(mapping, newpage, page);
++ return migrate_page(mapping, newpage, page, mode);
+ }
+
+ /*
+@@ -564,7 +634,7 @@ static int fallback_migrate_page(struct address_space *mapping,
+ * == 0 - success
+ */
+ static int move_to_new_page(struct page *newpage, struct page *page,
+- int remap_swapcache, bool sync)
++ int remap_swapcache, enum migrate_mode mode)
+ {
+ struct address_space *mapping;
+ int rc;
+@@ -585,29 +655,18 @@ static int move_to_new_page(struct page *newpage, struct page *page,
+
+ mapping = page_mapping(page);
+ if (!mapping)
+- rc = migrate_page(mapping, newpage, page);
+- else {
++ rc = migrate_page(mapping, newpage, page, mode);
++ else if (mapping->a_ops->migratepage)
+ /*
+- * Do not writeback pages if !sync and migratepage is
+- * not pointing to migrate_page() which is nonblocking
+- * (swapcache/tmpfs uses migratepage = migrate_page).
++ * Most pages have a mapping and most filesystems provide a
++ * migratepage callback. Anonymous pages are part of swap
++ * space which also has its own migratepage callback. This
++ * is the most common path for page migration.
+ */
+- if (PageDirty(page) && !sync &&
+- mapping->a_ops->migratepage != migrate_page)
+- rc = -EBUSY;
+- else if (mapping->a_ops->migratepage)
+- /*
+- * Most pages have a mapping and most filesystems
+- * should provide a migration function. Anonymous
+- * pages are part of swap space which also has its
+- * own migration function. This is the most common
+- * path for page migration.
+- */
+- rc = mapping->a_ops->migratepage(mapping,
+- newpage, page);
+- else
+- rc = fallback_migrate_page(mapping, newpage, page);
+- }
++ rc = mapping->a_ops->migratepage(mapping,
++ newpage, page, mode);
++ else
++ rc = fallback_migrate_page(mapping, newpage, page, mode);
+
+ if (rc) {
+ newpage->mapping = NULL;
+@@ -622,7 +681,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
+ }
+
+ static int __unmap_and_move(struct page *page, struct page *newpage,
+- int force, bool offlining, bool sync)
++ int force, bool offlining, enum migrate_mode mode)
+ {
+ int rc = -EAGAIN;
+ int remap_swapcache = 1;
+@@ -631,7 +690,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
+ struct anon_vma *anon_vma = NULL;
+
+ if (!trylock_page(page)) {
+- if (!force || !sync)
++ if (!force || mode == MIGRATE_ASYNC)
+ goto out;
+
+ /*
+@@ -677,10 +736,12 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
+
+ if (PageWriteback(page)) {
+ /*
+- * For !sync, there is no point retrying as the retry loop
+- * is expected to be too short for PageWriteback to be cleared
++ * Only in the case of a full syncronous migration is it
++ * necessary to wait for PageWriteback. In the async case,
++ * the retry loop is too short and in the sync-light case,
++ * the overhead of stalling is too much
+ */
+- if (!sync) {
++ if (mode != MIGRATE_SYNC) {
+ rc = -EBUSY;
+ goto uncharge;
+ }
+@@ -751,7 +812,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
+
+ skip_unmap:
+ if (!page_mapped(page))
+- rc = move_to_new_page(newpage, page, remap_swapcache, sync);
++ rc = move_to_new_page(newpage, page, remap_swapcache, mode);
+
+ if (rc && remap_swapcache)
+ remove_migration_ptes(page, page);
+@@ -774,7 +835,8 @@ out:
+ * to the newly allocated page in newpage.
+ */
+ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
+- struct page *page, int force, bool offlining, bool sync)
++ struct page *page, int force, bool offlining,
++ enum migrate_mode mode)
+ {
+ int rc = 0;
+ int *result = NULL;
+@@ -792,7 +854,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
+ if (unlikely(split_huge_page(page)))
+ goto out;
+
+- rc = __unmap_and_move(page, newpage, force, offlining, sync);
++ rc = __unmap_and_move(page, newpage, force, offlining, mode);
+ out:
+ if (rc != -EAGAIN) {
+ /*
+@@ -840,7 +902,8 @@ out:
+ */
+ static int unmap_and_move_huge_page(new_page_t get_new_page,
+ unsigned long private, struct page *hpage,
+- int force, bool offlining, bool sync)
++ int force, bool offlining,
++ enum migrate_mode mode)
+ {
+ int rc = 0;
+ int *result = NULL;
+@@ -853,7 +916,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
+ rc = -EAGAIN;
+
+ if (!trylock_page(hpage)) {
+- if (!force || !sync)
++ if (!force || mode != MIGRATE_SYNC)
+ goto out;
+ lock_page(hpage);
+ }
+@@ -864,7 +927,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
+ try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+
+ if (!page_mapped(hpage))
+- rc = move_to_new_page(new_hpage, hpage, 1, sync);
++ rc = move_to_new_page(new_hpage, hpage, 1, mode);
+
+ if (rc)
+ remove_migration_ptes(hpage, hpage);
+@@ -907,7 +970,7 @@ out:
+ */
+ int migrate_pages(struct list_head *from,
+ new_page_t get_new_page, unsigned long private, bool offlining,
+- bool sync)
++ enum migrate_mode mode)
+ {
+ int retry = 1;
+ int nr_failed = 0;
+@@ -928,7 +991,7 @@ int migrate_pages(struct list_head *from,
+
+ rc = unmap_and_move(get_new_page, private,
+ page, pass > 2, offlining,
+- sync);
++ mode);
+
+ switch(rc) {
+ case -ENOMEM:
+@@ -958,7 +1021,7 @@ out:
+
+ int migrate_huge_pages(struct list_head *from,
+ new_page_t get_new_page, unsigned long private, bool offlining,
+- bool sync)
++ enum migrate_mode mode)
+ {
+ int retry = 1;
+ int nr_failed = 0;
+@@ -975,7 +1038,7 @@ int migrate_huge_pages(struct list_head *from,
+
+ rc = unmap_and_move_huge_page(get_new_page,
+ private, page, pass > 2, offlining,
+- sync);
++ mode);
+
+ switch(rc) {
+ case -ENOMEM:
+@@ -1104,7 +1167,7 @@ set_status:
+ err = 0;
+ if (!list_empty(&pagelist)) {
+ err = migrate_pages(&pagelist, new_page_node,
+- (unsigned long)pm, 0, true);
++ (unsigned long)pm, 0, MIGRATE_SYNC);
+ if (err)
+ putback_lru_pages(&pagelist);
+ }
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 485be89..065dbe8 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1886,14 +1886,20 @@ static struct page *
+ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist, enum zone_type high_zoneidx,
+ nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+- int migratetype, unsigned long *did_some_progress,
+- bool sync_migration)
++ int migratetype, bool sync_migration,
++ bool *deferred_compaction,
++ unsigned long *did_some_progress)
+ {
+ struct page *page;
+
+- if (!order || compaction_deferred(preferred_zone))
++ if (!order)
+ return NULL;
+
++ if (compaction_deferred(preferred_zone)) {
++ *deferred_compaction = true;
++ return NULL;
++ }
++
+ current->flags |= PF_MEMALLOC;
+ *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
+ nodemask, sync_migration);
+@@ -1921,7 +1927,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+ * but not enough to satisfy watermarks.
+ */
+ count_vm_event(COMPACTFAIL);
+- defer_compaction(preferred_zone);
++
++ /*
++ * As async compaction considers a subset of pageblocks, only
++ * defer if the failure was a sync compaction failure.
++ */
++ if (sync_migration)
++ defer_compaction(preferred_zone);
+
+ cond_resched();
+ }
+@@ -1933,8 +1945,9 @@ static inline struct page *
+ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist, enum zone_type high_zoneidx,
+ nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+- int migratetype, unsigned long *did_some_progress,
+- bool sync_migration)
++ int migratetype, bool sync_migration,
++ bool *deferred_compaction,
++ unsigned long *did_some_progress)
+ {
+ return NULL;
+ }
+@@ -2084,6 +2097,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ unsigned long pages_reclaimed = 0;
+ unsigned long did_some_progress;
+ bool sync_migration = false;
++ bool deferred_compaction = false;
+
+ /*
+ * In the slowpath, we sanity check order to avoid ever trying to
+@@ -2164,12 +2178,22 @@ rebalance:
+ zonelist, high_zoneidx,
+ nodemask,
+ alloc_flags, preferred_zone,
+- migratetype, &did_some_progress,
+- sync_migration);
++ migratetype, sync_migration,
++ &deferred_compaction,
++ &did_some_progress);
+ if (page)
+ goto got_pg;
+ sync_migration = true;
+
++ /*
++ * If compaction is deferred for high-order allocations, it is because
++ * sync compaction recently failed. In this is the case and the caller
++ * has requested the system not be heavily disrupted, fail the
++ * allocation now instead of entering direct reclaim
++ */
++ if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
++ goto nopage;
++
+ /* Try direct reclaim and then allocating */
+ page = __alloc_pages_direct_reclaim(gfp_mask, order,
+ zonelist, high_zoneidx,
+@@ -2232,8 +2256,9 @@ rebalance:
+ zonelist, high_zoneidx,
+ nodemask,
+ alloc_flags, preferred_zone,
+- migratetype, &did_some_progress,
+- sync_migration);
++ migratetype, sync_migration,
++ &deferred_compaction,
++ &did_some_progress);
+ if (page)
+ goto got_pg;
+ }
+@@ -2257,8 +2282,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+ {
+ enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+ struct zone *preferred_zone;
+- struct page *page;
++ struct page *page = NULL;
+ int migratetype = allocflags_to_migratetype(gfp_mask);
++ unsigned int cpuset_mems_cookie;
+
+ gfp_mask &= gfp_allowed_mask;
+
+@@ -2277,15 +2303,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+ if (unlikely(!zonelist->_zonerefs->zone))
+ return NULL;
+
+- get_mems_allowed();
++retry_cpuset:
++ cpuset_mems_cookie = get_mems_allowed();
++
+ /* The preferred zone is used for statistics later */
+ first_zones_zonelist(zonelist, high_zoneidx,
+ nodemask ? : &cpuset_current_mems_allowed,
+ &preferred_zone);
+- if (!preferred_zone) {
+- put_mems_allowed();
+- return NULL;
+- }
++ if (!preferred_zone)
++ goto out;
+
+ /* First allocation attempt */
+ page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
+@@ -2295,9 +2321,19 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+ page = __alloc_pages_slowpath(gfp_mask, order,
+ zonelist, high_zoneidx, nodemask,
+ preferred_zone, migratetype);
+- put_mems_allowed();
+
+ trace_mm_page_alloc(page, order, gfp_mask, migratetype);
++
++out:
++ /*
++ * When updating a task's mems_allowed, it is possible to race with
++ * parallel threads in such a way that an allocation can fail while
++ * the mask is being updated. If a page allocation is about to fail,
++ * check if the cpuset changed during allocation and if so, retry.
++ */
++ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
++ goto retry_cpuset;
++
+ return page;
+ }
+ EXPORT_SYMBOL(__alloc_pages_nodemask);
+@@ -2521,13 +2557,15 @@ void si_meminfo_node(struct sysinfo *val, int nid)
+ bool skip_free_areas_node(unsigned int flags, int nid)
+ {
+ bool ret = false;
++ unsigned int cpuset_mems_cookie;
+
+ if (!(flags & SHOW_MEM_FILTER_NODES))
+ goto out;
+
+- get_mems_allowed();
+- ret = !node_isset(nid, cpuset_current_mems_allowed);
+- put_mems_allowed();
++ do {
++ cpuset_mems_cookie = get_mems_allowed();
++ ret = !node_isset(nid, cpuset_current_mems_allowed);
++ } while (!put_mems_allowed(cpuset_mems_cookie));
+ out:
+ return ret;
+ }
+@@ -3407,25 +3445,33 @@ static void setup_zone_migrate_reserve(struct zone *zone)
+ if (page_to_nid(page) != zone_to_nid(zone))
+ continue;
+
+- /* Blocks with reserved pages will never free, skip them. */
+- block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
+- if (pageblock_is_reserved(pfn, block_end_pfn))
+- continue;
+-
+ block_migratetype = get_pageblock_migratetype(page);
+
+- /* If this block is reserved, account for it */
+- if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
+- reserve--;
+- continue;
+- }
++ /* Only test what is necessary when the reserves are not met */
++ if (reserve > 0) {
++ /*
++ * Blocks with reserved pages will never free, skip
++ * them.
++ */
++ block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
++ if (pageblock_is_reserved(pfn, block_end_pfn))
++ continue;
+
+- /* Suitable for reserving if this block is movable */
+- if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
+- set_pageblock_migratetype(page, MIGRATE_RESERVE);
+- move_freepages_block(zone, page, MIGRATE_RESERVE);
+- reserve--;
+- continue;
++ /* If this block is reserved, account for it */
++ if (block_migratetype == MIGRATE_RESERVE) {
++ reserve--;
++ continue;
++ }
++
++ /* Suitable for reserving if this block is movable */
++ if (block_migratetype == MIGRATE_MOVABLE) {
++ set_pageblock_migratetype(page,
++ MIGRATE_RESERVE);
++ move_freepages_block(zone, page,
++ MIGRATE_RESERVE);
++ reserve--;
++ continue;
++ }
+ }
+
+ /*
+diff --git a/mm/slab.c b/mm/slab.c
+index 83311c9a..cd3ab93 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -3267,12 +3267,10 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
+ if (in_interrupt() || (flags & __GFP_THISNODE))
+ return NULL;
+ nid_alloc = nid_here = numa_mem_id();
+- get_mems_allowed();
+ if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
+ nid_alloc = cpuset_slab_spread_node();
+ else if (current->mempolicy)
+ nid_alloc = slab_node(current->mempolicy);
+- put_mems_allowed();
+ if (nid_alloc != nid_here)
+ return ____cache_alloc_node(cachep, flags, nid_alloc);
+ return NULL;
+@@ -3295,14 +3293,17 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
+ enum zone_type high_zoneidx = gfp_zone(flags);
+ void *obj = NULL;
+ int nid;
++ unsigned int cpuset_mems_cookie;
+
+ if (flags & __GFP_THISNODE)
+ return NULL;
+
+- get_mems_allowed();
+- zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+ local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
+
++retry_cpuset:
++ cpuset_mems_cookie = get_mems_allowed();
++ zonelist = node_zonelist(slab_node(current->mempolicy), flags);
++
+ retry:
+ /*
+ * Look through allowed nodes for objects available
+@@ -3355,7 +3356,9 @@ retry:
+ }
+ }
+ }
+- put_mems_allowed();
++
++ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !obj))
++ goto retry_cpuset;
+ return obj;
+ }
+
+diff --git a/mm/slub.c b/mm/slub.c
+index af47188..5710788 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1582,6 +1582,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+ struct zone *zone;
+ enum zone_type high_zoneidx = gfp_zone(flags);
+ void *object;
++ unsigned int cpuset_mems_cookie;
+
+ /*
+ * The defrag ratio allows a configuration of the tradeoffs between
+@@ -1605,23 +1606,32 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+ get_cycles() % 1024 > s->remote_node_defrag_ratio)
+ return NULL;
+
+- get_mems_allowed();
+- zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+- for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+- struct kmem_cache_node *n;
+-
+- n = get_node(s, zone_to_nid(zone));
+-
+- if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
+- n->nr_partial > s->min_partial) {
+- object = get_partial_node(s, n, c);
+- if (object) {
+- put_mems_allowed();
+- return object;
++ do {
++ cpuset_mems_cookie = get_mems_allowed();
++ zonelist = node_zonelist(slab_node(current->mempolicy), flags);
++ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
++ struct kmem_cache_node *n;
++
++ n = get_node(s, zone_to_nid(zone));
++
++ if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
++ n->nr_partial > s->min_partial) {
++ object = get_partial_node(s, n, c);
++ if (object) {
++ /*
++ * Return the object even if
++ * put_mems_allowed indicated that
++ * the cpuset mems_allowed was
++ * updated in parallel. It's a
++ * harmless race between the alloc
++ * and the cpuset update.
++ */
++ put_mems_allowed(cpuset_mems_cookie);
++ return object;
++ }
+ }
+ }
+- }
+- put_mems_allowed();
++ } while (!put_mems_allowed(cpuset_mems_cookie));
+ #endif
+ return NULL;
+ }
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 8342119..48febd7 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -715,7 +715,13 @@ static enum page_references page_check_references(struct page *page,
+ */
+ SetPageReferenced(page);
+
+- if (referenced_page)
++ if (referenced_page || referenced_ptes > 1)
++ return PAGEREF_ACTIVATE;
++
++ /*
++ * Activate file-backed executable pages after first usage.
++ */
++ if (vm_flags & VM_EXEC)
+ return PAGEREF_ACTIVATE;
+
+ return PAGEREF_KEEP;
+@@ -1061,8 +1067,39 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
+
+ ret = -EBUSY;
+
+- if ((mode & ISOLATE_CLEAN) && (PageDirty(page) || PageWriteback(page)))
+- return ret;
++ /*
++ * To minimise LRU disruption, the caller can indicate that it only
++ * wants to isolate pages it will be able to operate on without
++ * blocking - clean pages for the most part.
++ *
++ * ISOLATE_CLEAN means that only clean pages should be isolated. This
++ * is used by reclaim when it is cannot write to backing storage
++ *
++ * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants to pages
++ * that it is possible to migrate without blocking
++ */
++ if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
++ /* All the caller can do on PageWriteback is block */
++ if (PageWriteback(page))
++ return ret;
++
++ if (PageDirty(page)) {
++ struct address_space *mapping;
++
++ /* ISOLATE_CLEAN means only clean pages */
++ if (mode & ISOLATE_CLEAN)
++ return ret;
++
++ /*
++ * Only pages without mappings or that have a
++ * ->migratepage callback are possible to migrate
++ * without blocking
++ */
++ mapping = page_mapping(page);
++ if (mapping && !mapping->a_ops->migratepage)
++ return ret;
++ }
++ }
+
+ if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
+ return ret;
+@@ -1178,7 +1215,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
+ * anon page which don't already have a swap slot is
+ * pointless.
+ */
+- if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
++ if (nr_swap_pages <= 0 && PageSwapBacked(cursor_page) &&
+ !PageSwapCache(cursor_page))
+ break;
+
+@@ -1874,7 +1911,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
+ * latencies, so it's better to scan a minimum amount there as
+ * well.
+ */
+- if (scanning_global_lru(sc) && current_is_kswapd())
++ if (scanning_global_lru(sc) && current_is_kswapd() &&
++ zone->all_unreclaimable)
+ force_scan = true;
+ if (!scanning_global_lru(sc))
+ force_scan = true;
+@@ -2012,8 +2050,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
+ * inactive lists are large enough, continue reclaiming
+ */
+ pages_for_compaction = (2UL << sc->order);
+- inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON) +
+- zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
++ inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
++ if (nr_swap_pages > 0)
++ inactive_lru_pages += zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+ if (sc->nr_reclaimed < pages_for_compaction &&
+ inactive_lru_pages > pages_for_compaction)
+ return true;
+@@ -2088,6 +2127,42 @@ restart:
+ throttle_vm_writeout(sc->gfp_mask);
+ }
+
++/* Returns true if compaction should go ahead for a high-order request */
++static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
++{
++ unsigned long balance_gap, watermark;
++ bool watermark_ok;
++
++ /* Do not consider compaction for orders reclaim is meant to satisfy */
++ if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
++ return false;
++
++ /*
++ * Compaction takes time to run and there are potentially other
++ * callers using the pages just freed. Continue reclaiming until
++ * there is a buffer of free pages available to give compaction
++ * a reasonable chance of completing and allocating the page
++ */
++ balance_gap = min(low_wmark_pages(zone),
++ (zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
++ KSWAPD_ZONE_BALANCE_GAP_RATIO);
++ watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
++ watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
++
++ /*
++ * If compaction is deferred, reclaim up to a point where
++ * compaction will have a chance of success when re-enabled
++ */
++ if (compaction_deferred(zone))
++ return watermark_ok;
++
++ /* If compaction is not ready to start, keep reclaiming */
++ if (!compaction_suitable(zone, sc->order))
++ return false;
++
++ return watermark_ok;
++}
++
+ /*
+ * This is the direct reclaim path, for page-allocating processes. We only
+ * try to reclaim pages from zones which will satisfy the caller's allocation
+@@ -2105,8 +2180,9 @@ restart:
+ * scan then give up on it.
+ *
+ * This function returns true if a zone is being reclaimed for a costly
+- * high-order allocation and compaction is either ready to begin or deferred.
+- * This indicates to the caller that it should retry the allocation or fail.
++ * high-order allocation and compaction is ready to begin. This indicates to
++ * the caller that it should consider retrying the allocation instead of
++ * further reclaim.
+ */
+ static bool shrink_zones(int priority, struct zonelist *zonelist,
+ struct scan_control *sc)
+@@ -2115,7 +2191,7 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
+ struct zone *zone;
+ unsigned long nr_soft_reclaimed;
+ unsigned long nr_soft_scanned;
+- bool should_abort_reclaim = false;
++ bool aborted_reclaim = false;
+
+ for_each_zone_zonelist_nodemask(zone, z, zonelist,
+ gfp_zone(sc->gfp_mask), sc->nodemask) {
+@@ -2140,10 +2216,8 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
+ * noticable problem, like transparent huge page
+ * allocations.
+ */
+- if (sc->order > PAGE_ALLOC_COSTLY_ORDER &&
+- (compaction_suitable(zone, sc->order) ||
+- compaction_deferred(zone))) {
+- should_abort_reclaim = true;
++ if (compaction_ready(zone, sc)) {
++ aborted_reclaim = true;
+ continue;
+ }
+ }
+@@ -2165,7 +2239,7 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
+ shrink_zone(priority, zone, sc);
+ }
+
+- return should_abort_reclaim;
++ return aborted_reclaim;
+ }
+
+ static bool zone_reclaimable(struct zone *zone)
+@@ -2219,8 +2293,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+ struct zoneref *z;
+ struct zone *zone;
+ unsigned long writeback_threshold;
++ bool aborted_reclaim;
+
+- get_mems_allowed();
+ delayacct_freepages_start();
+
+ if (scanning_global_lru(sc))
+@@ -2230,8 +2304,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+ sc->nr_scanned = 0;
+ if (!priority)
+ disable_swap_token(sc->mem_cgroup);
+- if (shrink_zones(priority, zonelist, sc))
+- break;
++ aborted_reclaim = shrink_zones(priority, zonelist, sc);
+
+ /*
+ * Don't shrink slabs when reclaiming memory from
+@@ -2285,7 +2358,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+
+ out:
+ delayacct_freepages_end();
+- put_mems_allowed();
+
+ if (sc->nr_reclaimed)
+ return sc->nr_reclaimed;
+@@ -2298,6 +2370,10 @@ out:
+ if (oom_killer_disabled)
+ return 0;
+
++ /* Aborted reclaim to try compaction? don't OOM, then */
++ if (aborted_reclaim)
++ return 1;
++
+ /* top priority shrink_zones still had more to do? don't OOM, then */
+ if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
+ return 1;
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index c505fd5..c119f33 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -868,7 +868,6 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ struct hdmi_spec_per_pin *per_pin;
+ struct hdmi_eld *eld;
+ struct hdmi_spec_per_cvt *per_cvt = NULL;
+- int pinctl;
+
+ /* Validate hinfo */
+ pin_idx = hinfo_to_pin_index(spec, hinfo);
+@@ -904,11 +903,6 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ snd_hda_codec_write(codec, per_pin->pin_nid, 0,
+ AC_VERB_SET_CONNECT_SEL,
+ mux_idx);
+- pinctl = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
+- AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
+- snd_hda_codec_write(codec, per_pin->pin_nid, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL,
+- pinctl | PIN_OUT);
+ snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
+
+ /* Initially set the converter's capabilities */
+@@ -1147,11 +1141,17 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
+ struct hdmi_spec *spec = codec->spec;
+ int pin_idx = hinfo_to_pin_index(spec, hinfo);
+ hda_nid_t pin_nid = spec->pins[pin_idx].pin_nid;
++ int pinctl;
+
+ hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
+
+ hdmi_setup_audio_infoframe(codec, pin_idx, substream);
+
++ pinctl = snd_hda_codec_read(codec, pin_nid, 0,
++ AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
++ snd_hda_codec_write(codec, pin_nid, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl | PIN_OUT);
++
+ return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
+ }
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 5f096a5..191fd78 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5989,6 +5989,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ { .id = 0x10ec0275, .name = "ALC275", .patch = patch_alc269 },
+ { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
+ { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
++ { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
+ { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
+ .patch = patch_alc861 },
+ { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 90e93bf..0dc441c 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -1381,7 +1381,15 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
+ }
+
+ list_for_each_entry(w, &card->widgets, list) {
+- list_del_init(&w->dirty);
++ switch (w->id) {
++ case snd_soc_dapm_pre:
++ case snd_soc_dapm_post:
++ /* These widgets always need to be powered */
++ break;
++ default:
++ list_del_init(&w->dirty);
++ break;
++ }
+
+ if (w->power) {
+ d = w->dapm;
diff --git a/1025_linux-3.2.26.patch b/1025_linux-3.2.26.patch
new file mode 100644
index 00000000..44065b9f
--- /dev/null
+++ b/1025_linux-3.2.26.patch
@@ -0,0 +1,238 @@
+diff --git a/Makefile b/Makefile
+index e13e4e7..fa5acc83 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 25
++SUBLEVEL = 26
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index bb3ee36..f7c89e2 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -99,7 +99,6 @@ struct cpuinfo_x86 {
+ u16 apicid;
+ u16 initial_apicid;
+ u16 x86_clflush_size;
+-#ifdef CONFIG_SMP
+ /* number of cores as seen by the OS: */
+ u16 booted_cores;
+ /* Physical processor id: */
+@@ -110,7 +109,6 @@ struct cpuinfo_x86 {
+ u8 compute_unit_id;
+ /* Index into per_cpu list: */
+ u16 cpu_index;
+-#endif
+ u32 microcode;
+ } __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index bae1efe..be16854 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -154,16 +154,14 @@ int amd_get_subcaches(int cpu)
+ {
+ struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
+ unsigned int mask;
+- int cuid = 0;
++ int cuid;
+
+ if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
+ return 0;
+
+ pci_read_config_dword(link, 0x1d4, &mask);
+
+-#ifdef CONFIG_SMP
+ cuid = cpu_data(cpu).compute_unit_id;
+-#endif
+ return (mask >> (4 * cuid)) & 0xf;
+ }
+
+@@ -172,7 +170,7 @@ int amd_set_subcaches(int cpu, int mask)
+ static unsigned int reset, ban;
+ struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
+ unsigned int reg;
+- int cuid = 0;
++ int cuid;
+
+ if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
+ return -EINVAL;
+@@ -190,9 +188,7 @@ int amd_set_subcaches(int cpu, int mask)
+ pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
+ }
+
+-#ifdef CONFIG_SMP
+ cuid = cpu_data(cpu).compute_unit_id;
+-#endif
+ mask <<= 4 * cuid;
+ mask |= (0xf ^ (1 << cuid)) << 26;
+
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 3524e1f..ff8557e 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -148,7 +148,6 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
+
+ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
+ {
+-#ifdef CONFIG_SMP
+ /* calling is from identify_secondary_cpu() ? */
+ if (!c->cpu_index)
+ return;
+@@ -192,7 +191,6 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
+
+ valid_k7:
+ ;
+-#endif
+ }
+
+ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index aa003b1..ca93cc7 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -676,9 +676,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+ if (this_cpu->c_early_init)
+ this_cpu->c_early_init(c);
+
+-#ifdef CONFIG_SMP
+ c->cpu_index = 0;
+-#endif
+ filter_cpuid_features(c, false);
+
+ setup_smep(c);
+@@ -764,10 +762,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
+ c->apicid = c->initial_apicid;
+ # endif
+ #endif
+-
+-#ifdef CONFIG_X86_HT
+ c->phys_proc_id = c->initial_apicid;
+-#endif
+ }
+
+ setup_smep(c);
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 5231312..3e6ff6c 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -181,7 +181,6 @@ static void __cpuinit trap_init_f00f_bug(void)
+
+ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
+ {
+-#ifdef CONFIG_SMP
+ /* calling is from identify_secondary_cpu() ? */
+ if (!c->cpu_index)
+ return;
+@@ -198,7 +197,6 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
+ WARN_ONCE(1, "WARNING: SMP operation may be unreliable"
+ "with B stepping processors.\n");
+ }
+-#endif
+ }
+
+ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index b0f1271..3b67877 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -119,9 +119,7 @@ void mce_setup(struct mce *m)
+ m->time = get_seconds();
+ m->cpuvendor = boot_cpu_data.x86_vendor;
+ m->cpuid = cpuid_eax(1);
+-#ifdef CONFIG_SMP
+ m->socketid = cpu_data(m->extcpu).phys_proc_id;
+-#endif
+ m->apicid = cpu_data(m->extcpu).initial_apicid;
+ rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
+ }
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+index 445a61c..d4444be 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+@@ -65,11 +65,9 @@ struct threshold_bank {
+ };
+ static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
+
+-#ifdef CONFIG_SMP
+ static unsigned char shared_bank[NR_BANKS] = {
+ 0, 0, 0, 0, 1
+ };
+-#endif
+
+ static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
+
+@@ -227,10 +225,9 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
+
+ if (!block)
+ per_cpu(bank_map, cpu) |= (1 << bank);
+-#ifdef CONFIG_SMP
++
+ if (shared_bank[bank] && c->cpu_core_id)
+ break;
+-#endif
+
+ memset(&b, 0, sizeof(b));
+ b.cpu = cpu;
+diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
+index 14b2314..8022c66 100644
+--- a/arch/x86/kernel/cpu/proc.c
++++ b/arch/x86/kernel/cpu/proc.c
+@@ -64,12 +64,10 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
+ static int show_cpuinfo(struct seq_file *m, void *v)
+ {
+ struct cpuinfo_x86 *c = v;
+- unsigned int cpu = 0;
++ unsigned int cpu;
+ int i;
+
+-#ifdef CONFIG_SMP
+ cpu = c->cpu_index;
+-#endif
+ seq_printf(m, "processor\t: %u\n"
+ "vendor_id\t: %s\n"
+ "cpu family\t: %d\n"
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 18a1293..0db57b5 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -1609,11 +1609,9 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
+ mce->cpuvendor, mce->cpuid, mce->time,
+ mce->socketid, mce->apicid);
+
+-#ifdef CONFIG_SMP
+ /* Only handle if it is the right mc controller */
+ if (cpu_data(mce->cpu).phys_proc_id != pvt->sbridge_dev->mc)
+ return NOTIFY_DONE;
+-#endif
+
+ smp_rmb();
+ if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 0790c98..19b4412 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -57,16 +57,15 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
+ #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
+
+-#ifdef CONFIG_SMP
+ #define TO_PHYS_ID(cpu) cpu_data(cpu).phys_proc_id
+ #define TO_CORE_ID(cpu) cpu_data(cpu).cpu_core_id
++#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
++
++#ifdef CONFIG_SMP
+ #define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu))
+ #else
+-#define TO_PHYS_ID(cpu) (cpu)
+-#define TO_CORE_ID(cpu) (cpu)
+ #define for_each_sibling(i, cpu) for (i = 0; false; )
+ #endif
+-#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
+
+ /*
+ * Per-Core Temperature Data
diff --git a/1026_linux-3.2.27.patch b/1026_linux-3.2.27.patch
new file mode 100644
index 00000000..5878eb47
--- /dev/null
+++ b/1026_linux-3.2.27.patch
@@ -0,0 +1,3188 @@
+diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
+index edad99a..69820b2 100644
+--- a/Documentation/sound/alsa/HD-Audio-Models.txt
++++ b/Documentation/sound/alsa/HD-Audio-Models.txt
+@@ -60,10 +60,11 @@ ALC267/268
+ ==========
+ N/A
+
+-ALC269
++ALC269/270/275/276/280/282
+ ======
+ laptop-amic Laptops with analog-mic input
+ laptop-dmic Laptops with digital-mic input
++ lenovo-dock Enables docking station I/O for some Lenovos
+
+ ALC662/663/272
+ ==============
+diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
+index e1f856b..22bf11b 100644
+--- a/Documentation/stable_kernel_rules.txt
++++ b/Documentation/stable_kernel_rules.txt
+@@ -1,4 +1,4 @@
+-Everything you ever wanted to know about Linux 2.6 -stable releases.
++Everything you ever wanted to know about Linux -stable releases.
+
+ Rules on what kind of patches are accepted, and which ones are not, into the
+ "-stable" tree:
+@@ -41,10 +41,10 @@ Procedure for submitting patches to the -stable tree:
+ cherry-picked than this can be specified in the following format in
+ the sign-off area:
+
+- Cc: <stable@vger.kernel.org> # .32.x: a1f84a3: sched: Check for idle
+- Cc: <stable@vger.kernel.org> # .32.x: 1b9508f: sched: Rate-limit newidle
+- Cc: <stable@vger.kernel.org> # .32.x: fd21073: sched: Fix affinity logic
+- Cc: <stable@vger.kernel.org> # .32.x
++ Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
++ Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
++ Cc: <stable@vger.kernel.org> # 3.3.x: fd21073: sched: Fix affinity logic
++ Cc: <stable@vger.kernel.org> # 3.3.x
+ Signed-off-by: Ingo Molnar <mingo@elte.hu>
+
+ The tag sequence has the meaning of:
+@@ -78,6 +78,15 @@ Review cycle:
+ security kernel team, and not go through the normal review cycle.
+ Contact the kernel security team for more details on this procedure.
+
++Trees:
++
++ - The queues of patches, for both completed versions and in progress
++ versions can be found at:
++ http://git.kernel.org/?p=linux/kernel/git/stable/stable-queue.git
++ - The finalized and tagged releases of all stable kernels can be found
++ in separate branches per version at:
++ http://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git
++
+
+ Review committee:
+
+diff --git a/Makefile b/Makefile
+index fa5acc83..bdf851f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 26
++SUBLEVEL = 27
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
+index 93226cf..b1479fd 100644
+--- a/arch/arm/include/asm/mutex.h
++++ b/arch/arm/include/asm/mutex.h
+@@ -7,121 +7,10 @@
+ */
+ #ifndef _ASM_MUTEX_H
+ #define _ASM_MUTEX_H
+-
+-#if __LINUX_ARM_ARCH__ < 6
+-/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
+-# include <asm-generic/mutex-xchg.h>
+-#else
+-
+ /*
+- * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
+- * atomic decrement (it is not a reliable atomic decrement but it satisfies
+- * the defined semantics for our purpose, while being smaller and faster
+- * than a real atomic decrement or atomic swap. The idea is to attempt
+- * decrementing the lock value only once. If once decremented it isn't zero,
+- * or if its store-back fails due to a dispute on the exclusive store, we
+- * simply bail out immediately through the slow path where the lock will be
+- * reattempted until it succeeds.
++ * On pre-ARMv6 hardware this results in a swp-based implementation,
++ * which is the most efficient. For ARMv6+, we emit a pair of exclusive
++ * accesses instead.
+ */
+-static inline void
+-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+-{
+- int __ex_flag, __res;
+-
+- __asm__ (
+-
+- "ldrex %0, [%2] \n\t"
+- "sub %0, %0, #1 \n\t"
+- "strex %1, %0, [%2] "
+-
+- : "=&r" (__res), "=&r" (__ex_flag)
+- : "r" (&(count)->counter)
+- : "cc","memory" );
+-
+- __res |= __ex_flag;
+- if (unlikely(__res != 0))
+- fail_fn(count);
+-}
+-
+-static inline int
+-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+-{
+- int __ex_flag, __res;
+-
+- __asm__ (
+-
+- "ldrex %0, [%2] \n\t"
+- "sub %0, %0, #1 \n\t"
+- "strex %1, %0, [%2] "
+-
+- : "=&r" (__res), "=&r" (__ex_flag)
+- : "r" (&(count)->counter)
+- : "cc","memory" );
+-
+- __res |= __ex_flag;
+- if (unlikely(__res != 0))
+- __res = fail_fn(count);
+- return __res;
+-}
+-
+-/*
+- * Same trick is used for the unlock fast path. However the original value,
+- * rather than the result, is used to test for success in order to have
+- * better generated assembly.
+- */
+-static inline void
+-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+-{
+- int __ex_flag, __res, __orig;
+-
+- __asm__ (
+-
+- "ldrex %0, [%3] \n\t"
+- "add %1, %0, #1 \n\t"
+- "strex %2, %1, [%3] "
+-
+- : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+- : "r" (&(count)->counter)
+- : "cc","memory" );
+-
+- __orig |= __ex_flag;
+- if (unlikely(__orig != 0))
+- fail_fn(count);
+-}
+-
+-/*
+- * If the unlock was done on a contended lock, or if the unlock simply fails
+- * then the mutex remains locked.
+- */
+-#define __mutex_slowpath_needs_to_unlock() 1
+-
+-/*
+- * For __mutex_fastpath_trylock we use another construct which could be
+- * described as a "single value cmpxchg".
+- *
+- * This provides the needed trylock semantics like cmpxchg would, but it is
+- * lighter and less generic than a true cmpxchg implementation.
+- */
+-static inline int
+-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+-{
+- int __ex_flag, __res, __orig;
+-
+- __asm__ (
+-
+- "1: ldrex %0, [%3] \n\t"
+- "subs %1, %0, #1 \n\t"
+- "strexeq %2, %1, [%3] \n\t"
+- "movlt %0, #0 \n\t"
+- "cmpeq %2, #0 \n\t"
+- "bgt 1b "
+-
+- : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+- : "r" (&count->counter)
+- : "cc", "memory" );
+-
+- return __orig;
+-}
+-
+-#endif
++#include <asm-generic/mutex-xchg.h>
+ #endif
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index b145f16..ece0996 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -242,6 +242,19 @@ svc_preempt:
+ b 1b
+ #endif
+
++__und_fault:
++ @ Correct the PC such that it is pointing at the instruction
++ @ which caused the fault. If the faulting instruction was ARM
++ @ the PC will be pointing at the next instruction, and have to
++ @ subtract 4. Otherwise, it is Thumb, and the PC will be
++ @ pointing at the second half of the Thumb instruction. We
++ @ have to subtract 2.
++ ldr r2, [r0, #S_PC]
++ sub r2, r2, r1
++ str r2, [r0, #S_PC]
++ b do_undefinstr
++ENDPROC(__und_fault)
++
+ .align 5
+ __und_svc:
+ #ifdef CONFIG_KPROBES
+@@ -259,25 +272,32 @@ __und_svc:
+ @
+ @ r0 - instruction
+ @
+-#ifndef CONFIG_THUMB2_KERNEL
++#ifndef CONFIG_THUMB2_KERNEL
+ ldr r0, [r4, #-4]
+ #else
++ mov r1, #2
+ ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
+ cmp r0, #0xe800 @ 32-bit instruction if xx >= 0
+- ldrhhs r9, [r4] @ bottom 16 bits
+- orrhs r0, r9, r0, lsl #16
++ blo __und_svc_fault
++ ldrh r9, [r4] @ bottom 16 bits
++ add r4, r4, #2
++ str r4, [sp, #S_PC]
++ orr r0, r9, r0, lsl #16
+ #endif
+- adr r9, BSYM(1f)
++ adr r9, BSYM(__und_svc_finish)
+ mov r2, r4
+ bl call_fpe
+
++ mov r1, #4 @ PC correction to apply
++__und_svc_fault:
+ mov r0, sp @ struct pt_regs *regs
+- bl do_undefinstr
++ bl __und_fault
+
+ @
+ @ IRQs off again before pulling preserved data off the stack
+ @
+-1: disable_irq_notrace
++__und_svc_finish:
++ disable_irq_notrace
+
+ @
+ @ restore SPSR and restart the instruction
+@@ -421,25 +441,33 @@ __und_usr:
+ mov r2, r4
+ mov r3, r5
+
++ @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
++ @ faulting instruction depending on Thumb mode.
++ @ r3 = regs->ARM_cpsr
+ @
+- @ fall through to the emulation code, which returns using r9 if
+- @ it has emulated the instruction, or the more conventional lr
+- @ if we are to treat this as a real undefined instruction
+- @
+- @ r0 - instruction
++ @ The emulation code returns using r9 if it has emulated the
++ @ instruction, or the more conventional lr if we are to treat
++ @ this as a real undefined instruction
+ @
+ adr r9, BSYM(ret_from_exception)
+- adr lr, BSYM(__und_usr_unknown)
++
+ tst r3, #PSR_T_BIT @ Thumb mode?
+- itet eq @ explicit IT needed for the 1f label
+- subeq r4, r2, #4 @ ARM instr at LR - 4
+- subne r4, r2, #2 @ Thumb instr at LR - 2
+-1: ldreqt r0, [r4]
++ bne __und_usr_thumb
++ sub r4, r2, #4 @ ARM instr at LR - 4
++1: ldrt r0, [r4]
+ #ifdef CONFIG_CPU_ENDIAN_BE8
+- reveq r0, r0 @ little endian instruction
++ rev r0, r0 @ little endian instruction
+ #endif
+- beq call_fpe
++ @ r0 = 32-bit ARM instruction which caused the exception
++ @ r2 = PC value for the following instruction (:= regs->ARM_pc)
++ @ r4 = PC value for the faulting instruction
++ @ lr = 32-bit undefined instruction function
++ adr lr, BSYM(__und_usr_fault_32)
++ b call_fpe
++
++__und_usr_thumb:
+ @ Thumb instruction
++ sub r4, r2, #2 @ First half of thumb instr at LR - 2
+ #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
+ /*
+ * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms
+@@ -453,7 +481,7 @@ __und_usr:
+ ldr r5, .LCcpu_architecture
+ ldr r5, [r5]
+ cmp r5, #CPU_ARCH_ARMv7
+- blo __und_usr_unknown
++ blo __und_usr_fault_16 @ 16bit undefined instruction
+ /*
+ * The following code won't get run unless the running CPU really is v7, so
+ * coding round the lack of ldrht on older arches is pointless. Temporarily
+@@ -461,15 +489,18 @@ __und_usr:
+ */
+ .arch armv6t2
+ #endif
+-2:
+- ARM( ldrht r5, [r4], #2 )
+- THUMB( ldrht r5, [r4] )
+- THUMB( add r4, r4, #2 )
++2: ldrht r5, [r4]
+ cmp r5, #0xe800 @ 32bit instruction if xx != 0
+- blo __und_usr_unknown
+-3: ldrht r0, [r4]
++ blo __und_usr_fault_16 @ 16bit undefined instruction
++3: ldrht r0, [r2]
+ add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
++ str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
+ orr r0, r0, r5, lsl #16
++ adr lr, BSYM(__und_usr_fault_32)
++ @ r0 = the two 16-bit Thumb instructions which caused the exception
++ @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
++ @ r4 = PC value for the first 16-bit Thumb instruction
++ @ lr = 32bit undefined instruction function
+
+ #if __LINUX_ARM_ARCH__ < 7
+ /* If the target arch was overridden, change it back: */
+@@ -480,17 +511,13 @@ __und_usr:
+ #endif
+ #endif /* __LINUX_ARM_ARCH__ < 7 */
+ #else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
+- b __und_usr_unknown
++ b __und_usr_fault_16
+ #endif
+- UNWIND(.fnend )
++ UNWIND(.fnend)
+ ENDPROC(__und_usr)
+
+- @
+- @ fallthrough to call_fpe
+- @
+-
+ /*
+- * The out of line fixup for the ldrt above.
++ * The out of line fixup for the ldrt instructions above.
+ */
+ .pushsection .fixup, "ax"
+ 4: mov pc, r9
+@@ -521,11 +548,12 @@ ENDPROC(__und_usr)
+ * NEON handler code.
+ *
+ * Emulators may wish to make use of the following registers:
+- * r0 = instruction opcode.
+- * r2 = PC+4
++ * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
++ * r2 = PC value to resume execution after successful emulation
+ * r9 = normal "successful" return address
+- * r10 = this threads thread_info structure.
++ * r10 = this threads thread_info structure
+ * lr = unrecognised instruction return address
++ * IRQs disabled, FIQs enabled.
+ */
+ @
+ @ Fall-through from Thumb-2 __und_usr
+@@ -660,12 +688,17 @@ ENTRY(no_fp)
+ mov pc, lr
+ ENDPROC(no_fp)
+
+-__und_usr_unknown:
+- enable_irq
++__und_usr_fault_32:
++ mov r1, #4
++ b 1f
++__und_usr_fault_16:
++ mov r1, #2
++1: enable_irq
+ mov r0, sp
+ adr lr, BSYM(ret_from_exception)
+- b do_undefinstr
+-ENDPROC(__und_usr_unknown)
++ b __und_fault
++ENDPROC(__und_usr_fault_32)
++ENDPROC(__und_usr_fault_16)
+
+ .align 5
+ __pabt_usr:
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index 3d0c6fb..e68d251 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -125,6 +125,7 @@ void arm_machine_restart(char mode, const char *cmd)
+ */
+ mdelay(1000);
+ printk("Reboot failed -- System halted\n");
++ local_irq_disable();
+ while (1);
+ }
+
+@@ -240,6 +241,7 @@ void machine_shutdown(void)
+ void machine_halt(void)
+ {
+ machine_shutdown();
++ local_irq_disable();
+ while (1);
+ }
+
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 160cb16..8380bd1 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -362,18 +362,10 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+
+ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+ {
+- unsigned int correction = thumb_mode(regs) ? 2 : 4;
+ unsigned int instr;
+ siginfo_t info;
+ void __user *pc;
+
+- /*
+- * According to the ARM ARM, PC is 2 or 4 bytes ahead,
+- * depending whether we're in Thumb mode or not.
+- * Correct this offset.
+- */
+- regs->ARM_pc -= correction;
+-
+ pc = (void __user *)instruction_pointer(regs);
+
+ if (processor_mode(regs) == SVC_MODE) {
+diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
+index 845f461..c202113 100644
+--- a/arch/arm/mm/tlb-v7.S
++++ b/arch/arm/mm/tlb-v7.S
+@@ -38,11 +38,19 @@ ENTRY(v7wbi_flush_user_tlb_range)
+ dsb
+ mov r0, r0, lsr #PAGE_SHIFT @ align address
+ mov r1, r1, lsr #PAGE_SHIFT
++#ifdef CONFIG_ARM_ERRATA_720789
++ mov r3, #0
++#else
+ asid r3, r3 @ mask ASID
++#endif
+ orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
+ mov r1, r1, lsl #PAGE_SHIFT
+ 1:
++#ifdef CONFIG_ARM_ERRATA_720789
++ ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
++#else
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
++#endif
+ ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
+
+ add r0, r0, #PAGE_SZ
+@@ -67,7 +75,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
+ mov r0, r0, lsl #PAGE_SHIFT
+ mov r1, r1, lsl #PAGE_SHIFT
+ 1:
++#ifdef CONFIG_ARM_ERRATA_720789
++ ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
++#else
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
++#endif
+ ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
+ add r0, r0, #PAGE_SZ
+ cmp r0, r1
+diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
+index 4fa9903..cc926c9 100644
+--- a/arch/arm/vfp/entry.S
++++ b/arch/arm/vfp/entry.S
+@@ -7,18 +7,20 @@
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+- *
+- * Basic entry code, called from the kernel's undefined instruction trap.
+- * r0 = faulted instruction
+- * r5 = faulted PC+4
+- * r9 = successful return
+- * r10 = thread_info structure
+- * lr = failure return
+ */
+ #include <asm/thread_info.h>
+ #include <asm/vfpmacros.h>
+ #include "../kernel/entry-header.S"
+
++@ VFP entry point.
++@
++@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
++@ r2 = PC value to resume execution after successful emulation
++@ r9 = normal "successful" return address
++@ r10 = this threads thread_info structure
++@ lr = unrecognised instruction return address
++@ IRQs disabled.
++@
+ ENTRY(do_vfp)
+ #ifdef CONFIG_PREEMPT
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
+index 2d30c7f..3a0efaa 100644
+--- a/arch/arm/vfp/vfphw.S
++++ b/arch/arm/vfp/vfphw.S
+@@ -61,13 +61,13 @@
+
+ @ VFP hardware support entry point.
+ @
+-@ r0 = faulted instruction
+-@ r2 = faulted PC+4
+-@ r9 = successful return
++@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
++@ r2 = PC value to resume execution after successful emulation
++@ r9 = normal "successful" return address
+ @ r10 = vfp_state union
+ @ r11 = CPU number
+-@ lr = failure return
+-
++@ lr = unrecognised instruction return address
++@ IRQs enabled.
+ ENTRY(vfp_support_entry)
+ DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
+
+@@ -161,9 +161,12 @@ vfp_hw_state_valid:
+ @ exception before retrying branch
+ @ out before setting an FPEXC that
+ @ stops us reading stuff
+- VFPFMXR FPEXC, r1 @ restore FPEXC last
+- sub r2, r2, #4
+- str r2, [sp, #S_PC] @ retry the instruction
++ VFPFMXR FPEXC, r1 @ Restore FPEXC last
++ sub r2, r2, #4 @ Retry current instruction - if Thumb
++ str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
++ @ else it's one 32-bit instruction, so
++ @ always subtract 4 from the following
++ @ instruction address.
+ #ifdef CONFIG_PREEMPT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
+index 8ea07e4..ad83dad 100644
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -453,10 +453,16 @@ static int vfp_pm_suspend(void)
+
+ /* disable, just in case */
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
++ } else if (vfp_current_hw_state[ti->cpu]) {
++#ifndef CONFIG_SMP
++ fmxr(FPEXC, fpexc | FPEXC_EN);
++ vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
++ fmxr(FPEXC, fpexc);
++#endif
+ }
+
+ /* clear any information we had about last context state */
+- memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));
++ vfp_current_hw_state[ti->cpu] = NULL;
+
+ return 0;
+ }
+diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
+index 3fad89e..2fc214b 100644
+--- a/arch/ia64/include/asm/atomic.h
++++ b/arch/ia64/include/asm/atomic.h
+@@ -18,8 +18,8 @@
+ #include <asm/system.h>
+
+
+-#define ATOMIC_INIT(i) ((atomic_t) { (i) })
+-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
++#define ATOMIC_INIT(i) { (i) }
++#define ATOMIC64_INIT(i) { (i) }
+
+ #define atomic_read(v) (*(volatile int *)&(v)->counter)
+ #define atomic64_read(v) (*(volatile long *)&(v)->counter)
+diff --git a/arch/m68k/include/asm/entry.h b/arch/m68k/include/asm/entry.h
+index c3c5a86..8798ebc 100644
+--- a/arch/m68k/include/asm/entry.h
++++ b/arch/m68k/include/asm/entry.h
+@@ -33,8 +33,8 @@
+
+ /* the following macro is used when enabling interrupts */
+ #if defined(MACH_ATARI_ONLY)
+- /* block out HSYNC on the atari */
+-#define ALLOWINT (~0x400)
++ /* block out HSYNC = ipl 2 on the atari */
++#define ALLOWINT (~0x500)
+ #define MAX_NOINT_IPL 3
+ #else
+ /* portable version */
+diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
+index 8623f8d..9a5932e 100644
+--- a/arch/m68k/kernel/sys_m68k.c
++++ b/arch/m68k/kernel/sys_m68k.c
+@@ -479,9 +479,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
+ goto bad_access;
+ }
+
+- mem_value = *mem;
++ /*
++ * No need to check for EFAULT; we know that the page is
++ * present and writable.
++ */
++ __get_user(mem_value, mem);
+ if (mem_value == oldval)
+- *mem = newval;
++ __put_user(newval, mem);
+
+ pte_unmap_unlock(pte, ptl);
+ up_read(&mm->mmap_sem);
+diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
+index 5682f16..20f0e01 100644
+--- a/arch/s390/include/asm/mmu_context.h
++++ b/arch/s390/include/asm/mmu_context.h
+@@ -12,7 +12,6 @@
+ #include <asm/pgalloc.h>
+ #include <asm/uaccess.h>
+ #include <asm/tlbflush.h>
+-#include <asm-generic/mm_hooks.h>
+
+ static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+@@ -92,4 +91,17 @@ static inline void activate_mm(struct mm_struct *prev,
+ switch_mm(prev, next, current);
+ }
+
++static inline void arch_dup_mmap(struct mm_struct *oldmm,
++ struct mm_struct *mm)
++{
++#ifdef CONFIG_64BIT
++ if (oldmm->context.asce_limit < mm->context.asce_limit)
++ crst_table_downgrade(mm, oldmm->context.asce_limit);
++#endif
++}
++
++static inline void arch_exit_mmap(struct mm_struct *mm)
++{
++}
++
+ #endif /* __S390_MMU_CONTEXT_H */
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index 5f33d37..172550d 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -130,7 +130,9 @@ struct stack_frame {
+ regs->psw.mask = psw_user_bits | PSW_MASK_BA; \
+ regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
+ regs->gprs[15] = new_stackp; \
++ __tlb_flush_mm(current->mm); \
+ crst_table_downgrade(current->mm, 1UL << 31); \
++ update_mm(current->mm, current); \
+ } while (0)
+
+ /* Forward declaration, a strange C thing */
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index b28aaa4..0fc0a7e 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -453,6 +453,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
+ struct pt_regs regs;
+ int access, fault;
+
++ /* Emulate a uaccess fault from kernel mode. */
+ regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
+ if (!irqs_disabled())
+ regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
+@@ -461,12 +462,12 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
+ uaddr &= PAGE_MASK;
+ access = write ? VM_WRITE : VM_READ;
+ fault = do_exception(&regs, access, uaddr | 2);
+- if (unlikely(fault)) {
+- if (fault & VM_FAULT_OOM)
+- return -EFAULT;
+- else if (fault & VM_FAULT_SIGBUS)
+- do_sigbus(&regs, pgm_int_code, uaddr);
+- }
++ /*
++ * Since the fault happened in kernel mode while performing a uaccess
++ * all we need to do now is emulating a fixup in case "fault" is not
++ * zero.
++ * For the calling uaccess functions this results always in -EFAULT.
++ */
+ return fault ? -EFAULT : 0;
+ }
+
+diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
+index a0155c0..c70b3d8 100644
+--- a/arch/s390/mm/mmap.c
++++ b/arch/s390/mm/mmap.c
+@@ -106,9 +106,15 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
+
+ int s390_mmap_check(unsigned long addr, unsigned long len)
+ {
++ int rc;
++
+ if (!is_compat_task() &&
+- len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
+- return crst_table_upgrade(current->mm, 1UL << 53);
++ len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
++ rc = crst_table_upgrade(current->mm, 1UL << 53);
++ if (rc)
++ return rc;
++ update_mm(current->mm, current);
++ }
+ return 0;
+ }
+
+@@ -128,6 +134,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
+ rc = crst_table_upgrade(mm, 1UL << 53);
+ if (rc)
+ return (unsigned long) rc;
++ update_mm(mm, current);
+ area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
+ }
+ return area;
+@@ -150,6 +157,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
+ rc = crst_table_upgrade(mm, 1UL << 53);
+ if (rc)
+ return (unsigned long) rc;
++ update_mm(mm, current);
+ area = arch_get_unmapped_area_topdown(filp, addr, len,
+ pgoff, flags);
+ }
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index f8ceac4..f8e92f8 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -97,7 +97,6 @@ repeat:
+ crst_table_free(mm, table);
+ if (mm->context.asce_limit < limit)
+ goto repeat;
+- update_mm(mm, current);
+ return 0;
+ }
+
+@@ -105,9 +104,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+ {
+ pgd_t *pgd;
+
+- if (mm->context.asce_limit <= limit)
+- return;
+- __tlb_flush_mm(mm);
+ while (mm->context.asce_limit > limit) {
+ pgd = mm->pgd;
+ switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
+@@ -130,7 +126,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+ mm->task_size = mm->context.asce_limit;
+ crst_table_free(mm, (unsigned long *) pgd);
+ }
+- update_mm(mm, current);
+ }
+ #endif
+
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 1f84794..73ef56c 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -219,7 +219,7 @@ void __init arch_init_ideal_nops(void)
+ ideal_nops = intel_nops;
+ #endif
+ }
+-
++ break;
+ default:
+ #ifdef CONFIG_X86_64
+ ideal_nops = k8_nops;
+diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
+index 1b267e7..00a0385 100644
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -686,6 +686,7 @@ int m2p_add_override(unsigned long mfn, struct page *page,
+ unsigned long uninitialized_var(address);
+ unsigned level;
+ pte_t *ptep = NULL;
++ int ret = 0;
+
+ pfn = page_to_pfn(page);
+ if (!PageHighMem(page)) {
+@@ -721,6 +722,24 @@ int m2p_add_override(unsigned long mfn, struct page *page,
+ list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
+ spin_unlock_irqrestore(&m2p_override_lock, flags);
+
++ /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
++ * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
++ * pfn so that the following mfn_to_pfn(mfn) calls will return the
++ * pfn from the m2p_override (the backend pfn) instead.
++ * We need to do this because the pages shared by the frontend
++ * (xen-blkfront) can be already locked (lock_page, called by
++ * do_read_cache_page); when the userspace backend tries to use them
++ * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
++ * do_blockdev_direct_IO is going to try to lock the same pages
++ * again resulting in a deadlock.
++ * As a side effect get_user_pages_fast might not be safe on the
++ * frontend pages while they are being shared with the backend,
++ * because mfn_to_pfn (that ends up being called by GUPF) will
++ * return the backend pfn rather than the frontend pfn. */
++ ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
++ if (ret == 0 && get_phys_to_machine(pfn) == mfn)
++ set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(m2p_add_override);
+@@ -732,6 +751,7 @@ int m2p_remove_override(struct page *page, bool clear_pte)
+ unsigned long uninitialized_var(address);
+ unsigned level;
+ pte_t *ptep = NULL;
++ int ret = 0;
+
+ pfn = page_to_pfn(page);
+ mfn = get_phys_to_machine(pfn);
+@@ -801,6 +821,22 @@ int m2p_remove_override(struct page *page, bool clear_pte)
+ } else
+ set_phys_to_machine(pfn, page->index);
+
++ /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
++ * somewhere in this domain, even before being added to the
++ * m2p_override (see comment above in m2p_add_override).
++ * If there are no other entries in the m2p_override corresponding
++ * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
++ * the original pfn (the one shared by the frontend): the backend
++ * cannot do any IO on this page anymore because it has been
++ * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
++ * the original pfn causes mfn_to_pfn(mfn) to return the frontend
++ * pfn again. */
++ mfn &= ~FOREIGN_FRAME_BIT;
++ ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
++ if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
++ m2p_find_override(mfn) == NULL)
++ set_phys_to_machine(pfn, mfn);
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(m2p_remove_override);
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index 9955a53..c864add 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -4369,8 +4369,14 @@ out_unreg_blkdev:
+ out_put_disk:
+ while (dr--) {
+ del_timer_sync(&motor_off_timer[dr]);
+- if (disks[dr]->queue)
++ if (disks[dr]->queue) {
+ blk_cleanup_queue(disks[dr]->queue);
++ /*
++ * put_disk() is not paired with add_disk() and
++ * will put queue reference one extra time. fix it.
++ */
++ disks[dr]->queue = NULL;
++ }
+ put_disk(disks[dr]);
+ }
+ return err;
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index e46f2f7..650a308 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -20,8 +20,6 @@ struct workqueue_struct *virtblk_wq;
+
+ struct virtio_blk
+ {
+- spinlock_t lock;
+-
+ struct virtio_device *vdev;
+ struct virtqueue *vq;
+
+@@ -62,7 +60,7 @@ static void blk_done(struct virtqueue *vq)
+ unsigned int len;
+ unsigned long flags;
+
+- spin_lock_irqsave(&vblk->lock, flags);
++ spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
+ while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
+ int error;
+
+@@ -97,7 +95,7 @@ static void blk_done(struct virtqueue *vq)
+ }
+ /* In case queue is stopped waiting for more buffers. */
+ blk_start_queue(vblk->disk->queue);
+- spin_unlock_irqrestore(&vblk->lock, flags);
++ spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);
+ }
+
+ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
+@@ -384,7 +382,6 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
+ }
+
+ INIT_LIST_HEAD(&vblk->reqs);
+- spin_lock_init(&vblk->lock);
+ vblk->vdev = vdev;
+ vblk->sg_elems = sg_elems;
+ sg_init_table(vblk->sg, vblk->sg_elems);
+@@ -410,7 +407,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
+ goto out_mempool;
+ }
+
+- q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
++ q = vblk->disk->queue = blk_init_queue(do_virtblk_request, NULL);
+ if (!q) {
+ err = -ENOMEM;
+ goto out_put_disk;
+diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
+index 5c0d96a..b12ffea 100644
+--- a/drivers/char/mspec.c
++++ b/drivers/char/mspec.c
+@@ -284,7 +284,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
+ vdata->flags = flags;
+ vdata->type = type;
+ spin_lock_init(&vdata->lock);
+- vdata->refcnt = ATOMIC_INIT(1);
++ atomic_set(&vdata->refcnt, 1);
+ vma->vm_private_data = vdata;
+
+ vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 6035ab8..631d4f6 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -125,21 +125,26 @@
+ * The current exported interfaces for gathering environmental noise
+ * from the devices are:
+ *
++ * void add_device_randomness(const void *buf, unsigned int size);
+ * void add_input_randomness(unsigned int type, unsigned int code,
+ * unsigned int value);
+- * void add_interrupt_randomness(int irq);
++ * void add_interrupt_randomness(int irq, int irq_flags);
+ * void add_disk_randomness(struct gendisk *disk);
+ *
++ * add_device_randomness() is for adding data to the random pool that
++ * is likely to differ between two devices (or possibly even per boot).
++ * This would be things like MAC addresses or serial numbers, or the
++ * read-out of the RTC. This does *not* add any actual entropy to the
++ * pool, but it initializes the pool to different values for devices
++ * that might otherwise be identical and have very little entropy
++ * available to them (particularly common in the embedded world).
++ *
+ * add_input_randomness() uses the input layer interrupt timing, as well as
+ * the event type information from the hardware.
+ *
+- * add_interrupt_randomness() uses the inter-interrupt timing as random
+- * inputs to the entropy pool. Note that not all interrupts are good
+- * sources of randomness! For example, the timer interrupts is not a
+- * good choice, because the periodicity of the interrupts is too
+- * regular, and hence predictable to an attacker. Network Interface
+- * Controller interrupts are a better measure, since the timing of the
+- * NIC interrupts are more unpredictable.
++ * add_interrupt_randomness() uses the interrupt timing as random
++ * inputs to the entropy pool. Using the cycle counters and the irq source
++ * as inputs, it feeds the randomness roughly once a second.
+ *
+ * add_disk_randomness() uses what amounts to the seek time of block
+ * layer request events, on a per-disk_devt basis, as input to the
+@@ -248,6 +253,8 @@
+ #include <linux/percpu.h>
+ #include <linux/cryptohash.h>
+ #include <linux/fips.h>
++#include <linux/ptrace.h>
++#include <linux/kmemcheck.h>
+
+ #ifdef CONFIG_GENERIC_HARDIRQS
+ # include <linux/irq.h>
+@@ -256,6 +263,7 @@
+ #include <asm/processor.h>
+ #include <asm/uaccess.h>
+ #include <asm/irq.h>
++#include <asm/irq_regs.h>
+ #include <asm/io.h>
+
+ /*
+@@ -266,6 +274,8 @@
+ #define SEC_XFER_SIZE 512
+ #define EXTRACT_SIZE 10
+
++#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
++
+ /*
+ * The minimum number of bits of entropy before we wake up a read on
+ * /dev/random. Should be enough to do a significant reseed.
+@@ -420,8 +430,10 @@ struct entropy_store {
+ /* read-write data: */
+ spinlock_t lock;
+ unsigned add_ptr;
++ unsigned input_rotate;
+ int entropy_count;
+- int input_rotate;
++ int entropy_total;
++ unsigned int initialized:1;
+ __u8 last_data[EXTRACT_SIZE];
+ };
+
+@@ -454,6 +466,10 @@ static struct entropy_store nonblocking_pool = {
+ .pool = nonblocking_pool_data
+ };
+
++static __u32 const twist_table[8] = {
++ 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
++ 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
++
+ /*
+ * This function adds bytes into the entropy "pool". It does not
+ * update the entropy estimate. The caller should call
+@@ -464,29 +480,24 @@ static struct entropy_store nonblocking_pool = {
+ * it's cheap to do so and helps slightly in the expected case where
+ * the entropy is concentrated in the low-order bits.
+ */
+-static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+- int nbytes, __u8 out[64])
++static void __mix_pool_bytes(struct entropy_store *r, const void *in,
++ int nbytes, __u8 out[64])
+ {
+- static __u32 const twist_table[8] = {
+- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+ unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
+ int input_rotate;
+ int wordmask = r->poolinfo->poolwords - 1;
+ const char *bytes = in;
+ __u32 w;
+- unsigned long flags;
+
+- /* Taps are constant, so we can load them without holding r->lock. */
+ tap1 = r->poolinfo->tap1;
+ tap2 = r->poolinfo->tap2;
+ tap3 = r->poolinfo->tap3;
+ tap4 = r->poolinfo->tap4;
+ tap5 = r->poolinfo->tap5;
+
+- spin_lock_irqsave(&r->lock, flags);
+- input_rotate = r->input_rotate;
+- i = r->add_ptr;
++ smp_rmb();
++ input_rotate = ACCESS_ONCE(r->input_rotate);
++ i = ACCESS_ONCE(r->add_ptr);
+
+ /* mix one byte at a time to simplify size handling and churn faster */
+ while (nbytes--) {
+@@ -513,19 +524,53 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+ input_rotate += i ? 7 : 14;
+ }
+
+- r->input_rotate = input_rotate;
+- r->add_ptr = i;
++ ACCESS_ONCE(r->input_rotate) = input_rotate;
++ ACCESS_ONCE(r->add_ptr) = i;
++ smp_wmb();
+
+ if (out)
+ for (j = 0; j < 16; j++)
+ ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
++}
++
++static void mix_pool_bytes(struct entropy_store *r, const void *in,
++ int nbytes, __u8 out[64])
++{
++ unsigned long flags;
+
++ spin_lock_irqsave(&r->lock, flags);
++ __mix_pool_bytes(r, in, nbytes, out);
+ spin_unlock_irqrestore(&r->lock, flags);
+ }
+
+-static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
++struct fast_pool {
++ __u32 pool[4];
++ unsigned long last;
++ unsigned short count;
++ unsigned char rotate;
++ unsigned char last_timer_intr;
++};
++
++/*
++ * This is a fast mixing routine used by the interrupt randomness
++ * collector. It's hardcoded for an 128 bit pool and assumes that any
++ * locks that might be needed are taken by the caller.
++ */
++static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
+ {
+- mix_pool_bytes_extract(r, in, bytes, NULL);
++ const char *bytes = in;
++ __u32 w;
++ unsigned i = f->count;
++ unsigned input_rotate = f->rotate;
++
++ while (nbytes--) {
++ w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
++ f->pool[(i + 1) & 3];
++ f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
++ input_rotate += (i++ & 3) ? 7 : 14;
++ }
++ f->count = i;
++ f->rotate = input_rotate;
+ }
+
+ /*
+@@ -533,30 +578,34 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
+ */
+ static void credit_entropy_bits(struct entropy_store *r, int nbits)
+ {
+- unsigned long flags;
+- int entropy_count;
++ int entropy_count, orig;
+
+ if (!nbits)
+ return;
+
+- spin_lock_irqsave(&r->lock, flags);
+-
+ DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
+- entropy_count = r->entropy_count;
++retry:
++ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ entropy_count += nbits;
+ if (entropy_count < 0) {
+ DEBUG_ENT("negative entropy/overflow\n");
+ entropy_count = 0;
+ } else if (entropy_count > r->poolinfo->POOLBITS)
+ entropy_count = r->poolinfo->POOLBITS;
+- r->entropy_count = entropy_count;
++ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
++ goto retry;
++
++ if (!r->initialized && nbits > 0) {
++ r->entropy_total += nbits;
++ if (r->entropy_total > 128)
++ r->initialized = 1;
++ }
+
+ /* should we wake readers? */
+ if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
+ wake_up_interruptible(&random_read_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ }
+- spin_unlock_irqrestore(&r->lock, flags);
+ }
+
+ /*********************************************************************
+@@ -609,6 +658,25 @@ static void set_timer_rand_state(unsigned int irq,
+ }
+ #endif
+
++/*
++ * Add device- or boot-specific data to the input and nonblocking
++ * pools to help initialize them to unique values.
++ *
++ * None of this adds any entropy, it is meant to avoid the
++ * problem of the nonblocking pool having similar initial state
++ * across largely identical devices.
++ */
++void add_device_randomness(const void *buf, unsigned int size)
++{
++ unsigned long time = get_cycles() ^ jiffies;
++
++ mix_pool_bytes(&input_pool, buf, size, NULL);
++ mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
++ mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
++ mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
++}
++EXPORT_SYMBOL(add_device_randomness);
++
+ static struct timer_rand_state input_timer_state;
+
+ /*
+@@ -624,8 +692,8 @@ static struct timer_rand_state input_timer_state;
+ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+ {
+ struct {
+- cycles_t cycles;
+ long jiffies;
++ unsigned cycles;
+ unsigned num;
+ } sample;
+ long delta, delta2, delta3;
+@@ -639,7 +707,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+ sample.jiffies = jiffies;
+ sample.cycles = get_cycles();
+ sample.num = num;
+- mix_pool_bytes(&input_pool, &sample, sizeof(sample));
++ mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
+
+ /*
+ * Calculate number of bits of randomness we probably added.
+@@ -696,17 +764,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
+ }
+ EXPORT_SYMBOL_GPL(add_input_randomness);
+
+-void add_interrupt_randomness(int irq)
++static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
++
++void add_interrupt_randomness(int irq, int irq_flags)
+ {
+- struct timer_rand_state *state;
++ struct entropy_store *r;
++ struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
++ struct pt_regs *regs = get_irq_regs();
++ unsigned long now = jiffies;
++ __u32 input[4], cycles = get_cycles();
++
++ input[0] = cycles ^ jiffies;
++ input[1] = irq;
++ if (regs) {
++ __u64 ip = instruction_pointer(regs);
++ input[2] = ip;
++ input[3] = ip >> 32;
++ }
+
+- state = get_timer_rand_state(irq);
++ fast_mix(fast_pool, input, sizeof(input));
+
+- if (state == NULL)
++ if ((fast_pool->count & 1023) &&
++ !time_after(now, fast_pool->last + HZ))
+ return;
+
+- DEBUG_ENT("irq event %d\n", irq);
+- add_timer_randomness(state, 0x100 + irq);
++ fast_pool->last = now;
++
++ r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
++ __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
++ /*
++ * If we don't have a valid cycle counter, and we see
++ * back-to-back timer interrupts, then skip giving credit for
++ * any entropy.
++ */
++ if (cycles == 0) {
++ if (irq_flags & __IRQF_TIMER) {
++ if (fast_pool->last_timer_intr)
++ return;
++ fast_pool->last_timer_intr = 1;
++ } else
++ fast_pool->last_timer_intr = 0;
++ }
++ credit_entropy_bits(r, 1);
+ }
+
+ #ifdef CONFIG_BLOCK
+@@ -738,7 +837,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ */
+ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+ {
+- __u32 tmp[OUTPUT_POOL_WORDS];
++ __u32 tmp[OUTPUT_POOL_WORDS];
+
+ if (r->pull && r->entropy_count < nbytes * 8 &&
+ r->entropy_count < r->poolinfo->POOLBITS) {
+@@ -757,7 +856,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+
+ bytes = extract_entropy(r->pull, tmp, bytes,
+ random_read_wakeup_thresh / 8, rsvd);
+- mix_pool_bytes(r, tmp, bytes);
++ mix_pool_bytes(r, tmp, bytes, NULL);
+ credit_entropy_bits(r, bytes*8);
+ }
+ }
+@@ -816,13 +915,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
+ static void extract_buf(struct entropy_store *r, __u8 *out)
+ {
+ int i;
+- __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
++ union {
++ __u32 w[5];
++ unsigned long l[LONGS(EXTRACT_SIZE)];
++ } hash;
++ __u32 workspace[SHA_WORKSPACE_WORDS];
+ __u8 extract[64];
++ unsigned long flags;
+
+ /* Generate a hash across the pool, 16 words (512 bits) at a time */
+- sha_init(hash);
++ sha_init(hash.w);
++ spin_lock_irqsave(&r->lock, flags);
+ for (i = 0; i < r->poolinfo->poolwords; i += 16)
+- sha_transform(hash, (__u8 *)(r->pool + i), workspace);
++ sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
+
+ /*
+ * We mix the hash back into the pool to prevent backtracking
+@@ -833,13 +938,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ * brute-forcing the feedback as hard as brute-forcing the
+ * hash.
+ */
+- mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
++ __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
++ spin_unlock_irqrestore(&r->lock, flags);
+
+ /*
+ * To avoid duplicates, we atomically extract a portion of the
+ * pool while mixing, and hash one final time.
+ */
+- sha_transform(hash, extract, workspace);
++ sha_transform(hash.w, extract, workspace);
+ memset(extract, 0, sizeof(extract));
+ memset(workspace, 0, sizeof(workspace));
+
+@@ -848,19 +954,30 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ * pattern, we fold it in half. Thus, we always feed back
+ * twice as much data as we output.
+ */
+- hash[0] ^= hash[3];
+- hash[1] ^= hash[4];
+- hash[2] ^= rol32(hash[2], 16);
+- memcpy(out, hash, EXTRACT_SIZE);
+- memset(hash, 0, sizeof(hash));
++ hash.w[0] ^= hash.w[3];
++ hash.w[1] ^= hash.w[4];
++ hash.w[2] ^= rol32(hash.w[2], 16);
++
++ /*
++ * If we have a architectural hardware random number
++ * generator, mix that in, too.
++ */
++ for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
++ unsigned long v;
++ if (!arch_get_random_long(&v))
++ break;
++ hash.l[i] ^= v;
++ }
++
++ memcpy(out, &hash, EXTRACT_SIZE);
++ memset(&hash, 0, sizeof(hash));
+ }
+
+ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+- size_t nbytes, int min, int reserved)
++ size_t nbytes, int min, int reserved)
+ {
+ ssize_t ret = 0, i;
+ __u8 tmp[EXTRACT_SIZE];
+- unsigned long flags;
+
+ xfer_secondary_pool(r, nbytes);
+ nbytes = account(r, nbytes, min, reserved);
+@@ -869,6 +986,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ extract_buf(r, tmp);
+
+ if (fips_enabled) {
++ unsigned long flags;
++
+ spin_lock_irqsave(&r->lock, flags);
+ if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
+ panic("Hardware RNG duplicated output!\n");
+@@ -927,17 +1046,34 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+
+ /*
+ * This function is the exported kernel interface. It returns some
+- * number of good random numbers, suitable for seeding TCP sequence
+- * numbers, etc.
++ * number of good random numbers, suitable for key generation, seeding
++ * TCP sequence numbers, etc. It does not use the hw random number
++ * generator, if available; use get_random_bytes_arch() for that.
+ */
+ void get_random_bytes(void *buf, int nbytes)
+ {
++ extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
++}
++EXPORT_SYMBOL(get_random_bytes);
++
++/*
++ * This function will use the architecture-specific hardware random
++ * number generator if it is available. The arch-specific hw RNG will
++ * almost certainly be faster than what we can do in software, but it
++ * is impossible to verify that it is implemented securely (as
++ * opposed, to, say, the AES encryption of a sequence number using a
++ * key known by the NSA). So it's useful if we need the speed, but
++ * only if we're willing to trust the hardware manufacturer not to
++ * have put in a back door.
++ */
++void get_random_bytes_arch(void *buf, int nbytes)
++{
+ char *p = buf;
+
+ while (nbytes) {
+ unsigned long v;
+ int chunk = min(nbytes, (int)sizeof(unsigned long));
+-
++
+ if (!arch_get_random_long(&v))
+ break;
+
+@@ -946,9 +1082,11 @@ void get_random_bytes(void *buf, int nbytes)
+ nbytes -= chunk;
+ }
+
+- extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
++ if (nbytes)
++ extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
+ }
+-EXPORT_SYMBOL(get_random_bytes);
++EXPORT_SYMBOL(get_random_bytes_arch);
++
+
+ /*
+ * init_std_data - initialize pool with system data
+@@ -961,16 +1099,19 @@ EXPORT_SYMBOL(get_random_bytes);
+ */
+ static void init_std_data(struct entropy_store *r)
+ {
+- ktime_t now;
+- unsigned long flags;
++ int i;
++ ktime_t now = ktime_get_real();
++ unsigned long rv;
+
+- spin_lock_irqsave(&r->lock, flags);
+ r->entropy_count = 0;
+- spin_unlock_irqrestore(&r->lock, flags);
+-
+- now = ktime_get_real();
+- mix_pool_bytes(r, &now, sizeof(now));
+- mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
++ r->entropy_total = 0;
++ mix_pool_bytes(r, &now, sizeof(now), NULL);
++ for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
++ if (!arch_get_random_long(&rv))
++ break;
++ mix_pool_bytes(r, &rv, sizeof(rv), NULL);
++ }
++ mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
+ }
+
+ static int rand_initialize(void)
+@@ -1107,7 +1248,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
+ count -= bytes;
+ p += bytes;
+
+- mix_pool_bytes(r, buf, bytes);
++ mix_pool_bytes(r, buf, bytes, NULL);
+ cond_resched();
+ }
+
+diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
+index 51e0e2d..a330492 100644
+--- a/drivers/firmware/pcdp.c
++++ b/drivers/firmware/pcdp.c
+@@ -95,7 +95,7 @@ efi_setup_pcdp_console(char *cmdline)
+ if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+- pcdp = ioremap(efi.hcdp, 4096);
++ pcdp = early_ioremap(efi.hcdp, 4096);
+ printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
+
+ if (strstr(cmdline, "console=hcdp")) {
+@@ -131,6 +131,6 @@ efi_setup_pcdp_console(char *cmdline)
+ }
+
+ out:
+- iounmap(pcdp);
++ early_iounmap(pcdp, 4096);
+ return rc;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index d4c4937..fae2050 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -708,8 +708,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+
+ bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+
+- for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+- for (clock = 0; clock <= max_clock; clock++) {
++ for (clock = 0; clock <= max_clock; clock++) {
++ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
+ int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
+
+ if (intel_dp_link_required(mode->clock, bpp)
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index a6dcd18..96532bc 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -40,11 +40,28 @@
+ * Note that newer firmware allows querying device for maximum useable
+ * coordinates.
+ */
++#define XMIN 0
++#define XMAX 6143
++#define YMIN 0
++#define YMAX 6143
+ #define XMIN_NOMINAL 1472
+ #define XMAX_NOMINAL 5472
+ #define YMIN_NOMINAL 1408
+ #define YMAX_NOMINAL 4448
+
++/* Size in bits of absolute position values reported by the hardware */
++#define ABS_POS_BITS 13
++
++/*
++ * Any position values from the hardware above the following limits are
++ * treated as "wrapped around negative" values that have been truncated to
++ * the 13-bit reporting range of the hardware. These are just reasonable
++ * guesses and can be adjusted if hardware is found that operates outside
++ * of these parameters.
++ */
++#define X_MAX_POSITIVE (((1 << ABS_POS_BITS) + XMAX) / 2)
++#define Y_MAX_POSITIVE (((1 << ABS_POS_BITS) + YMAX) / 2)
++
+ /*
+ * Synaptics touchpads report the y coordinate from bottom to top, which is
+ * opposite from what userspace expects.
+@@ -544,6 +561,12 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
+ hw->right = (buf[0] & 0x02) ? 1 : 0;
+ }
+
++ /* Convert wrap-around values to negative */
++ if (hw->x > X_MAX_POSITIVE)
++ hw->x -= 1 << ABS_POS_BITS;
++ if (hw->y > Y_MAX_POSITIVE)
++ hw->y -= 1 << ABS_POS_BITS;
++
+ return 0;
+ }
+
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 532a902..d432032 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -19,7 +19,7 @@
+ /*
+ * Tunable constants
+ */
+-#define ENDIO_HOOK_POOL_SIZE 10240
++#define ENDIO_HOOK_POOL_SIZE 1024
+ #define DEFERRED_SET_SIZE 64
+ #define MAPPING_POOL_SIZE 1024
+ #define PRISON_CELLS 1024
+@@ -857,7 +857,7 @@ static void process_prepared_mapping(struct new_mapping *m)
+
+ if (m->err) {
+ cell_error(m->cell);
+- return;
++ goto out;
+ }
+
+ /*
+@@ -869,7 +869,7 @@ static void process_prepared_mapping(struct new_mapping *m)
+ if (r) {
+ DMERR("dm_thin_insert_block() failed");
+ cell_error(m->cell);
+- return;
++ goto out;
+ }
+
+ /*
+@@ -884,6 +884,7 @@ static void process_prepared_mapping(struct new_mapping *m)
+ } else
+ cell_defer(tc, m->cell, m->data_block);
+
++out:
+ list_del(&m->list);
+ mempool_free(m, tc->pool->mapping_pool);
+ }
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 2d97bf0..62306e5 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -2321,7 +2321,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
+ /* There is nowhere to write, so all non-sync
+ * drives must be failed - so we are finished
+ */
+- sector_t rv = max_sector - sector_nr;
++ sector_t rv;
++ if (min_bad > 0)
++ max_sector = sector_nr + min_bad;
++ rv = max_sector - sector_nr;
+ *skipped = 1;
+ put_buf(r1_bio);
+ return rv;
+diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
+index ed77c6d..5327061 100644
+--- a/drivers/media/rc/ene_ir.c
++++ b/drivers/media/rc/ene_ir.c
+@@ -1018,6 +1018,8 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
+
+ spin_lock_init(&dev->hw_lock);
+
++ dev->hw_io = pnp_port_start(pnp_dev, 0);
++
+ pnp_set_drvdata(pnp_dev, dev);
+ dev->pnp_dev = pnp_dev;
+
+@@ -1072,7 +1074,6 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
+
+ /* claim the resources */
+ error = -EBUSY;
+- dev->hw_io = pnp_port_start(pnp_dev, 0);
+ if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
+ dev->hw_io = -1;
+ dev->irq = -1;
+diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
+index 60107ee..4eec7b7 100644
+--- a/drivers/mfd/ab3100-core.c
++++ b/drivers/mfd/ab3100-core.c
+@@ -409,8 +409,6 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data)
+ u32 fatevent;
+ int err;
+
+- add_interrupt_randomness(irq);
+-
+ err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
+ event_regs, 3);
+ if (err)
+diff --git a/drivers/mfd/wm831x-otp.c b/drivers/mfd/wm831x-otp.c
+index f742745..b90f3e0 100644
+--- a/drivers/mfd/wm831x-otp.c
++++ b/drivers/mfd/wm831x-otp.c
+@@ -18,6 +18,7 @@
+ #include <linux/bcd.h>
+ #include <linux/delay.h>
+ #include <linux/mfd/core.h>
++#include <linux/random.h>
+
+ #include <linux/mfd/wm831x/core.h>
+ #include <linux/mfd/wm831x/otp.h>
+@@ -66,6 +67,7 @@ static DEVICE_ATTR(unique_id, 0444, wm831x_unique_id_show, NULL);
+
+ int wm831x_otp_init(struct wm831x *wm831x)
+ {
++ char uuid[WM831X_UNIQUE_ID_LEN];
+ int ret;
+
+ ret = device_create_file(wm831x->dev, &dev_attr_unique_id);
+@@ -73,6 +75,12 @@ int wm831x_otp_init(struct wm831x *wm831x)
+ dev_err(wm831x->dev, "Unique ID attribute not created: %d\n",
+ ret);
+
++ ret = wm831x_unique_id_read(wm831x, uuid);
++ if (ret == 0)
++ add_device_randomness(uuid, sizeof(uuid));
++ else
++ dev_err(wm831x->dev, "Failed to read UUID: %d\n", ret);
++
+ return ret;
+ }
+
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index bdf960b..ae7528b 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -925,6 +925,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x0411, 0x015d) },
+ { USB_DEVICE(0x0411, 0x016f) },
+ { USB_DEVICE(0x0411, 0x01a2) },
++ { USB_DEVICE(0x0411, 0x01ee) },
+ /* Corega */
+ { USB_DEVICE(0x07aa, 0x002f) },
+ { USB_DEVICE(0x07aa, 0x003c) },
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index d1049ee..26fba2d 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -1431,14 +1431,9 @@ static int asus_wmi_platform_init(struct asus_wmi *asus)
+ */
+ if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, 0, 0, NULL))
+ asus->dsts_id = ASUS_WMI_METHODID_DSTS;
+- else if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2, 0, 0, NULL))
++ else
+ asus->dsts_id = ASUS_WMI_METHODID_DSTS2;
+
+- if (!asus->dsts_id) {
+- pr_err("Can't find DSTS");
+- return -ENODEV;
+- }
+-
+ /* CWAP allow to define the behavior of the Fn+F2 key,
+ * this method doesn't seems to be present on Eee PCs */
+ if (asus->driver->wapf >= 0)
+diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
+index bdc909b..f3c2110 100644
+--- a/drivers/rtc/rtc-wm831x.c
++++ b/drivers/rtc/rtc-wm831x.c
+@@ -24,7 +24,7 @@
+ #include <linux/mfd/wm831x/core.h>
+ #include <linux/delay.h>
+ #include <linux/platform_device.h>
+-
++#include <linux/random.h>
+
+ /*
+ * R16416 (0x4020) - RTC Write Counter
+@@ -96,6 +96,26 @@ struct wm831x_rtc {
+ unsigned int alarm_enabled:1;
+ };
+
++static void wm831x_rtc_add_randomness(struct wm831x *wm831x)
++{
++ int ret;
++ u16 reg;
++
++ /*
++ * The write counter contains a pseudo-random number which is
++ * regenerated every time we set the RTC so it should be a
++ * useful per-system source of entropy.
++ */
++ ret = wm831x_reg_read(wm831x, WM831X_RTC_WRITE_COUNTER);
++ if (ret >= 0) {
++ reg = ret;
++ add_device_randomness(&reg, sizeof(reg));
++ } else {
++ dev_warn(wm831x->dev, "Failed to read RTC write counter: %d\n",
++ ret);
++ }
++}
++
+ /*
+ * Read current time and date in RTC
+ */
+@@ -449,6 +469,8 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
+ alm_irq, ret);
+ }
+
++ wm831x_rtc_add_randomness(wm831x);
++
+ return 0;
+
+ err:
+diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c
+index 6903d39..90e9e32 100644
+--- a/drivers/staging/media/lirc/lirc_sir.c
++++ b/drivers/staging/media/lirc/lirc_sir.c
+@@ -53,6 +53,7 @@
+ #include <linux/io.h>
+ #include <asm/irq.h>
+ #include <linux/fcntl.h>
++#include <linux/platform_device.h>
+ #ifdef LIRC_ON_SA1100
+ #include <asm/hardware.h>
+ #ifdef CONFIG_SA1100_COLLIE
+@@ -488,9 +489,11 @@ static struct lirc_driver driver = {
+ .owner = THIS_MODULE,
+ };
+
++static struct platform_device *lirc_sir_dev;
+
+ static int init_chrdev(void)
+ {
++ driver.dev = &lirc_sir_dev->dev;
+ driver.minor = lirc_register_driver(&driver);
+ if (driver.minor < 0) {
+ printk(KERN_ERR LIRC_DRIVER_NAME ": init_chrdev() failed.\n");
+@@ -1216,20 +1219,71 @@ static int init_lirc_sir(void)
+ return 0;
+ }
+
++static int __devinit lirc_sir_probe(struct platform_device *dev)
++{
++ return 0;
++}
++
++static int __devexit lirc_sir_remove(struct platform_device *dev)
++{
++ return 0;
++}
++
++static struct platform_driver lirc_sir_driver = {
++ .probe = lirc_sir_probe,
++ .remove = __devexit_p(lirc_sir_remove),
++ .driver = {
++ .name = "lirc_sir",
++ .owner = THIS_MODULE,
++ },
++};
+
+ static int __init lirc_sir_init(void)
+ {
+ int retval;
+
++ retval = platform_driver_register(&lirc_sir_driver);
++ if (retval) {
++ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform driver register "
++ "failed!\n");
++ return -ENODEV;
++ }
++
++ lirc_sir_dev = platform_device_alloc("lirc_dev", 0);
++ if (!lirc_sir_dev) {
++ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device alloc "
++ "failed!\n");
++ retval = -ENOMEM;
++ goto pdev_alloc_fail;
++ }
++
++ retval = platform_device_add(lirc_sir_dev);
++ if (retval) {
++ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device add "
++ "failed!\n");
++ retval = -ENODEV;
++ goto pdev_add_fail;
++ }
++
+ retval = init_chrdev();
+ if (retval < 0)
+- return retval;
++ goto fail;
++
+ retval = init_lirc_sir();
+ if (retval) {
+ drop_chrdev();
+- return retval;
++ goto fail;
+ }
++
+ return 0;
++
++fail:
++ platform_device_del(lirc_sir_dev);
++pdev_add_fail:
++ platform_device_put(lirc_sir_dev);
++pdev_alloc_fail:
++ platform_driver_unregister(&lirc_sir_driver);
++ return retval;
+ }
+
+ static void __exit lirc_sir_exit(void)
+@@ -1237,6 +1291,8 @@ static void __exit lirc_sir_exit(void)
+ drop_hardware();
+ drop_chrdev();
+ drop_port();
++ platform_device_unregister(lirc_sir_dev);
++ platform_driver_unregister(&lirc_sir_driver);
+ printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n");
+ }
+
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index a4b192d..08b92a6 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -660,7 +660,8 @@ static void pch_dma_rx_complete(void *arg)
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+ async_tx_ack(priv->desc_rx);
+- pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT);
++ pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ }
+
+ static void pch_dma_tx_complete(void *arg)
+@@ -715,7 +716,8 @@ static int handle_rx_to(struct eg20t_port *priv)
+ int rx_size;
+ int ret;
+ if (!priv->start_rx) {
+- pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
++ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ return 0;
+ }
+ buf = &priv->rxbuf;
+@@ -977,11 +979,13 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
+ case PCH_UART_IID_RDR: /* Received Data Ready */
+ if (priv->use_dma) {
+ pch_uart_hal_disable_interrupt(priv,
+- PCH_UART_HAL_RX_INT);
++ PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ ret = dma_handle_rx(priv);
+ if (!ret)
+ pch_uart_hal_enable_interrupt(priv,
+- PCH_UART_HAL_RX_INT);
++ PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ } else {
+ ret = handle_rx(priv);
+ }
+@@ -1107,7 +1111,8 @@ static void pch_uart_stop_rx(struct uart_port *port)
+ struct eg20t_port *priv;
+ priv = container_of(port, struct eg20t_port, port);
+ priv->start_rx = 0;
+- pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
++ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ priv->int_dis_flag = 1;
+ }
+
+@@ -1163,6 +1168,7 @@ static int pch_uart_startup(struct uart_port *port)
+ break;
+ case 16:
+ fifo_size = PCH_UART_HAL_FIFO16;
++ break;
+ case 1:
+ default:
+ fifo_size = PCH_UART_HAL_FIFO_DIS;
+@@ -1200,7 +1206,8 @@ static int pch_uart_startup(struct uart_port *port)
+ pch_request_dma(port);
+
+ priv->start_rx = 1;
+- pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT);
++ pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT |
++ PCH_UART_HAL_RX_ERR_INT);
+ uart_update_timeout(port, CS8, default_baud);
+
+ return 0;
+@@ -1258,7 +1265,7 @@ static void pch_uart_set_termios(struct uart_port *port,
+ stb = PCH_UART_HAL_STB1;
+
+ if (termios->c_cflag & PARENB) {
+- if (!(termios->c_cflag & PARODD))
++ if (termios->c_cflag & PARODD)
+ parity = PCH_UART_HAL_PARITY_ODD;
+ else
+ parity = PCH_UART_HAL_PARITY_EVEN;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 175b6bb..52340cc 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -24,6 +24,7 @@
+ #include <linux/kthread.h>
+ #include <linux/mutex.h>
+ #include <linux/freezer.h>
++#include <linux/random.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/byteorder.h>
+@@ -1897,6 +1898,14 @@ int usb_new_device(struct usb_device *udev)
+ /* Tell the world! */
+ announce_device(udev);
+
++ if (udev->serial)
++ add_device_randomness(udev->serial, strlen(udev->serial));
++ if (udev->product)
++ add_device_randomness(udev->product, strlen(udev->product));
++ if (udev->manufacturer)
++ add_device_randomness(udev->manufacturer,
++ strlen(udev->manufacturer));
++
+ device_enable_async_suspend(&udev->dev);
+ /* Register the device. The device driver is responsible
+ * for configuring the device and invoking the add-device
+diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
+index 1fc8f12..347bb05 100644
+--- a/drivers/usb/early/ehci-dbgp.c
++++ b/drivers/usb/early/ehci-dbgp.c
+@@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void)
+ writel(FLAG_CF, &ehci_regs->configured_flag);
+
+ /* Wait until the controller is no longer halted */
+- loop = 10;
++ loop = 1000;
+ do {
+ status = readl(&ehci_regs->status);
+ if (!(status & STS_HALT))
+diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
+index aaccffa..dd9533a 100644
+--- a/drivers/video/smscufx.c
++++ b/drivers/video/smscufx.c
+@@ -904,7 +904,7 @@ static ssize_t ufx_ops_write(struct fb_info *info, const char __user *buf,
+ result = fb_sys_write(info, buf, count, ppos);
+
+ if (result > 0) {
+- int start = max((int)(offset / info->fix.line_length) - 1, 0);
++ int start = max((int)(offset / info->fix.line_length), 0);
+ int lines = min((u32)((result / info->fix.line_length) + 1),
+ (u32)info->var.yres);
+
+diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
+index 24a49d4..1585db1 100644
+--- a/fs/exofs/ore.c
++++ b/fs/exofs/ore.c
+@@ -837,11 +837,11 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
+ bio->bi_rw |= REQ_WRITE;
+ }
+
+- osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
+- bio, per_dev->length);
++ osd_req_write(or, _ios_obj(ios, cur_comp),
++ per_dev->offset, bio, per_dev->length);
+ ORE_DBGMSG("write(0x%llx) offset=0x%llx "
+ "length=0x%llx dev=%d\n",
+- _LLU(_ios_obj(ios, dev)->id),
++ _LLU(_ios_obj(ios, cur_comp)->id),
+ _LLU(per_dev->offset),
+ _LLU(per_dev->length), dev);
+ } else if (ios->kern_buff) {
+@@ -853,20 +853,20 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
+ (ios->si.unit_off + ios->length >
+ ios->layout->stripe_unit));
+
+- ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev),
++ ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp),
+ per_dev->offset,
+ ios->kern_buff, ios->length);
+ if (unlikely(ret))
+ goto out;
+ ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
+ "length=0x%llx dev=%d\n",
+- _LLU(_ios_obj(ios, dev)->id),
++ _LLU(_ios_obj(ios, cur_comp)->id),
+ _LLU(per_dev->offset),
+ _LLU(ios->length), per_dev->dev);
+ } else {
+- osd_req_set_attributes(or, _ios_obj(ios, dev));
++ osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
+ ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
+- _LLU(_ios_obj(ios, dev)->id),
++ _LLU(_ios_obj(ios, cur_comp)->id),
+ ios->out_attr_len, dev);
+ }
+
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index c43a452..961e562 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -452,8 +452,11 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
+
+ dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
+
+- /* Only do I/O if gfp is a superset of GFP_KERNEL */
+- if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
++ /* Only do I/O if gfp is a superset of GFP_KERNEL, and we're not
++ * doing this memory reclaim for a fs-related allocation.
++ */
++ if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL &&
++ !(current->flags & PF_FSTRANS)) {
+ int how = FLUSH_SYNC;
+
+ /* Don't let kswapd deadlock waiting for OOM RPC calls */
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 9cfa60a..87a1746 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2236,7 +2236,7 @@ out_acl:
+ if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
+ if ((buflen -= 4) < 0)
+ goto out_resource;
+- WRITE32(1);
++ WRITE32(0);
+ }
+ if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
+ if ((buflen -= 4) < 0)
+diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
+index ac258be..c598cfb 100644
+--- a/fs/nilfs2/ioctl.c
++++ b/fs/nilfs2/ioctl.c
+@@ -182,7 +182,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
+ if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
+ goto out;
+
+- down_read(&inode->i_sb->s_umount);
++ mutex_lock(&nilfs->ns_snapshot_mount_mutex);
+
+ nilfs_transaction_begin(inode->i_sb, &ti, 0);
+ ret = nilfs_cpfile_change_cpmode(
+@@ -192,7 +192,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
+ else
+ nilfs_transaction_commit(inode->i_sb); /* never fails */
+
+- up_read(&inode->i_sb->s_umount);
++ mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
+ out:
+ mnt_drop_write(filp->f_path.mnt);
+ return ret;
+diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
+index 8351c44..97bfbdd 100644
+--- a/fs/nilfs2/super.c
++++ b/fs/nilfs2/super.c
+@@ -951,6 +951,8 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
+ struct nilfs_root *root;
+ int ret;
+
++ mutex_lock(&nilfs->ns_snapshot_mount_mutex);
++
+ down_read(&nilfs->ns_segctor_sem);
+ ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno);
+ up_read(&nilfs->ns_segctor_sem);
+@@ -975,6 +977,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
+ ret = nilfs_get_root_dentry(s, root, root_dentry);
+ nilfs_put_root(root);
+ out:
++ mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
+ return ret;
+ }
+
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 35a8970..1c98f53 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -76,6 +76,7 @@ struct the_nilfs *alloc_nilfs(struct block_device *bdev)
+ nilfs->ns_bdev = bdev;
+ atomic_set(&nilfs->ns_ndirtyblks, 0);
+ init_rwsem(&nilfs->ns_sem);
++ mutex_init(&nilfs->ns_snapshot_mount_mutex);
+ INIT_LIST_HEAD(&nilfs->ns_dirty_files);
+ INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
+ spin_lock_init(&nilfs->ns_inode_lock);
+diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
+index 9992b11..de7435f 100644
+--- a/fs/nilfs2/the_nilfs.h
++++ b/fs/nilfs2/the_nilfs.h
+@@ -47,6 +47,7 @@ enum {
+ * @ns_flags: flags
+ * @ns_bdev: block device
+ * @ns_sem: semaphore for shared states
++ * @ns_snapshot_mount_mutex: mutex to protect snapshot mounts
+ * @ns_sbh: buffer heads of on-disk super blocks
+ * @ns_sbp: pointers to super block data
+ * @ns_sbwtime: previous write time of super block
+@@ -99,6 +100,7 @@ struct the_nilfs {
+
+ struct block_device *ns_bdev;
+ struct rw_semaphore ns_sem;
++ struct mutex ns_snapshot_mount_mutex;
+
+ /*
+ * used for
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index c5ed2f1..a2227f7 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -41,6 +41,9 @@ int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
+ unsigned long *, int *, int, unsigned int flags);
+ void unmap_hugepage_range(struct vm_area_struct *,
+ unsigned long, unsigned long, struct page *);
++void __unmap_hugepage_range_final(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end,
++ struct page *ref_page);
+ void __unmap_hugepage_range(struct vm_area_struct *,
+ unsigned long, unsigned long, struct page *);
+ int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
+@@ -99,6 +102,13 @@ static inline unsigned long hugetlb_total_pages(void)
+ #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
+ #define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
+ #define unmap_hugepage_range(vma, start, end, page) BUG()
++static inline void __unmap_hugepage_range_final(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end,
++ struct page *ref_page)
++{
++ BUG();
++}
++
+ static inline void hugetlb_report_meminfo(struct seq_file *m)
+ {
+ }
+diff --git a/include/linux/init_task.h b/include/linux/init_task.h
+index df53fdf..cdde2b3 100644
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -124,8 +124,17 @@ extern struct group_info init_groups;
+
+ extern struct cred init_cred;
+
++extern struct task_group root_task_group;
++
++#ifdef CONFIG_CGROUP_SCHED
++# define INIT_CGROUP_SCHED(tsk) \
++ .sched_task_group = &root_task_group,
++#else
++# define INIT_CGROUP_SCHED(tsk)
++#endif
++
+ #ifdef CONFIG_PERF_EVENTS
+-# define INIT_PERF_EVENTS(tsk) \
++# define INIT_PERF_EVENTS(tsk) \
+ .perf_event_mutex = \
+ __MUTEX_INITIALIZER(tsk.perf_event_mutex), \
+ .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
+@@ -162,6 +171,7 @@ extern struct cred init_cred;
+ }, \
+ .tasks = LIST_HEAD_INIT(tsk.tasks), \
+ INIT_PUSHABLE_TASKS(tsk) \
++ INIT_CGROUP_SCHED(tsk) \
+ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
+ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
+ .real_parent = &tsk, \
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 8f74538..29e217a 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -50,11 +50,13 @@ struct rnd_state {
+
+ extern void rand_initialize_irq(int irq);
+
++extern void add_device_randomness(const void *, unsigned int);
+ extern void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value);
+-extern void add_interrupt_randomness(int irq);
++extern void add_interrupt_randomness(int irq, int irq_flags);
+
+ extern void get_random_bytes(void *buf, int nbytes);
++extern void get_random_bytes_arch(void *buf, int nbytes);
+ void generate_random_uuid(unsigned char uuid_out[16]);
+
+ #ifndef MODULE
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index d336c35..1e86bb4 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1236,6 +1236,9 @@ struct task_struct {
+ const struct sched_class *sched_class;
+ struct sched_entity se;
+ struct sched_rt_entity rt;
++#ifdef CONFIG_CGROUP_SCHED
++ struct task_group *sched_task_group;
++#endif
+
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
+ /* list of struct preempt_notifier: */
+@@ -2646,7 +2649,7 @@ extern int sched_group_set_rt_period(struct task_group *tg,
+ extern long sched_group_rt_period(struct task_group *tg);
+ extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
+ #endif
+-#endif
++#endif /* CONFIG_CGROUP_SCHED */
+
+ extern int task_can_switch_user(struct user_struct *up,
+ struct task_struct *tsk);
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 866c9d5..80fb1c6 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2231,11 +2231,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
+ * @uaddr2: the pi futex we will take prior to returning to user-space
+ *
+ * The caller will wait on uaddr and will be requeued by futex_requeue() to
+- * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
+- * complete the acquisition of the rt_mutex prior to returning to userspace.
+- * This ensures the rt_mutex maintains an owner when it has waiters; without
+- * one, the pi logic wouldn't know which task to boost/deboost, if there was a
+- * need to.
++ * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
++ * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
++ * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
++ * without one, the pi logic would not know which task to boost/deboost, if
++ * there was a need to.
+ *
+ * We call schedule in futex_wait_queue_me() when we enqueue and return there
+ * via the following:
+@@ -2272,6 +2272,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ struct futex_q q = futex_q_init;
+ int res, ret;
+
++ if (uaddr == uaddr2)
++ return -EINVAL;
++
+ if (!bitset)
+ return -EINVAL;
+
+@@ -2343,7 +2346,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ * signal. futex_unlock_pi() will not destroy the lock_ptr nor
+ * the pi_state.
+ */
+- WARN_ON(!&q.pi_state);
++ WARN_ON(!q.pi_state);
+ pi_mutex = &q.pi_state->pi_mutex;
+ ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
+ debug_rt_mutex_free_waiter(&rt_waiter);
+@@ -2370,7 +2373,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ * fault, unlock the rt_mutex and return the fault to userspace.
+ */
+ if (ret == -EFAULT) {
+- if (rt_mutex_owner(pi_mutex) == current)
++ if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
+ rt_mutex_unlock(pi_mutex);
+ } else if (ret == -EINTR) {
+ /*
+diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
+index 470d08c..10e0772 100644
+--- a/kernel/irq/handle.c
++++ b/kernel/irq/handle.c
+@@ -117,7 +117,7 @@ irqreturn_t
+ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+ {
+ irqreturn_t retval = IRQ_NONE;
+- unsigned int random = 0, irq = desc->irq_data.irq;
++ unsigned int flags = 0, irq = desc->irq_data.irq;
+
+ do {
+ irqreturn_t res;
+@@ -145,7 +145,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+
+ /* Fall through to add to randomness */
+ case IRQ_HANDLED:
+- random |= action->flags;
++ flags |= action->flags;
+ break;
+
+ default:
+@@ -156,8 +156,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+ action = action->next;
+ } while (action);
+
+- if (random & IRQF_SAMPLE_RANDOM)
+- add_interrupt_randomness(irq);
++ add_interrupt_randomness(irq, flags);
+
+ if (!noirqdebug)
+ note_interrupt(irq, desc, retval);
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 9cd8ca7..e0431c4 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -746,22 +746,19 @@ static inline int cpu_of(struct rq *rq)
+ /*
+ * Return the group to which this tasks belongs.
+ *
+- * We use task_subsys_state_check() and extend the RCU verification with
+- * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
+- * task it moves into the cgroup. Therefore by holding either of those locks,
+- * we pin the task to the current cgroup.
++ * We cannot use task_subsys_state() and friends because the cgroup
++ * subsystem changes that value before the cgroup_subsys::attach() method
++ * is called, therefore we cannot pin it and might observe the wrong value.
++ *
++ * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
++ * core changes this before calling sched_move_task().
++ *
++ * Instead we use a 'copy' which is updated from sched_move_task() while
++ * holding both task_struct::pi_lock and rq::lock.
+ */
+ static inline struct task_group *task_group(struct task_struct *p)
+ {
+- struct task_group *tg;
+- struct cgroup_subsys_state *css;
+-
+- css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
+- lockdep_is_held(&p->pi_lock) ||
+- lockdep_is_held(&task_rq(p)->lock));
+- tg = container_of(css, struct task_group, css);
+-
+- return autogroup_task_group(p, tg);
++ return p->sched_task_group;
+ }
+
+ /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
+@@ -2372,7 +2369,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+ * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
+ *
+ * sched_move_task() holds both and thus holding either pins the cgroup,
+- * see set_task_rq().
++ * see task_group().
+ *
+ * Furthermore, all task_rq users should acquire both locks, see
+ * task_rq_lock().
+@@ -8952,6 +8949,7 @@ void sched_destroy_group(struct task_group *tg)
+ */
+ void sched_move_task(struct task_struct *tsk)
+ {
++ struct task_group *tg;
+ int on_rq, running;
+ unsigned long flags;
+ struct rq *rq;
+@@ -8966,6 +8964,12 @@ void sched_move_task(struct task_struct *tsk)
+ if (unlikely(running))
+ tsk->sched_class->put_prev_task(rq, tsk);
+
++ tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
++ lockdep_is_held(&tsk->sighand->siglock)),
++ struct task_group, css);
++ tg = autogroup_task_group(tsk, tg);
++ tsk->sched_task_group = tg;
++
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ if (tsk->sched_class->task_move_group)
+ tsk->sched_class->task_move_group(tsk, on_rq);
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index 993599e..d74c317 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -886,7 +886,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+ * %pK cannot be used in IRQ context because its test
+ * for CAP_SYSLOG would be meaningless.
+ */
+- if (in_irq() || in_serving_softirq() || in_nmi()) {
++ if (kptr_restrict && (in_irq() || in_serving_softirq() ||
++ in_nmi())) {
+ if (spec.field_width == -1)
+ spec.field_width = 2 * sizeof(void *);
+ return string(buf, end, "pK-error", spec);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index b1e1bad..0f897b8 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2382,6 +2382,25 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
+ }
+ }
+
++void __unmap_hugepage_range_final(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end,
++ struct page *ref_page)
++{
++ __unmap_hugepage_range(vma, start, end, ref_page);
++
++ /*
++ * Clear this flag so that x86's huge_pmd_share page_table_shareable
++ * test will fail on a vma being torn down, and not grab a page table
++ * on its way out. We're lucky that the flag has such an appropriate
++ * name, and can in fact be safely cleared here. We could clear it
++ * before the __unmap_hugepage_range above, but all that's necessary
++ * is to clear it before releasing the i_mmap_mutex. This works
++ * because in the context this is called, the VMA is about to be
++ * destroyed and the i_mmap_mutex is held.
++ */
++ vma->vm_flags &= ~VM_MAYSHARE;
++}
++
+ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, struct page *ref_page)
+ {
+@@ -2939,9 +2958,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
+ }
+ }
+ spin_unlock(&mm->page_table_lock);
+- mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+-
++ /*
++ * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
++ * may have cleared our pud entry and done put_page on the page table:
++ * once we release i_mmap_mutex, another task can do the final put_page
++ * and that page table be reused and filled with junk.
++ */
+ flush_tlb_range(vma, start, end);
++ mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+ }
+
+ int hugetlb_reserve_pages(struct inode *inode,
+diff --git a/mm/internal.h b/mm/internal.h
+index 2189af4..0c26b5e 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -309,3 +309,5 @@ extern u64 hwpoison_filter_flags_mask;
+ extern u64 hwpoison_filter_flags_value;
+ extern u64 hwpoison_filter_memcg;
+ extern u32 hwpoison_filter_enable;
++
++extern void set_pageblock_order(void);
+diff --git a/mm/memory.c b/mm/memory.c
+index 1b1ca17..70f5daf 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1358,8 +1358,11 @@ unsigned long unmap_vmas(struct mmu_gather *tlb,
+ * Since no pte has actually been setup, it is
+ * safe to do nothing in this case.
+ */
+- if (vma->vm_file)
+- unmap_hugepage_range(vma, start, end, NULL);
++ if (vma->vm_file) {
++ mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
++ __unmap_hugepage_range_final(vma, start, end, NULL);
++ mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
++ }
+
+ start = end;
+ } else
+diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
+index 9a611d3..862b608 100644
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -33,6 +33,24 @@
+ void __mmu_notifier_release(struct mm_struct *mm)
+ {
+ struct mmu_notifier *mn;
++ struct hlist_node *n;
++
++ /*
++ * RCU here will block mmu_notifier_unregister until
++ * ->release returns.
++ */
++ rcu_read_lock();
++ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
++ /*
++ * if ->release runs before mmu_notifier_unregister it
++ * must be handled as it's the only way for the driver
++ * to flush all existing sptes and stop the driver
++ * from establishing any more sptes before all the
++ * pages in the mm are freed.
++ */
++ if (mn->ops->release)
++ mn->ops->release(mn, mm);
++ rcu_read_unlock();
+
+ spin_lock(&mm->mmu_notifier_mm->lock);
+ while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
+@@ -46,23 +64,6 @@ void __mmu_notifier_release(struct mm_struct *mm)
+ * mmu_notifier_unregister to return.
+ */
+ hlist_del_init_rcu(&mn->hlist);
+- /*
+- * RCU here will block mmu_notifier_unregister until
+- * ->release returns.
+- */
+- rcu_read_lock();
+- spin_unlock(&mm->mmu_notifier_mm->lock);
+- /*
+- * if ->release runs before mmu_notifier_unregister it
+- * must be handled as it's the only way for the driver
+- * to flush all existing sptes and stop the driver
+- * from establishing any more sptes before all the
+- * pages in the mm are freed.
+- */
+- if (mn->ops->release)
+- mn->ops->release(mn, mm);
+- rcu_read_unlock();
+- spin_lock(&mm->mmu_notifier_mm->lock);
+ }
+ spin_unlock(&mm->mmu_notifier_mm->lock);
+
+@@ -284,16 +285,13 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+ {
+ BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
+- spin_lock(&mm->mmu_notifier_mm->lock);
+ if (!hlist_unhashed(&mn->hlist)) {
+- hlist_del_rcu(&mn->hlist);
+-
+ /*
+ * RCU here will force exit_mmap to wait ->release to finish
+ * before freeing the pages.
+ */
+ rcu_read_lock();
+- spin_unlock(&mm->mmu_notifier_mm->lock);
++
+ /*
+ * exit_mmap will block in mmu_notifier_release to
+ * guarantee ->release is called before freeing the
+@@ -302,8 +300,11 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+ if (mn->ops->release)
+ mn->ops->release(mn, mm);
+ rcu_read_unlock();
+- } else
++
++ spin_lock(&mm->mmu_notifier_mm->lock);
++ hlist_del_rcu(&mn->hlist);
+ spin_unlock(&mm->mmu_notifier_mm->lock);
++ }
+
+ /*
+ * Wait any running method to finish, of course including
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 065dbe8..6e51bf0 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4281,25 +4281,24 @@ static inline void setup_usemap(struct pglist_data *pgdat,
+
+ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+
+-/* Return a sensible default order for the pageblock size. */
+-static inline int pageblock_default_order(void)
+-{
+- if (HPAGE_SHIFT > PAGE_SHIFT)
+- return HUGETLB_PAGE_ORDER;
+-
+- return MAX_ORDER-1;
+-}
+-
+ /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
+-static inline void __init set_pageblock_order(unsigned int order)
++void __init set_pageblock_order(void)
+ {
++ unsigned int order;
++
+ /* Check that pageblock_nr_pages has not already been setup */
+ if (pageblock_order)
+ return;
+
++ if (HPAGE_SHIFT > PAGE_SHIFT)
++ order = HUGETLB_PAGE_ORDER;
++ else
++ order = MAX_ORDER - 1;
++
+ /*
+ * Assume the largest contiguous order of interest is a huge page.
+- * This value may be variable depending on boot parameters on IA64
++ * This value may be variable depending on boot parameters on IA64 and
++ * powerpc.
+ */
+ pageblock_order = order;
+ }
+@@ -4307,15 +4306,13 @@ static inline void __init set_pageblock_order(unsigned int order)
+
+ /*
+ * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
+- * and pageblock_default_order() are unused as pageblock_order is set
+- * at compile-time. See include/linux/pageblock-flags.h for the values of
+- * pageblock_order based on the kernel config
++ * is unused as pageblock_order is set at compile-time. See
++ * include/linux/pageblock-flags.h for the values of pageblock_order based on
++ * the kernel config
+ */
+-static inline int pageblock_default_order(unsigned int order)
++void __init set_pageblock_order(void)
+ {
+- return MAX_ORDER-1;
+ }
+-#define set_pageblock_order(x) do {} while (0)
+
+ #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
+
+@@ -4403,7 +4400,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
+ if (!size)
+ continue;
+
+- set_pageblock_order(pageblock_default_order());
++ set_pageblock_order();
+ setup_usemap(pgdat, zone, size);
+ ret = init_currently_empty_zone(zone, zone_start_pfn,
+ size, MEMMAP_EARLY);
+diff --git a/mm/sparse.c b/mm/sparse.c
+index a8bc7d3..bf7d3cc 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -486,6 +486,9 @@ void __init sparse_init(void)
+ struct page **map_map;
+ #endif
+
++ /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
++ set_pageblock_order();
++
+ /*
+ * map is using big page (aka 2M in x86 64 bit)
+ * usemap is less one page (aka 24 bytes)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 5738654..4b18703 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1177,6 +1177,7 @@ static int __dev_open(struct net_device *dev)
+ net_dmaengine_get();
+ dev_set_rx_mode(dev);
+ dev_activate(dev);
++ add_device_randomness(dev->dev_addr, dev->addr_len);
+ }
+
+ return ret;
+@@ -4841,6 +4842,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
+ err = ops->ndo_set_mac_address(dev, sa);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
++ add_device_randomness(dev->dev_addr, dev->addr_len);
+ return err;
+ }
+ EXPORT_SYMBOL(dev_set_mac_address);
+@@ -5621,6 +5623,7 @@ int register_netdevice(struct net_device *dev)
+ dev_init_scheduler(dev);
+ dev_hold(dev);
+ list_netdevice(dev);
++ add_device_randomness(dev->dev_addr, dev->addr_len);
+
+ /* Notify protocols, that a new device appeared. */
+ ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index 7f36b38..b856f87 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -33,22 +33,19 @@
+ #define TRACE_ON 1
+ #define TRACE_OFF 0
+
+-static void send_dm_alert(struct work_struct *unused);
+-
+-
+ /*
+ * Globals, our netlink socket pointer
+ * and the work handle that will send up
+ * netlink alerts
+ */
+ static int trace_state = TRACE_OFF;
+-static DEFINE_SPINLOCK(trace_state_lock);
++static DEFINE_MUTEX(trace_state_mutex);
+
+ struct per_cpu_dm_data {
+- struct work_struct dm_alert_work;
+- struct sk_buff *skb;
+- atomic_t dm_hit_count;
+- struct timer_list send_timer;
++ spinlock_t lock;
++ struct sk_buff *skb;
++ struct work_struct dm_alert_work;
++ struct timer_list send_timer;
+ };
+
+ struct dm_hw_stat_delta {
+@@ -74,56 +71,59 @@ static int dm_delay = 1;
+ static unsigned long dm_hw_check_delta = 2*HZ;
+ static LIST_HEAD(hw_stats_list);
+
+-static void reset_per_cpu_data(struct per_cpu_dm_data *data)
++static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
+ {
+ size_t al;
+ struct net_dm_alert_msg *msg;
+ struct nlattr *nla;
++ struct sk_buff *skb;
++ unsigned long flags;
+
+ al = sizeof(struct net_dm_alert_msg);
+ al += dm_hit_limit * sizeof(struct net_dm_drop_point);
+ al += sizeof(struct nlattr);
+
+- data->skb = genlmsg_new(al, GFP_KERNEL);
+- genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
+- 0, NET_DM_CMD_ALERT);
+- nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg));
+- msg = nla_data(nla);
+- memset(msg, 0, al);
+- atomic_set(&data->dm_hit_count, dm_hit_limit);
++ skb = genlmsg_new(al, GFP_KERNEL);
++
++ if (skb) {
++ genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
++ 0, NET_DM_CMD_ALERT);
++ nla = nla_reserve(skb, NLA_UNSPEC,
++ sizeof(struct net_dm_alert_msg));
++ msg = nla_data(nla);
++ memset(msg, 0, al);
++ } else {
++ mod_timer(&data->send_timer, jiffies + HZ / 10);
++ }
++
++ spin_lock_irqsave(&data->lock, flags);
++ swap(data->skb, skb);
++ spin_unlock_irqrestore(&data->lock, flags);
++
++ return skb;
+ }
+
+-static void send_dm_alert(struct work_struct *unused)
++static void send_dm_alert(struct work_struct *work)
+ {
+ struct sk_buff *skb;
+- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
++ struct per_cpu_dm_data *data;
+
+- /*
+- * Grab the skb we're about to send
+- */
+- skb = data->skb;
++ data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
+
+- /*
+- * Replace it with a new one
+- */
+- reset_per_cpu_data(data);
+-
+- /*
+- * Ship it!
+- */
+- genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
++ skb = reset_per_cpu_data(data);
+
++ if (skb)
++ genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+ }
+
+ /*
+ * This is the timer function to delay the sending of an alert
+ * in the event that more drops will arrive during the
+- * hysteresis period. Note that it operates under the timer interrupt
+- * so we don't need to disable preemption here
++ * hysteresis period.
+ */
+-static void sched_send_work(unsigned long unused)
++static void sched_send_work(unsigned long _data)
+ {
+- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
++ struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;
+
+ schedule_work(&data->dm_alert_work);
+ }
+@@ -134,17 +134,19 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
+ struct nlmsghdr *nlh;
+ struct nlattr *nla;
+ int i;
+- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
++ struct sk_buff *dskb;
++ struct per_cpu_dm_data *data;
++ unsigned long flags;
+
++ local_irq_save(flags);
++ data = &__get_cpu_var(dm_cpu_data);
++ spin_lock(&data->lock);
++ dskb = data->skb;
+
+- if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
+- /*
+- * we're already at zero, discard this hit
+- */
++ if (!dskb)
+ goto out;
+- }
+
+- nlh = (struct nlmsghdr *)data->skb->data;
++ nlh = (struct nlmsghdr *)dskb->data;
+ nla = genlmsg_data(nlmsg_data(nlh));
+ msg = nla_data(nla);
+ for (i = 0; i < msg->entries; i++) {
+@@ -153,11 +155,12 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
+ goto out;
+ }
+ }
+-
++ if (msg->entries == dm_hit_limit)
++ goto out;
+ /*
+ * We need to create a new entry
+ */
+- __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
++ __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
+ nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
+ memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
+ msg->points[msg->entries].count = 1;
+@@ -165,11 +168,11 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
+
+ if (!timer_pending(&data->send_timer)) {
+ data->send_timer.expires = jiffies + dm_delay * HZ;
+- add_timer_on(&data->send_timer, smp_processor_id());
++ add_timer(&data->send_timer);
+ }
+
+ out:
+- return;
++ spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
+@@ -213,7 +216,7 @@ static int set_all_monitor_traces(int state)
+ struct dm_hw_stat_delta *new_stat = NULL;
+ struct dm_hw_stat_delta *temp;
+
+- spin_lock(&trace_state_lock);
++ mutex_lock(&trace_state_mutex);
+
+ if (state == trace_state) {
+ rc = -EAGAIN;
+@@ -252,7 +255,7 @@ static int set_all_monitor_traces(int state)
+ rc = -EINPROGRESS;
+
+ out_unlock:
+- spin_unlock(&trace_state_lock);
++ mutex_unlock(&trace_state_mutex);
+
+ return rc;
+ }
+@@ -295,12 +298,12 @@ static int dropmon_net_event(struct notifier_block *ev_block,
+
+ new_stat->dev = dev;
+ new_stat->last_rx = jiffies;
+- spin_lock(&trace_state_lock);
++ mutex_lock(&trace_state_mutex);
+ list_add_rcu(&new_stat->list, &hw_stats_list);
+- spin_unlock(&trace_state_lock);
++ mutex_unlock(&trace_state_mutex);
+ break;
+ case NETDEV_UNREGISTER:
+- spin_lock(&trace_state_lock);
++ mutex_lock(&trace_state_mutex);
+ list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
+ if (new_stat->dev == dev) {
+ new_stat->dev = NULL;
+@@ -311,7 +314,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
+ }
+ }
+ }
+- spin_unlock(&trace_state_lock);
++ mutex_unlock(&trace_state_mutex);
+ break;
+ }
+ out:
+@@ -367,13 +370,15 @@ static int __init init_net_drop_monitor(void)
+
+ for_each_present_cpu(cpu) {
+ data = &per_cpu(dm_cpu_data, cpu);
+- reset_per_cpu_data(data);
+ INIT_WORK(&data->dm_alert_work, send_dm_alert);
+ init_timer(&data->send_timer);
+- data->send_timer.data = cpu;
++ data->send_timer.data = (unsigned long)data;
+ data->send_timer.function = sched_send_work;
++ spin_lock_init(&data->lock);
++ reset_per_cpu_data(data);
+ }
+
++
+ goto out;
+
+ out_unreg:
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 2ef859a..05842ab 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1354,6 +1354,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
+ goto errout;
+ send_addr_notify = 1;
+ modified = 1;
++ add_device_randomness(dev->dev_addr, dev->addr_len);
+ }
+
+ if (tb[IFLA_MTU]) {
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index 8761bf8..337c68b 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -246,7 +246,7 @@ static int rpcb_create_local_unix(void)
+ if (IS_ERR(clnt)) {
+ dprintk("RPC: failed to create AF_LOCAL rpcbind "
+ "client (errno %ld).\n", PTR_ERR(clnt));
+- result = -PTR_ERR(clnt);
++ result = PTR_ERR(clnt);
+ goto out;
+ }
+
+@@ -293,7 +293,7 @@ static int rpcb_create_local_net(void)
+ if (IS_ERR(clnt)) {
+ dprintk("RPC: failed to create local rpcbind "
+ "client (errno %ld).\n", PTR_ERR(clnt));
+- result = -PTR_ERR(clnt);
++ result = PTR_ERR(clnt);
+ goto out;
+ }
+
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 4e2b3b4..c90b832 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -755,7 +755,9 @@ void rpc_execute(struct rpc_task *task)
+
+ static void rpc_async_schedule(struct work_struct *work)
+ {
++ current->flags |= PF_FSTRANS;
+ __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ /**
+diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
+index b446e10..06cdbff 100644
+--- a/net/sunrpc/xprtrdma/transport.c
++++ b/net/sunrpc/xprtrdma/transport.c
+@@ -200,6 +200,7 @@ xprt_rdma_connect_worker(struct work_struct *work)
+ int rc = 0;
+
+ if (!xprt->shutdown) {
++ current->flags |= PF_FSTRANS;
+ xprt_clear_connected(xprt);
+
+ dprintk("RPC: %s: %sconnect\n", __func__,
+@@ -212,10 +213,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
+
+ out:
+ xprt_wake_pending_tasks(xprt, rc);
+-
+ out_clear:
+ dprintk("RPC: %s: exit\n", __func__);
+ xprt_clear_connecting(xprt);
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ /*
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 55472c4..1a6edc7 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1895,6 +1895,8 @@ static void xs_local_setup_socket(struct work_struct *work)
+ if (xprt->shutdown)
+ goto out;
+
++ current->flags |= PF_FSTRANS;
++
+ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
+ status = __sock_create(xprt->xprt_net, AF_LOCAL,
+ SOCK_STREAM, 0, &sock, 1);
+@@ -1928,6 +1930,7 @@ static void xs_local_setup_socket(struct work_struct *work)
+ out:
+ xprt_clear_connecting(xprt);
+ xprt_wake_pending_tasks(xprt, status);
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
+@@ -1970,6 +1973,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
+ if (xprt->shutdown)
+ goto out;
+
++ current->flags |= PF_FSTRANS;
++
+ /* Start by resetting any existing state */
+ xs_reset_transport(transport);
+ sock = xs_create_sock(xprt, transport,
+@@ -1988,6 +1993,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
+ out:
+ xprt_clear_connecting(xprt);
+ xprt_wake_pending_tasks(xprt, status);
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ /*
+@@ -2113,6 +2119,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ if (xprt->shutdown)
+ goto out;
+
++ current->flags |= PF_FSTRANS;
++
+ if (!sock) {
+ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
+ sock = xs_create_sock(xprt, transport,
+@@ -2162,6 +2170,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ case -EINPROGRESS:
+ case -EALREADY:
+ xprt_clear_connecting(xprt);
++ current->flags &= ~PF_FSTRANS;
+ return;
+ case -EINVAL:
+ /* Happens, for instance, if the user specified a link
+@@ -2174,6 +2183,7 @@ out_eagain:
+ out:
+ xprt_clear_connecting(xprt);
+ xprt_wake_pending_tasks(xprt, status);
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ /**
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 74d5292..b5e4c1c 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -981,6 +981,9 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+ }
+ mutex_unlock(&rdev->devlist_mtx);
+
++ if (total == 1)
++ return 0;
++
+ for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
+ const struct ieee80211_iface_combination *c;
+ struct ieee80211_iface_limit *limits;
+diff --git a/sound/drivers/mpu401/mpu401_uart.c b/sound/drivers/mpu401/mpu401_uart.c
+index 1cff331..4608c2c 100644
+--- a/sound/drivers/mpu401/mpu401_uart.c
++++ b/sound/drivers/mpu401/mpu401_uart.c
+@@ -554,6 +554,7 @@ int snd_mpu401_uart_new(struct snd_card *card, int device,
+ spin_lock_init(&mpu->output_lock);
+ spin_lock_init(&mpu->timer_lock);
+ mpu->hardware = hardware;
++ mpu->irq = -1;
+ if (! (info_flags & MPU401_INFO_INTEGRATED)) {
+ int res_size = hardware == MPU401_HW_PC98II ? 4 : 2;
+ mpu->res = request_region(port, res_size, "MPU401 UART");
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 191fd78..2e2eb93 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4809,6 +4809,15 @@ static int alc269_resume(struct hda_codec *codec)
+ }
+ #endif /* CONFIG_PM */
+
++static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
++ const struct alc_fixup *fix, int action)
++{
++ struct alc_spec *spec = codec->spec;
++
++ if (action == ALC_FIXUP_ACT_PRE_PROBE)
++ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
++}
++
+ static void alc269_fixup_hweq(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+ {
+@@ -4909,6 +4918,8 @@ enum {
+ ALC269_FIXUP_DMIC,
+ ALC269VB_FIXUP_AMIC,
+ ALC269VB_FIXUP_DMIC,
++ ALC269_FIXUP_LENOVO_DOCK,
++ ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
+ };
+
+ static const struct alc_fixup alc269_fixups[] = {
+@@ -5029,6 +5040,20 @@ static const struct alc_fixup alc269_fixups[] = {
+ { }
+ },
+ },
++ [ALC269_FIXUP_LENOVO_DOCK] = {
++ .type = ALC_FIXUP_PINS,
++ .v.pins = (const struct alc_pincfg[]) {
++ { 0x19, 0x23a11040 }, /* dock mic */
++ { 0x1b, 0x2121103f }, /* dock headphone */
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT
++ },
++ [ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = {
++ .type = ALC_FIXUP_FUNC,
++ .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -5051,6 +5076,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
++ SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
+ SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+@@ -5109,6 +5136,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ static const struct alc_model_fixup alc269_fixup_models[] = {
+ {.id = ALC269_FIXUP_AMIC, .name = "laptop-amic"},
+ {.id = ALC269_FIXUP_DMIC, .name = "laptop-dmic"},
++ {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
+ {}
+ };
+
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index 1fe1308..7160ff2 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -3227,7 +3227,7 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
+ {
+ struct via_spec *spec = codec->spec;
+ int imux_is_smixer;
+- unsigned int parm;
++ unsigned int parm, parm2;
+ /* MUX6 (1eh) = stereo mixer */
+ imux_is_smixer =
+ snd_hda_codec_read(codec, 0x1e, 0, AC_VERB_GET_CONNECT_SEL, 0x00) == 5;
+@@ -3250,7 +3250,7 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
+ parm = AC_PWRST_D3;
+ set_pin_power_state(codec, 0x27, &parm);
+ snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_POWER_STATE, parm);
+- snd_hda_codec_write(codec, 0xb, 0, AC_VERB_SET_POWER_STATE, parm);
++ parm2 = parm; /* for pin 0x0b */
+
+ /* PW2 (26h), AOW2 (ah) */
+ parm = AC_PWRST_D3;
+@@ -3265,6 +3265,9 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
+ if (!spec->hp_independent_mode) /* check for redirected HP */
+ set_pin_power_state(codec, 0x28, &parm);
+ snd_hda_codec_write(codec, 0x8, 0, AC_VERB_SET_POWER_STATE, parm);
++ if (!spec->hp_independent_mode && parm2 != AC_PWRST_D3)
++ parm = parm2;
++ snd_hda_codec_write(codec, 0xb, 0, AC_VERB_SET_POWER_STATE, parm);
+ /* MW9 (21h), Mw2 (1ah), AOW0 (8h) */
+ snd_hda_codec_write(codec, 0x21, 0, AC_VERB_SET_POWER_STATE,
+ imux_is_smixer ? AC_PWRST_D0 : parm);
+diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
+index 07dd7eb..e97df24 100644
+--- a/sound/soc/codecs/wm8962.c
++++ b/sound/soc/codecs/wm8962.c
+@@ -3105,6 +3105,9 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec,
+ /* VMID 2*250k */
+ snd_soc_update_bits(codec, WM8962_PWR_MGMT_1,
+ WM8962_VMID_SEL_MASK, 0x100);
++
++ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
++ msleep(100);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index de61b8a..98c5774 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -2508,7 +2508,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
+ return -EINVAL;
+ }
+
+- bclk_rate = params_rate(params) * 2;
++ bclk_rate = params_rate(params) * 4;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ bclk_rate *= 16;
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index 379baad..5e634a2 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -111,7 +111,8 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, int source_id)
+ return 0;
+
+ /* If a clock source can't tell us whether it's valid, we assume it is */
+- if (!uac2_control_is_readable(cs_desc->bmControls, UAC2_CS_CONTROL_CLOCK_VALID))
++ if (!uac2_control_is_readable(cs_desc->bmControls,
++ UAC2_CS_CONTROL_CLOCK_VALID - 1))
+ return 1;
+
+ err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
diff --git a/1027_linux-3.2.28.patch b/1027_linux-3.2.28.patch
new file mode 100644
index 00000000..4dbba4b9
--- /dev/null
+++ b/1027_linux-3.2.28.patch
@@ -0,0 +1,1114 @@
+diff --git a/Makefile b/Makefile
+index bdf851f..5368961 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 27
++SUBLEVEL = 28
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
+index 6ee781b..3ee3e84 100644
+--- a/arch/arm/configs/mxs_defconfig
++++ b/arch/arm/configs/mxs_defconfig
+@@ -32,7 +32,6 @@ CONFIG_NO_HZ=y
+ CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_PREEMPT_VOLUNTARY=y
+ CONFIG_AEABI=y
+-CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
+ CONFIG_AUTO_ZRELADDR=y
+ CONFIG_FPE_NWFPE=y
+ CONFIG_NET=y
+diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
+index f0c05f4..ae7786d 100644
+--- a/arch/arm/mach-pxa/raumfeld.c
++++ b/arch/arm/mach-pxa/raumfeld.c
+@@ -951,12 +951,12 @@ static struct i2c_board_info raumfeld_connector_i2c_board_info __initdata = {
+
+ static struct eeti_ts_platform_data eeti_ts_pdata = {
+ .irq_active_high = 1,
++ .irq_gpio = GPIO_TOUCH_IRQ,
+ };
+
+ static struct i2c_board_info raumfeld_controller_i2c_board_info __initdata = {
+ .type = "eeti_ts",
+ .addr = 0x0a,
+- .irq = gpio_to_irq(GPIO_TOUCH_IRQ),
+ .platform_data = &eeti_ts_pdata,
+ };
+
+diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
+index 84a9828..38c6645 100644
+--- a/arch/s390/kernel/compat_linux.c
++++ b/arch/s390/kernel/compat_linux.c
+@@ -615,7 +615,6 @@ asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
+ return -EFAULT;
+ if (a.offset & ~PAGE_MASK)
+ return -EINVAL;
+- a.addr = (unsigned long) compat_ptr(a.addr);
+ return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+ a.offset >> PAGE_SHIFT);
+ }
+@@ -626,7 +625,6 @@ asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ return -EFAULT;
+- a.addr = (unsigned long) compat_ptr(a.addr);
+ return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
+ }
+
+diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
+index 18c51df..25408d3 100644
+--- a/arch/s390/kernel/compat_wrapper.S
++++ b/arch/s390/kernel/compat_wrapper.S
+@@ -1636,7 +1636,7 @@ ENTRY(compat_sys_process_vm_readv_wrapper)
+ llgfr %r6,%r6 # unsigned long
+ llgf %r0,164(%r15) # unsigned long
+ stg %r0,160(%r15)
+- jg sys_process_vm_readv
++ jg compat_sys_process_vm_readv
+
+ ENTRY(compat_sys_process_vm_writev_wrapper)
+ lgfr %r2,%r2 # compat_pid_t
+@@ -1646,4 +1646,4 @@ ENTRY(compat_sys_process_vm_writev_wrapper)
+ llgfr %r6,%r6 # unsigned long
+ llgf %r0,164(%r15) # unsigned long
+ stg %r0,160(%r15)
+- jg sys_process_vm_writev
++ jg compat_sys_process_vm_writev
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 7315488..407789b 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1956,6 +1956,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
+ #endif
+ CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
+ CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
++ CPU_BASED_RDPMC_EXITING |
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
+ /*
+ * We can allow some features even when not supported by the
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index d62c731..c364358 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1170,12 +1170,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+ return (int32_t)(seq1 - seq2) >= 0;
+ }
+
+-static inline u32
+-i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+-{
+- drm_i915_private_t *dev_priv = ring->dev->dev_private;
+- return ring->outstanding_lazy_request = dev_priv->next_seqno;
+-}
++u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
+
+ int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined);
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 3e2edc6..548a400 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1647,6 +1647,28 @@ i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
+ }
+ }
+
++static u32
++i915_gem_get_seqno(struct drm_device *dev)
++{
++ drm_i915_private_t *dev_priv = dev->dev_private;
++ u32 seqno = dev_priv->next_seqno;
++
++ /* reserve 0 for non-seqno */
++ if (++dev_priv->next_seqno == 0)
++ dev_priv->next_seqno = 1;
++
++ return seqno;
++}
++
++u32
++i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
++{
++ if (ring->outstanding_lazy_request == 0)
++ ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
++
++ return ring->outstanding_lazy_request;
++}
++
+ int
+ i915_add_request(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+@@ -1658,6 +1680,7 @@ i915_add_request(struct intel_ring_buffer *ring,
+ int ret;
+
+ BUG_ON(request == NULL);
++ seqno = i915_gem_next_request_seqno(ring);
+
+ ret = ring->add_request(ring, &seqno);
+ if (ret)
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index f6613dc..19085c0 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -52,20 +52,6 @@ static inline int ring_space(struct intel_ring_buffer *ring)
+ return space;
+ }
+
+-static u32 i915_gem_get_seqno(struct drm_device *dev)
+-{
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- u32 seqno;
+-
+- seqno = dev_priv->next_seqno;
+-
+- /* reserve 0 for non-seqno */
+- if (++dev_priv->next_seqno == 0)
+- dev_priv->next_seqno = 1;
+-
+- return seqno;
+-}
+-
+ static int
+ render_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+@@ -277,8 +263,6 @@ static int init_ring_common(struct intel_ring_buffer *ring)
+ I915_WRITE_HEAD(ring, 0);
+ ring->write_tail(ring, 0);
+
+- /* Initialize the ring. */
+- I915_WRITE_START(ring, obj->gtt_offset);
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
+
+ /* G45 ring initialization fails to reset head to zero */
+@@ -304,14 +288,19 @@ static int init_ring_common(struct intel_ring_buffer *ring)
+ }
+ }
+
++ /* Initialize the ring. This must happen _after_ we've cleared the ring
++ * registers with the above sequence (the readback of the HEAD registers
++ * also enforces ordering), otherwise the hw might lose the new ring
++ * register values. */
++ I915_WRITE_START(ring, obj->gtt_offset);
+ I915_WRITE_CTL(ring,
+ ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+ | RING_VALID);
+
+ /* If the head is still not zero, the ring is dead */
+- if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
+- I915_READ_START(ring) != obj->gtt_offset ||
+- (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
++ if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
++ I915_READ_START(ring) == obj->gtt_offset &&
++ (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
+ DRM_ERROR("%s initialization failed "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+@@ -488,7 +477,7 @@ gen6_add_request(struct intel_ring_buffer *ring,
+ mbox1_reg = ring->signal_mbox[0];
+ mbox2_reg = ring->signal_mbox[1];
+
+- *seqno = i915_gem_get_seqno(ring->dev);
++ *seqno = i915_gem_next_request_seqno(ring);
+
+ update_mboxes(ring, *seqno, mbox1_reg);
+ update_mboxes(ring, *seqno, mbox2_reg);
+@@ -586,8 +575,7 @@ static int
+ pc_render_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
+ {
+- struct drm_device *dev = ring->dev;
+- u32 seqno = i915_gem_get_seqno(dev);
++ u32 seqno = i915_gem_next_request_seqno(ring);
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
+@@ -638,8 +626,7 @@ static int
+ render_ring_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
+ {
+- struct drm_device *dev = ring->dev;
+- u32 seqno = i915_gem_get_seqno(dev);
++ u32 seqno = i915_gem_next_request_seqno(ring);
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+@@ -813,7 +800,7 @@ ring_add_request(struct intel_ring_buffer *ring,
+ if (ret)
+ return ret;
+
+- seqno = i915_gem_get_seqno(ring->dev);
++ seqno = i915_gem_next_request_seqno(ring);
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 931f4df..fc0633c 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1065,24 +1065,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
+
+ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+ {
+- save->vga_control[0] = RREG32(D1VGA_CONTROL);
+- save->vga_control[1] = RREG32(D2VGA_CONTROL);
+ save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+- save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
+- save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+- if (rdev->num_crtc >= 4) {
+- save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
+- save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
+- save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+- save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+- }
+- if (rdev->num_crtc >= 6) {
+- save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
+- save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
+- save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+- save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+- }
+
+ /* Stop all video */
+ WREG32(VGA_RENDER_CONTROL, 0);
+@@ -1193,47 +1177,6 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
+ /* Unlock host access */
+ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+- /* Restore video state */
+- WREG32(D1VGA_CONTROL, save->vga_control[0]);
+- WREG32(D2VGA_CONTROL, save->vga_control[1]);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
+- WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
+- WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
+- }
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+- }
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
+- }
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+- }
+ WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+ }
+
+@@ -2080,10 +2023,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ if (rdev->flags & RADEON_IS_IGP)
+ rdev->config.evergreen.tile_config |= 1 << 4;
+ else {
+- if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+- rdev->config.evergreen.tile_config |= 1 << 4;
+- else
++ switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
++ case 0: /* four banks */
+ rdev->config.evergreen.tile_config |= 0 << 4;
++ break;
++ case 1: /* eight banks */
++ rdev->config.evergreen.tile_config |= 1 << 4;
++ break;
++ case 2: /* sixteen banks */
++ default:
++ rdev->config.evergreen.tile_config |= 2 << 4;
++ break;
++ }
+ }
+ rdev->config.evergreen.tile_config |=
+ ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 9e50814..636255b 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -804,10 +804,18 @@ static void cayman_gpu_init(struct radeon_device *rdev)
+ rdev->config.cayman.tile_config |= (3 << 0);
+ break;
+ }
+- if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+- rdev->config.cayman.tile_config |= 1 << 4;
+- else
++ switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
++ case 0: /* four banks */
+ rdev->config.cayman.tile_config |= 0 << 4;
++ break;
++ case 1: /* eight banks */
++ rdev->config.cayman.tile_config |= 1 << 4;
++ break;
++ case 2: /* sixteen banks */
++ default:
++ rdev->config.cayman.tile_config |= 2 << 4;
++ break;
++ }
+ rdev->config.cayman.tile_config |=
+ ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
+ rdev->config.cayman.tile_config |=
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
+index 5991484..5ce9402 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.h
++++ b/drivers/gpu/drm/radeon/radeon_asic.h
+@@ -253,13 +253,10 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
+ * rv515
+ */
+ struct rv515_mc_save {
+- u32 d1vga_control;
+- u32 d2vga_control;
+ u32 vga_render_control;
+ u32 vga_hdp_control;
+- u32 d1crtc_control;
+- u32 d2crtc_control;
+ };
++
+ int rv515_init(struct radeon_device *rdev);
+ void rv515_fini(struct radeon_device *rdev);
+ uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+@@ -387,11 +384,10 @@ void r700_cp_fini(struct radeon_device *rdev);
+ * evergreen
+ */
+ struct evergreen_mc_save {
+- u32 vga_control[6];
+ u32 vga_render_control;
+ u32 vga_hdp_control;
+- u32 crtc_control[6];
+ };
++
+ void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
+ int evergreen_init(struct radeon_device *rdev);
+ void evergreen_fini(struct radeon_device *rdev);
+diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
+index 6613ee9..d5f45b4 100644
+--- a/drivers/gpu/drm/radeon/rv515.c
++++ b/drivers/gpu/drm/radeon/rv515.c
+@@ -281,12 +281,8 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
+
+ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
+ {
+- save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL);
+- save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL);
+ save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
+- save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL);
+- save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
+
+ /* Stop all video */
+ WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
+@@ -311,15 +307,6 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
+ /* Unlock host access */
+ WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+- /* Restore video state */
+- WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
+- WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
+- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
+- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
+- WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
+- WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
+- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
+- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
+ WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
+ }
+
+diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
+index 7f8f538..4f938bb 100644
+--- a/drivers/input/touchscreen/eeti_ts.c
++++ b/drivers/input/touchscreen/eeti_ts.c
+@@ -48,7 +48,7 @@ struct eeti_ts_priv {
+ struct input_dev *input;
+ struct work_struct work;
+ struct mutex mutex;
+- int irq, irq_active_high;
++ int irq_gpio, irq, irq_active_high;
+ };
+
+ #define EETI_TS_BITDEPTH (11)
+@@ -62,7 +62,7 @@ struct eeti_ts_priv {
+
+ static inline int eeti_ts_irq_active(struct eeti_ts_priv *priv)
+ {
+- return gpio_get_value(irq_to_gpio(priv->irq)) == priv->irq_active_high;
++ return gpio_get_value(priv->irq_gpio) == priv->irq_active_high;
+ }
+
+ static void eeti_ts_read(struct work_struct *work)
+@@ -157,7 +157,7 @@ static void eeti_ts_close(struct input_dev *dev)
+ static int __devinit eeti_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *idp)
+ {
+- struct eeti_ts_platform_data *pdata;
++ struct eeti_ts_platform_data *pdata = client->dev.platform_data;
+ struct eeti_ts_priv *priv;
+ struct input_dev *input;
+ unsigned int irq_flags;
+@@ -199,9 +199,12 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
+
+ priv->client = client;
+ priv->input = input;
+- priv->irq = client->irq;
++ priv->irq_gpio = pdata->irq_gpio;
++ priv->irq = gpio_to_irq(pdata->irq_gpio);
+
+- pdata = client->dev.platform_data;
++ err = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name);
++ if (err < 0)
++ goto err1;
+
+ if (pdata)
+ priv->irq_active_high = pdata->irq_active_high;
+@@ -215,13 +218,13 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
+
+ err = input_register_device(input);
+ if (err)
+- goto err1;
++ goto err2;
+
+ err = request_irq(priv->irq, eeti_ts_isr, irq_flags,
+ client->name, priv);
+ if (err) {
+ dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
+- goto err2;
++ goto err3;
+ }
+
+ /*
+@@ -233,9 +236,11 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
+ device_init_wakeup(&client->dev, 0);
+ return 0;
+
+-err2:
++err3:
+ input_unregister_device(input);
+ input = NULL; /* so we dont try to free it below */
++err2:
++ gpio_free(pdata->irq_gpio);
+ err1:
+ input_free_device(input);
+ kfree(priv);
+diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
+index 43a76c4..db662e2 100644
+--- a/drivers/mfd/ezx-pcap.c
++++ b/drivers/mfd/ezx-pcap.c
+@@ -202,7 +202,7 @@ static void pcap_isr_work(struct work_struct *work)
+ }
+ local_irq_enable();
+ ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
+- } while (gpio_get_value(irq_to_gpio(pcap->spi->irq)));
++ } while (gpio_get_value(pdata->gpio));
+ }
+
+ static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
+diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
+index 23406e6..ae286a9 100644
+--- a/drivers/net/caif/caif_serial.c
++++ b/drivers/net/caif/caif_serial.c
+@@ -325,6 +325,9 @@ static int ldisc_open(struct tty_struct *tty)
+
+ sprintf(name, "cf%s", tty->name);
+ dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
++ if (!dev)
++ return -ENOMEM;
++
+ ser = netdev_priv(dev);
+ ser->tty = tty_kref_get(tty);
+ ser->dev = dev;
+diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
+index 965c723..721adfd 100644
+--- a/drivers/net/ethernet/broadcom/bnx2.c
++++ b/drivers/net/ethernet/broadcom/bnx2.c
+@@ -5378,7 +5378,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
+ int k, last;
+
+ if (skb == NULL) {
+- j++;
++ j = NEXT_TX_BD(j);
+ continue;
+ }
+
+@@ -5390,8 +5390,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
+ tx_buf->skb = NULL;
+
+ last = tx_buf->nr_frags;
+- j++;
+- for (k = 0; k < last; k++, j++) {
++ j = NEXT_TX_BD(j);
++ for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
+ tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
+ dma_unmap_page(&bp->pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index de00805..0549261 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -4743,12 +4743,14 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+ e1000_setup_rctl(adapter);
+ e1000_set_rx_mode(netdev);
+
++ rctl = er32(RCTL);
++
+ /* turn on all-multi mode if wake on multicast is enabled */
+- if (wufc & E1000_WUFC_MC) {
+- rctl = er32(RCTL);
++ if (wufc & E1000_WUFC_MC)
+ rctl |= E1000_RCTL_MPE;
+- ew32(RCTL, rctl);
+- }
++
++ /* enable receives in the hardware */
++ ew32(RCTL, rctl | E1000_RCTL_EN);
+
+ if (hw->mac_type >= e1000_82540) {
+ ctrl = er32(CTRL);
+diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
+index 3072d35..4f4d52a 100644
+--- a/drivers/net/ethernet/intel/e1000e/82571.c
++++ b/drivers/net/ethernet/intel/e1000e/82571.c
+@@ -1600,10 +1600,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
+ * auto-negotiation in the TXCW register and disable
+ * forced link in the Device Control register in an
+ * attempt to auto-negotiate with our link partner.
+- * If the partner code word is null, stop forcing
+- * and restart auto negotiation.
+ */
+- if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
++ if (rxcw & E1000_RXCW_C) {
+ /* Enable autoneg, and unforce link up */
+ ew32(TXCW, mac->txcw);
+ ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 7bea9c6..a12c9bf 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1243,10 +1243,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+ int vnet_hdr_sz;
+ int ret;
+
+- if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
++ if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
+ if (copy_from_user(&ifr, argp, ifreq_len))
+ return -EFAULT;
+-
++ } else {
++ memset(&ifr, 0, sizeof(ifr));
++ }
+ if (cmd == TUNGETFEATURES) {
+ /* Currently this just means: "what IFF flags are valid?".
+ * This is needed because we never checked for invalid flags on
+diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
+index 582ca2d..c4c6a73 100644
+--- a/drivers/net/usb/kaweth.c
++++ b/drivers/net/usb/kaweth.c
+@@ -1308,7 +1308,7 @@ static int kaweth_internal_control_msg(struct usb_device *usb_dev,
+ int retv;
+ int length = 0; /* shut up GCC */
+
+- urb = usb_alloc_urb(0, GFP_NOIO);
++ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index 7f97164..2b8e957 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -674,6 +674,7 @@ int ath9k_hw_init(struct ath_hw *ah)
+ case AR9300_DEVID_AR9340:
+ case AR9300_DEVID_AR9580:
+ case AR9300_DEVID_AR9462:
++ case AR9485_DEVID_AR1111:
+ break;
+ default:
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
+index 1bd8edf..a5c4ba8 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.h
++++ b/drivers/net/wireless/ath/ath9k/hw.h
+@@ -48,6 +48,7 @@
+ #define AR9300_DEVID_AR9580 0x0033
+ #define AR9300_DEVID_AR9462 0x0034
+ #define AR9300_DEVID_AR9330 0x0035
++#define AR9485_DEVID_AR1111 0x0037
+
+ #define AR5416_AR9100_DEVID 0x000b
+
+diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
+index 2dcdf63..1883d39 100644
+--- a/drivers/net/wireless/ath/ath9k/pci.c
++++ b/drivers/net/wireless/ath/ath9k/pci.c
+@@ -35,6 +35,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
+ { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
+ { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
+ { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
++ { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
+ { 0 }
+ };
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+index 9ba2c1b..3395025 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+@@ -708,11 +708,14 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
+ */
+ static bool rs_use_green(struct ieee80211_sta *sta)
+ {
+- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+- struct iwl_rxon_context *ctx = sta_priv->ctx;
+-
+- return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+- !(ctx->ht.non_gf_sta_present);
++ /*
++ * There's a bug somewhere in this code that causes the
++ * scaling to get stuck because GF+SGI can't be combined
++ * in SISO rates. Until we find that bug, disable GF, it
++ * has only limited benefit and we still interoperate with
++ * GF APs since we can always receive GF transmissions.
++ */
++ return false;
+ }
+
+ /**
+diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
+index bf55b4a..d69f88c 100644
+--- a/drivers/net/wireless/rt2x00/rt61pci.c
++++ b/drivers/net/wireless/rt2x00/rt61pci.c
+@@ -2243,8 +2243,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
+
+ static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
+ {
+- struct ieee80211_conf conf = { .flags = 0 };
+- struct rt2x00lib_conf libconf = { .conf = &conf };
++ struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf };
+
+ rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
+ }
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index db34db6..a49e848 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -120,15 +120,19 @@ static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
+ u8 request;
+ u16 wvalue;
+ u16 index;
+- __le32 *data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
++ __le32 *data;
++ unsigned long flags;
+
++ spin_lock_irqsave(&rtlpriv->locks.usb_lock, flags);
++ if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
++ rtlpriv->usb_data_index = 0;
++ data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
++ spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags);
+ request = REALTEK_USB_VENQT_CMD_REQ;
+ index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
+
+ wvalue = (u16)addr;
+ _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
+- if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
+- rtlpriv->usb_data_index = 0;
+ return le32_to_cpu(*data);
+ }
+
+@@ -909,6 +913,10 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
+ GFP_KERNEL);
+ if (!rtlpriv->usb_data)
+ return -ENOMEM;
++
++ /* this spin lock must be initialized early */
++ spin_lock_init(&rtlpriv->locks.usb_lock);
++
+ rtlpriv->usb_data_index = 0;
+ SET_IEEE80211_DEV(hw, &intf->dev);
+ udev = interface_to_usbdev(intf);
+diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
+index b1e9deb..deb87e9 100644
+--- a/drivers/net/wireless/rtlwifi/wifi.h
++++ b/drivers/net/wireless/rtlwifi/wifi.h
+@@ -1550,6 +1550,7 @@ struct rtl_locks {
+ spinlock_t rf_lock;
+ spinlock_t lps_lock;
+ spinlock_t waitq_lock;
++ spinlock_t usb_lock;
+
+ /*Dual mac*/
+ spinlock_t cck_and_rw_pagea_lock;
+diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
+index 7daf4b8..90effcc 100644
+--- a/fs/hfsplus/wrapper.c
++++ b/fs/hfsplus/wrapper.c
+@@ -56,7 +56,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct bio *bio;
+ int ret = 0;
+- unsigned int io_size;
++ u64 io_size;
+ loff_t start;
+ int offset;
+
+diff --git a/include/linux/input/eeti_ts.h b/include/linux/input/eeti_ts.h
+index f875b31..16625d7 100644
+--- a/include/linux/input/eeti_ts.h
++++ b/include/linux/input/eeti_ts.h
+@@ -2,6 +2,7 @@
+ #define LINUX_INPUT_EETI_TS_H
+
+ struct eeti_ts_platform_data {
++ int irq_gpio;
+ unsigned int irq_active_high;
+ };
+
+diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
+index 40c37216..32a1b5c 100644
+--- a/include/linux/mfd/ezx-pcap.h
++++ b/include/linux/mfd/ezx-pcap.h
+@@ -16,6 +16,7 @@ struct pcap_subdev {
+ struct pcap_platform_data {
+ unsigned int irq_base;
+ unsigned int config;
++ int gpio;
+ void (*init) (void *); /* board specific init */
+ int num_subdevs;
+ struct pcap_subdev *subdevs;
+diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
+index 68223e4..4e9115d 100644
+--- a/net/caif/caif_dev.c
++++ b/net/caif/caif_dev.c
+@@ -428,9 +428,9 @@ static int __init caif_device_init(void)
+
+ static void __exit caif_device_exit(void)
+ {
+- unregister_pernet_subsys(&caif_net_ops);
+ unregister_netdevice_notifier(&caif_device_notifier);
+ dev_remove_pack(&caif_packet_type);
++ unregister_pernet_subsys(&caif_net_ops);
+ }
+
+ module_init(caif_device_init);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 05842ab..0cf604b 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -670,6 +670,12 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
+ }
+ }
+
++static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
++{
++ return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
++ (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
++}
++
+ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
+ const struct ifinfomsg *ifm)
+ {
+@@ -678,7 +684,7 @@ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
+ /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
+ if (ifm->ifi_change)
+ flags = (flags & ifm->ifi_change) |
+- (dev->flags & ~ifm->ifi_change);
++ (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
+
+ return flags;
+ }
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index 86f3b88..afaa735 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -1725,8 +1725,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
+ case CIPSO_V4_TAG_LOCAL:
+ /* This is a non-standard tag that we only allow for
+ * local connections, so if the incoming interface is
+- * not the loopback device drop the packet. */
+- if (!(skb->dev->flags & IFF_LOOPBACK)) {
++ * not the loopback device drop the packet. Further,
++ * there is no legitimate reason for setting this from
++ * userspace so reject it if skb is NULL. */
++ if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
+ err_offset = opt_iter;
+ goto validate_return_locked;
+ }
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 11ba922..ad466a7 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2391,7 +2391,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ /* Cap the max timeout in ms TCP will retry/retrans
+ * before giving up and aborting (ETIMEDOUT) a connection.
+ */
+- icsk->icsk_user_timeout = msecs_to_jiffies(val);
++ if (val < 0)
++ err = -EINVAL;
++ else
++ icsk->icsk_user_timeout = msecs_to_jiffies(val);
+ break;
+ default:
+ err = -ENOPROTOOPT;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 32e6ca2..a08a621 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5415,7 +5415,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ if (tp->copied_seq == tp->rcv_nxt &&
+ len - tcp_header_len <= tp->ucopy.len) {
+ #ifdef CONFIG_NET_DMA
+- if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
++ if (tp->ucopy.task == current &&
++ sock_owned_by_user(sk) &&
++ tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
+ copied_early = 1;
+ eaten = 1;
+ }
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index a7078fd..f85de8e 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -543,6 +543,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
+
+ del_timer_sync(&sdata->u.mesh.housekeeping_timer);
+ del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
++ del_timer_sync(&sdata->u.mesh.mesh_path_timer);
+ /*
+ * If the timer fired while we waited for it, it will have
+ * requeued the work. Now the work will be running again
+diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
+index 17859ea..351a69b 100644
+--- a/net/sched/sch_sfb.c
++++ b/net/sched/sch_sfb.c
+@@ -559,6 +559,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
+
+ sch->qstats.backlog = q->qdisc->qstats.backlog;
+ opts = nla_nest_start(skb, TCA_OPTIONS);
++ if (opts == NULL)
++ goto nla_put_failure;
+ NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt);
+ return nla_nest_end(skb, opts);
+
+diff --git a/net/sctp/input.c b/net/sctp/input.c
+index b7692aa..0fc18c7 100644
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -736,15 +736,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
+
+ epb = &ep->base;
+
+- if (hlist_unhashed(&epb->node))
+- return;
+-
+ epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
+
+ head = &sctp_ep_hashtable[epb->hashent];
+
+ sctp_write_lock(&head->lock);
+- __hlist_del(&epb->node);
++ hlist_del_init(&epb->node);
+ sctp_write_unlock(&head->lock);
+ }
+
+@@ -825,7 +822,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
+ head = &sctp_assoc_hashtable[epb->hashent];
+
+ sctp_write_lock(&head->lock);
+- __hlist_del(&epb->node);
++ hlist_del_init(&epb->node);
+ sctp_write_unlock(&head->lock);
+ }
+
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 0075554..8e49d76 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1231,8 +1231,14 @@ out_free:
+ SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
+ " kaddrs: %p err: %d\n",
+ asoc, kaddrs, err);
+- if (asoc)
++ if (asoc) {
++ /* sctp_primitive_ASSOCIATE may have added this association
++ * To the hash table, try to unhash it, just in case, its a noop
++ * if it wasn't hashed so we're safe
++ */
++ sctp_unhash_established(asoc);
+ sctp_association_free(asoc);
++ }
+ return err;
+ }
+
+@@ -1942,8 +1948,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
+ goto out_unlock;
+
+ out_free:
+- if (new_asoc)
++ if (new_asoc) {
++ sctp_unhash_established(asoc);
+ sctp_association_free(asoc);
++ }
+ out_unlock:
+ sctp_release_sock(sk);
+
+diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
+index 788a12c..2ab7850 100644
+--- a/net/wanrouter/wanmain.c
++++ b/net/wanrouter/wanmain.c
+@@ -602,36 +602,31 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
+ * successfully, add it to the interface list.
+ */
+
+- if (dev->name == NULL) {
+- err = -EINVAL;
+- } else {
++#ifdef WANDEBUG
++ printk(KERN_INFO "%s: registering interface %s...\n",
++ wanrouter_modname, dev->name);
++#endif
+
+- #ifdef WANDEBUG
+- printk(KERN_INFO "%s: registering interface %s...\n",
+- wanrouter_modname, dev->name);
+- #endif
+-
+- err = register_netdev(dev);
+- if (!err) {
+- struct net_device *slave = NULL;
+- unsigned long smp_flags=0;
+-
+- lock_adapter_irq(&wandev->lock, &smp_flags);
+-
+- if (wandev->dev == NULL) {
+- wandev->dev = dev;
+- } else {
+- for (slave=wandev->dev;
+- DEV_TO_SLAVE(slave);
+- slave = DEV_TO_SLAVE(slave))
+- DEV_TO_SLAVE(slave) = dev;
+- }
+- ++wandev->ndev;
+-
+- unlock_adapter_irq(&wandev->lock, &smp_flags);
+- err = 0; /* done !!! */
+- goto out;
++ err = register_netdev(dev);
++ if (!err) {
++ struct net_device *slave = NULL;
++ unsigned long smp_flags=0;
++
++ lock_adapter_irq(&wandev->lock, &smp_flags);
++
++ if (wandev->dev == NULL) {
++ wandev->dev = dev;
++ } else {
++ for (slave=wandev->dev;
++ DEV_TO_SLAVE(slave);
++ slave = DEV_TO_SLAVE(slave))
++ DEV_TO_SLAVE(slave) = dev;
+ }
++ ++wandev->ndev;
++
++ unlock_adapter_irq(&wandev->lock, &smp_flags);
++ err = 0; /* done !!! */
++ goto out;
+ }
+ if (wandev->del_if)
+ wandev->del_if(wandev, dev);
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 220f3bd..8f5042d 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -971,6 +971,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
+ */
+ synchronize_rcu();
+ INIT_LIST_HEAD(&wdev->list);
++ /*
++ * Ensure that all events have been processed and
++ * freed.
++ */
++ cfg80211_process_wdev_events(wdev);
+ break;
+ case NETDEV_PRE_UP:
+ if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
+diff --git a/net/wireless/core.h b/net/wireless/core.h
+index b9ec306..02c3be3 100644
+--- a/net/wireless/core.h
++++ b/net/wireless/core.h
+@@ -426,6 +426,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, enum nl80211_iftype ntype,
+ u32 *flags, struct vif_params *params);
+ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
++void cfg80211_process_wdev_events(struct wireless_dev *wdev);
+
+ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index b5e4c1c..22fb802 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -725,7 +725,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
+ wdev->connect_keys = NULL;
+ }
+
+-static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
++void cfg80211_process_wdev_events(struct wireless_dev *wdev)
+ {
+ struct cfg80211_event *ev;
+ unsigned long flags;
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 51a1afc..402f330 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -3059,7 +3059,6 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
+ SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
+ SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
+- SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
+ SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 2e2eb93..32c8169 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4981,6 +4981,8 @@ static const struct alc_fixup alc269_fixups[] = {
+ [ALC269_FIXUP_PCM_44K] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc269_fixup_pcm_44k,
++ .chained = true,
++ .chain_id = ALC269_FIXUP_QUANTA_MUTE
+ },
+ [ALC269_FIXUP_STEREO_DMIC] = {
+ .type = ALC_FIXUP_FUNC,
+@@ -5077,9 +5079,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+- SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
+- SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
++ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+
+ #if 1
diff --git a/1028_linux-3.2.29.patch b/1028_linux-3.2.29.patch
new file mode 100644
index 00000000..3c651794
--- /dev/null
+++ b/1028_linux-3.2.29.patch
@@ -0,0 +1,4279 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index f986e7d..82d7fa6 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -5452,7 +5452,7 @@ F: Documentation/blockdev/ramdisk.txt
+ F: drivers/block/brd.c
+
+ RANDOM NUMBER DRIVER
+-M: Matt Mackall <mpm@selenic.com>
++M: Theodore Ts'o" <tytso@mit.edu>
+ S: Maintained
+ F: drivers/char/random.c
+
+diff --git a/Makefile b/Makefile
+index 5368961..d96fc2a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 28
++SUBLEVEL = 29
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
+index 640f909..6f1aca7 100644
+--- a/arch/alpha/include/asm/atomic.h
++++ b/arch/alpha/include/asm/atomic.h
+@@ -14,8 +14,8 @@
+ */
+
+
+-#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
+-#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } )
++#define ATOMIC_INIT(i) { (i) }
++#define ATOMIC64_INIT(i) { (i) }
+
+ #define atomic_read(v) (*(volatile int *)&(v)->counter)
+ #define atomic64_read(v) (*(volatile long *)&(v)->counter)
+diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h
+index 06edfef..3eeb47c 100644
+--- a/arch/alpha/include/asm/socket.h
++++ b/arch/alpha/include/asm/socket.h
+@@ -69,9 +69,11 @@
+
+ #define SO_RXQ_OVFL 40
+
++#ifdef __KERNEL__
+ /* O_NONBLOCK clashes with the bits used for socket types. Therefore we
+ * have to define SOCK_NONBLOCK to a different value here.
+ */
+ #define SOCK_NONBLOCK 0x40000000
++#endif /* __KERNEL__ */
+
+ #endif /* _ASM_SOCKET_H */
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index 9451dce..8512475 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -288,13 +288,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+- * <--------------- offset --------------------> <- type --> 0 0 0
++ * <--------------- offset ----------------------> < type -> 0 0 0
+ *
+- * This gives us up to 63 swap files and 32GB per swap file. Note that
++ * This gives us up to 31 swap files and 64GB per swap file. Note that
+ * the offset field is always non-zero.
+ */
+ #define __SWP_TYPE_SHIFT 3
+-#define __SWP_TYPE_BITS 6
++#define __SWP_TYPE_BITS 5
+ #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
+ #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+
+diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
+index c202113..ea94765 100644
+--- a/arch/arm/mm/tlb-v7.S
++++ b/arch/arm/mm/tlb-v7.S
+@@ -38,10 +38,10 @@ ENTRY(v7wbi_flush_user_tlb_range)
+ dsb
+ mov r0, r0, lsr #PAGE_SHIFT @ align address
+ mov r1, r1, lsr #PAGE_SHIFT
+-#ifdef CONFIG_ARM_ERRATA_720789
+- mov r3, #0
+-#else
+ asid r3, r3 @ mask ASID
++#ifdef CONFIG_ARM_ERRATA_720789
++ ALT_SMP(W(mov) r3, #0 )
++ ALT_UP(W(nop) )
+ #endif
+ orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
+ mov r1, r1, lsl #PAGE_SHIFT
+diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
+index ad83dad..f0702f3 100644
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -628,8 +628,10 @@ static int __init vfp_init(void)
+ if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
+ elf_hwcap |= HWCAP_NEON;
+ #endif
++#ifdef CONFIG_VFPv3
+ if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
+ elf_hwcap |= HWCAP_VFPv4;
++#endif
+ }
+ }
+ return 0;
+diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
+index f581a18..df7d12c 100644
+--- a/arch/x86/mm/hugetlbpage.c
++++ b/arch/x86/mm/hugetlbpage.c
+@@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+ }
+
+ /*
+- * search for a shareable pmd page for hugetlb.
++ * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
++ * and returns the corresponding pte. While this is not necessary for the
++ * !shared pmd case because we can allocate the pmd later as well, it makes the
++ * code much cleaner. pmd allocation is essential for the shared case because
++ * pud has to be populated inside the same i_mmap_mutex section - otherwise
++ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
++ * bad pmd for sharing.
+ */
+-static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
++static pte_t *
++huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+ {
+ struct vm_area_struct *vma = find_vma(mm, addr);
+ struct address_space *mapping = vma->vm_file->f_mapping;
+@@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+ struct vm_area_struct *svma;
+ unsigned long saddr;
+ pte_t *spte = NULL;
++ pte_t *pte;
+
+ if (!vma_shareable(vma, addr))
+- return;
++ return (pte_t *)pmd_alloc(mm, pud, addr);
+
+ mutex_lock(&mapping->i_mmap_mutex);
+ vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
+@@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+ put_page(virt_to_page(spte));
+ spin_unlock(&mm->page_table_lock);
+ out:
++ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ mutex_unlock(&mapping->i_mmap_mutex);
++ return pte;
+ }
+
+ /*
+@@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
+ } else {
+ BUG_ON(sz != PMD_SIZE);
+ if (pud_none(*pud))
+- huge_pmd_share(mm, addr, pud);
+- pte = (pte_t *) pmd_alloc(mm, pud, addr);
++ pte = huge_pmd_share(mm, addr, pud);
++ else
++ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ }
+ }
+ BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
+diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
+index e7d13f5..d05f2fe 100644
+--- a/drivers/acpi/acpica/tbxface.c
++++ b/drivers/acpi/acpica/tbxface.c
+@@ -436,6 +436,7 @@ acpi_get_table_with_size(char *signature,
+
+ return (AE_NOT_FOUND);
+ }
++ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
+
+ acpi_status
+ acpi_get_table(char *signature,
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 8c78443..3790c80 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -385,7 +385,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+ goto repeat;
+ }
+
+- dev->power.deferred_resume = false;
+ if (dev->power.no_callbacks)
+ goto no_callback; /* Assume success. */
+
+@@ -446,6 +445,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
+ wake_up_all(&dev->power.wait_queue);
+
+ if (dev->power.deferred_resume) {
++ dev->power.deferred_resume = false;
+ rpm_resume(dev, 0);
+ retval = -EAGAIN;
+ goto out;
+@@ -568,6 +568,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
+ || dev->parent->power.runtime_status == RPM_ACTIVE) {
+ atomic_inc(&dev->parent->power.child_count);
+ spin_unlock(&dev->parent->power.lock);
++ retval = 1;
+ goto no_callback; /* Assume success. */
+ }
+ spin_unlock(&dev->parent->power.lock);
+@@ -645,7 +646,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
+ }
+ wake_up_all(&dev->power.wait_queue);
+
+- if (!retval)
++ if (retval >= 0)
+ rpm_idle(dev, RPM_ASYNC);
+
+ out:
+diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
+index acda773..38aa6dd 100644
+--- a/drivers/block/cciss_scsi.c
++++ b/drivers/block/cciss_scsi.c
+@@ -763,16 +763,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
+ {
+ case CMD_TARGET_STATUS:
+ /* Pass it up to the upper layers... */
+- if( ei->ScsiStatus)
+- {
+-#if 0
+- printk(KERN_WARNING "cciss: cmd %p "
+- "has SCSI Status = %x\n",
+- c, ei->ScsiStatus);
+-#endif
+- cmd->result |= (ei->ScsiStatus << 1);
+- }
+- else { /* scsi status is zero??? How??? */
++ if (!ei->ScsiStatus) {
+
+ /* Ordinarily, this case should never happen, but there is a bug
+ in some released firmware revisions that allows it to happen
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 650a308..de9c800 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -4,6 +4,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/hdreg.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/virtio.h>
+ #include <linux/virtio_blk.h>
+ #include <linux/scatterlist.h>
+@@ -26,14 +27,17 @@ struct virtio_blk
+ /* The disk structure for the kernel. */
+ struct gendisk *disk;
+
+- /* Request tracking. */
+- struct list_head reqs;
+-
+ mempool_t *pool;
+
+ /* Process context for config space updates */
+ struct work_struct config_work;
+
++ /* Lock for config space updates */
++ struct mutex config_lock;
++
++ /* enable config space updates */
++ bool config_enable;
++
+ /* What host tells us, plus 2 for header & tailer. */
+ unsigned int sg_elems;
+
+@@ -46,7 +50,6 @@ struct virtio_blk
+
+ struct virtblk_req
+ {
+- struct list_head list;
+ struct request *req;
+ struct virtio_blk_outhdr out_hdr;
+ struct virtio_scsi_inhdr in_hdr;
+@@ -90,7 +93,6 @@ static void blk_done(struct virtqueue *vq)
+ }
+
+ __blk_end_request_all(vbr->req, error);
+- list_del(&vbr->list);
+ mempool_free(vbr, vblk->pool);
+ }
+ /* In case queue is stopped waiting for more buffers. */
+@@ -175,7 +177,6 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
+ return false;
+ }
+
+- list_add_tail(&vbr->list, &vblk->reqs);
+ return true;
+ }
+
+@@ -316,6 +317,10 @@ static void virtblk_config_changed_work(struct work_struct *work)
+ char cap_str_2[10], cap_str_10[10];
+ u64 capacity, size;
+
++ mutex_lock(&vblk->config_lock);
++ if (!vblk->config_enable)
++ goto done;
++
+ /* Host must always specify the capacity. */
+ vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
+ &capacity, sizeof(capacity));
+@@ -338,6 +343,8 @@ static void virtblk_config_changed_work(struct work_struct *work)
+ cap_str_10, cap_str_2);
+
+ set_capacity(vblk->disk, capacity);
++done:
++ mutex_unlock(&vblk->config_lock);
+ }
+
+ static void virtblk_config_changed(struct virtio_device *vdev)
+@@ -381,11 +388,12 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
+ goto out_free_index;
+ }
+
+- INIT_LIST_HEAD(&vblk->reqs);
+ vblk->vdev = vdev;
+ vblk->sg_elems = sg_elems;
+ sg_init_table(vblk->sg, vblk->sg_elems);
++ mutex_init(&vblk->config_lock);
+ INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
++ vblk->config_enable = true;
+
+ /* We expect one virtqueue, for output. */
+ vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
+@@ -539,16 +547,19 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
+ struct virtio_blk *vblk = vdev->priv;
+ int index = vblk->index;
+
+- flush_work(&vblk->config_work);
++ /* Prevent config work handler from accessing the device. */
++ mutex_lock(&vblk->config_lock);
++ vblk->config_enable = false;
++ mutex_unlock(&vblk->config_lock);
+
+- /* Nothing should be pending. */
+- BUG_ON(!list_empty(&vblk->reqs));
++ del_gendisk(vblk->disk);
++ blk_cleanup_queue(vblk->disk->queue);
+
+ /* Stop all the virtqueues. */
+ vdev->config->reset(vdev);
+
+- del_gendisk(vblk->disk);
+- blk_cleanup_queue(vblk->disk->queue);
++ flush_work(&vblk->config_work);
++
+ put_disk(vblk->disk);
+ mempool_destroy(vblk->pool);
+ vdev->config->del_vqs(vdev);
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 631d4f6..8ae9235 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1114,6 +1114,16 @@ static void init_std_data(struct entropy_store *r)
+ mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
+ }
+
++/*
++ * Note that setup_arch() may call add_device_randomness()
++ * long before we get here. This allows seeding of the pools
++ * with some platform dependent data very early in the boot
++ * process. But it limits our options here. We must use
++ * statically allocated structures that already have all
++ * initializations complete at compile time. We should also
++ * take care not to overwrite the precious per platform data
++ * we were given.
++ */
+ static int rand_initialize(void)
+ {
+ init_std_data(&input_pool);
+@@ -1391,10 +1401,15 @@ static int proc_do_uuid(ctl_table *table, int write,
+ uuid = table->data;
+ if (!uuid) {
+ uuid = tmp_uuid;
+- uuid[8] = 0;
+- }
+- if (uuid[8] == 0)
+ generate_random_uuid(uuid);
++ } else {
++ static DEFINE_SPINLOCK(bootid_spinlock);
++
++ spin_lock(&bootid_spinlock);
++ if (!uuid[8])
++ generate_random_uuid(uuid);
++ spin_unlock(&bootid_spinlock);
++ }
+
+ sprintf(buf, "%pU", uuid);
+
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 153980b..b298158 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -6,6 +6,7 @@
+ #include <linux/dmi.h>
+ #include <linux/efi.h>
+ #include <linux/bootmem.h>
++#include <linux/random.h>
+ #include <asm/dmi.h>
+
+ /*
+@@ -111,6 +112,8 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
+
+ dmi_table(buf, dmi_len, dmi_num, decode, NULL);
+
++ add_device_randomness(buf, dmi_len);
++
+ dmi_iounmap(buf, dmi_len);
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index cc75c4b..3eed270 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -4748,17 +4748,6 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+ continue;
+ }
+
+- if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+- /* Use VBT settings if we have an eDP panel */
+- unsigned int edp_bpc = dev_priv->edp.bpp / 3;
+-
+- if (edp_bpc < display_bpc) {
+- DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+- display_bpc = edp_bpc;
+- }
+- continue;
+- }
+-
+ /* Not one of the known troublemakers, check the EDID */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index fae2050..c8ecaab 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -1152,10 +1152,14 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+ WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
+
+ pp = ironlake_get_pp_control(dev_priv);
+- pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
++ /* We need to switch off panel power _and_ force vdd, for otherwise some
++ * panels get very unhappy and cease to work. */
++ pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
++ intel_dp->want_panel_vdd = false;
++
+ ironlake_wait_panel_off(intel_dp);
+ }
+
+@@ -1265,11 +1269,9 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
+ * ensure that we have vdd while we switch off the panel. */
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_backlight_off(intel_dp);
+- ironlake_edp_panel_off(intel_dp);
+-
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
++ ironlake_edp_panel_off(intel_dp);
+ intel_dp_link_down(intel_dp);
+- ironlake_edp_panel_vdd_off(intel_dp, false);
+ }
+
+ static void intel_dp_commit(struct drm_encoder *encoder)
+@@ -1304,11 +1306,9 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
+ /* Switching the panel off requires vdd. */
+ ironlake_edp_panel_vdd_on(intel_dp);
+ ironlake_edp_backlight_off(intel_dp);
+- ironlake_edp_panel_off(intel_dp);
+-
+ intel_dp_sink_dpms(intel_dp, mode);
++ ironlake_edp_panel_off(intel_dp);
+ intel_dp_link_down(intel_dp);
+- ironlake_edp_panel_vdd_off(intel_dp, false);
+
+ if (is_cpu_edp(intel_dp))
+ ironlake_edp_pll_off(encoder);
+diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
+index cb006a7..3002d82 100644
+--- a/drivers/gpu/drm/nouveau/nvd0_display.c
++++ b/drivers/gpu/drm/nouveau/nvd0_display.c
+@@ -472,7 +472,7 @@ static int
+ nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+ {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+- const u32 data = (y << 16) | x;
++ const u32 data = (y << 16) | (x & 0xffff);
+
+ nv_wr32(crtc->dev, 0x64d084 + (nv_crtc->index * 0x1000), data);
+ nv_wr32(crtc->dev, 0x64d080 + (nv_crtc->index * 0x1000), 0x00000000);
+diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
+index 1b50ad8..4760466 100644
+--- a/drivers/gpu/drm/radeon/atombios.h
++++ b/drivers/gpu/drm/radeon/atombios.h
+@@ -101,6 +101,7 @@
+ #define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5)
+ #define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5)
+ #define ATOM_ENCODER_INIT (ATOM_DISABLE+7)
++#define ATOM_INIT (ATOM_DISABLE+7)
+ #define ATOM_GET_STATUS (ATOM_DISABLE+8)
+
+ #define ATOM_BLANKING 1
+@@ -251,25 +252,25 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+ USHORT SetEngineClock; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT SetMemoryClock; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT SetPixelClock; //Function Table,directly used by various SW components,latest version 1.2
+- USHORT DynamicClockGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
++ USHORT EnableDispPowerGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT ResetMemoryDLL; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT ResetMemoryDevice; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+- USHORT MemoryPLLInit;
+- USHORT AdjustDisplayPll; //only used by Bios
++ USHORT MemoryPLLInit; //Atomic Table, used only by Bios
++ USHORT AdjustDisplayPll; //Atomic Table, used by various SW componentes.
+ USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios
+ USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete , only used by Bios
+ USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2
+ USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3
+- USHORT LCD1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
++ USHORT HW_Misc_Operation; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC1EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC2EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DVOOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT CV1OutputControl; //Atomic Table, Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
+- USHORT GetConditionalGoldenSetting; //only used by Bios
++ USHORT GetConditionalGoldenSetting; //Only used by Bios
+ USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1
+- USHORT TMDSAEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
+- USHORT LVDSEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
++ USHORT PatchMCSetting; //only used by BIOS
++ USHORT MC_SEQ_Control; //only used by BIOS
+ USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
+ USHORT EnableScaler; //Atomic Table, used only by Bios
+ USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
+@@ -282,7 +283,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+ USHORT SetCRTC_Replication; //Atomic Table, used only by Bios
+ USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT EnableGraphSurfaces; //Atomic Table, used only by Bios
+- USHORT UpdateCRTC_DoubleBufferRegisters;
++ USHORT UpdateCRTC_DoubleBufferRegisters; //Atomic Table, used only by Bios
+ USHORT LUT_AutoFill; //Atomic Table, only used by Bios
+ USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios
+ USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1
+@@ -308,27 +309,36 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+ USHORT SetVoltage; //Function Table,directly and/or indirectly used by various SW components,latest version 1.1
+ USHORT DAC1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC2OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+- USHORT SetupHWAssistedI2CStatus; //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C"
++ USHORT ComputeMemoryClockParam; //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C"
+ USHORT ClockSource; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT MemoryDeviceInit; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+- USHORT EnableYUV; //Atomic Table, indirectly used by various SW components,called from EnableVGARender
++ USHORT GetDispObjectInfo; //Atomic Table, indirectly used by various SW components,called from EnableVGARender
+ USHORT DIG1EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG2EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG1TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG2TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT ProcessAuxChannelTransaction; //Function Table,only used by Bios
+ USHORT DPEncoderService; //Function Table,only used by Bios
++ USHORT GetVoltageInfo; //Function Table,only used by Bios since SI
+ }ATOM_MASTER_LIST_OF_COMMAND_TABLES;
+
+ // For backward compatible
+ #define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction
+-#define UNIPHYTransmitterControl DIG1TransmitterControl
+-#define LVTMATransmitterControl DIG2TransmitterControl
++#define DPTranslatorControl DIG2EncoderControl
++#define UNIPHYTransmitterControl DIG1TransmitterControl
++#define LVTMATransmitterControl DIG2TransmitterControl
+ #define SetCRTC_DPM_State GetConditionalGoldenSetting
+ #define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
+ #define HPDInterruptService ReadHWAssistedI2CStatus
+ #define EnableVGA_Access GetSCLKOverMCLKRatio
+-#define GetDispObjectInfo EnableYUV
++#define EnableYUV GetDispObjectInfo
++#define DynamicClockGating EnableDispPowerGating
++#define SetupHWAssistedI2CStatus ComputeMemoryClockParam
++
++#define TMDSAEncoderControl PatchMCSetting
++#define LVDSEncoderControl MC_SEQ_Control
++#define LCD1OutputControl HW_Misc_Operation
++
+
+ typedef struct _ATOM_MASTER_COMMAND_TABLE
+ {
+@@ -495,6 +505,34 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
+ // ucInputFlag
+ #define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode
+
++// use for ComputeMemoryClockParamTable
++typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1
++{
++ union
++ {
++ ULONG ulClock;
++ ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output:UPPER_WORD=FB_DIV_INTEGER, LOWER_WORD=FB_DIV_FRAC shl (16-FB_FRACTION_BITS)
++ };
++ UCHAR ucDllSpeed; //Output
++ UCHAR ucPostDiv; //Output
++ union{
++ UCHAR ucInputFlag; //Input : ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN: 1-StrobeMode, 0-PerformanceMode
++ UCHAR ucPllCntlFlag; //Output:
++ };
++ UCHAR ucBWCntl;
++}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1;
++
++// definition of ucInputFlag
++#define MPLL_INPUT_FLAG_STROBE_MODE_EN 0x01
++// definition of ucPllCntlFlag
++#define MPLL_CNTL_FLAG_VCO_MODE_MASK 0x03
++#define MPLL_CNTL_FLAG_BYPASS_DQ_PLL 0x04
++#define MPLL_CNTL_FLAG_QDR_ENABLE 0x08
++#define MPLL_CNTL_FLAG_AD_HALF_RATE 0x10
++
++//MPLL_CNTL_FLAG_BYPASS_AD_PLL has a wrong name, should be BYPASS_DQ_PLL
++#define MPLL_CNTL_FLAG_BYPASS_AD_PLL 0x04
++
+ typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
+ {
+ ATOM_COMPUTE_CLOCK_FREQ ulClock;
+@@ -562,6 +600,16 @@ typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS
+ #define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS
+
+ /****************************************************************************/
++// Structure used by EnableDispPowerGatingTable.ctb
++/****************************************************************************/
++typedef struct _ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1
++{
++ UCHAR ucDispPipeId; // ATOM_CRTC1, ATOM_CRTC2, ...
++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
++ UCHAR ucPadding[2];
++}ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1;
++
++/****************************************************************************/
+ // Structure used by EnableASIC_StaticPwrMgtTable.ctb
+ /****************************************************************************/
+ typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+@@ -807,6 +855,7 @@ typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
+ #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ 0x00
+ #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ 0x01
+ #define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ 0x02
++#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ 0x03
+ #define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL 0x70
+ #define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER 0x00
+ #define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER 0x10
+@@ -814,6 +863,7 @@ typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
+ #define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER 0x30
+ #define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER 0x40
+ #define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER 0x50
++#define ATOM_ENCODER_CONFIG_V4_DIG6_ENCODER 0x60
+
+ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
+ {
+@@ -1171,6 +1221,106 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
+ #define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3 0x80 //EF
+
+
++typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V5
++{
++#if ATOM_BIG_ENDIAN
++ UCHAR ucReservd1:1;
++ UCHAR ucHPDSel:3;
++ UCHAR ucPhyClkSrcId:2;
++ UCHAR ucCoherentMode:1;
++ UCHAR ucReserved:1;
++#else
++ UCHAR ucReserved:1;
++ UCHAR ucCoherentMode:1;
++ UCHAR ucPhyClkSrcId:2;
++ UCHAR ucHPDSel:3;
++ UCHAR ucReservd1:1;
++#endif
++}ATOM_DIG_TRANSMITTER_CONFIG_V5;
++
++typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
++{
++ USHORT usSymClock; // Encoder Clock in 10kHz,(DP mode)= linkclock/10, (TMDS/LVDS/HDMI)= pixel clock, (HDMI deep color), =pixel clock * deep_color_ratio
++ UCHAR ucPhyId; // 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4= UNIPHYE 5=UNIPHYF
++ UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_xxx
++ UCHAR ucLaneNum; // indicate lane number 1-8
++ UCHAR ucConnObjId; // Connector Object Id defined in ObjectId.h
++ UCHAR ucDigMode; // indicate DIG mode
++ union{
++ ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
++ UCHAR ucConfig;
++ };
++ UCHAR ucDigEncoderSel; // indicate DIG front end encoder
++ UCHAR ucDPLaneSet;
++ UCHAR ucReserved;
++ UCHAR ucReserved1;
++}DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5;
++
++//ucPhyId
++#define ATOM_PHY_ID_UNIPHYA 0
++#define ATOM_PHY_ID_UNIPHYB 1
++#define ATOM_PHY_ID_UNIPHYC 2
++#define ATOM_PHY_ID_UNIPHYD 3
++#define ATOM_PHY_ID_UNIPHYE 4
++#define ATOM_PHY_ID_UNIPHYF 5
++#define ATOM_PHY_ID_UNIPHYG 6
++
++// ucDigEncoderSel
++#define ATOM_TRANMSITTER_V5__DIGA_SEL 0x01
++#define ATOM_TRANMSITTER_V5__DIGB_SEL 0x02
++#define ATOM_TRANMSITTER_V5__DIGC_SEL 0x04
++#define ATOM_TRANMSITTER_V5__DIGD_SEL 0x08
++#define ATOM_TRANMSITTER_V5__DIGE_SEL 0x10
++#define ATOM_TRANMSITTER_V5__DIGF_SEL 0x20
++#define ATOM_TRANMSITTER_V5__DIGG_SEL 0x40
++
++// ucDigMode
++#define ATOM_TRANSMITTER_DIGMODE_V5_DP 0
++#define ATOM_TRANSMITTER_DIGMODE_V5_LVDS 1
++#define ATOM_TRANSMITTER_DIGMODE_V5_DVI 2
++#define ATOM_TRANSMITTER_DIGMODE_V5_HDMI 3
++#define ATOM_TRANSMITTER_DIGMODE_V5_SDVO 4
++#define ATOM_TRANSMITTER_DIGMODE_V5_DP_MST 5
++
++// ucDPLaneSet
++#define DP_LANE_SET__0DB_0_4V 0x00
++#define DP_LANE_SET__0DB_0_6V 0x01
++#define DP_LANE_SET__0DB_0_8V 0x02
++#define DP_LANE_SET__0DB_1_2V 0x03
++#define DP_LANE_SET__3_5DB_0_4V 0x08
++#define DP_LANE_SET__3_5DB_0_6V 0x09
++#define DP_LANE_SET__3_5DB_0_8V 0x0a
++#define DP_LANE_SET__6DB_0_4V 0x10
++#define DP_LANE_SET__6DB_0_6V 0x11
++#define DP_LANE_SET__9_5DB_0_4V 0x18
++
++// ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
++// Bit1
++#define ATOM_TRANSMITTER_CONFIG_V5_COHERENT 0x02
++
++// Bit3:2
++#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_MASK 0x0c
++#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_SHIFT 0x02
++
++#define ATOM_TRANSMITTER_CONFIG_V5_P1PLL 0x00
++#define ATOM_TRANSMITTER_CONFIG_V5_P2PLL 0x04
++#define ATOM_TRANSMITTER_CONFIG_V5_P0PLL 0x08
++#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT 0x0c
++// Bit6:4
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_MASK 0x70
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_SHIFT 0x04
++
++#define ATOM_TRANSMITTER_CONFIG_V5_NO_HPD_SEL 0x00
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL 0x10
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL 0x20
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL 0x30
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL 0x40
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL 0x50
++#define ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL 0x60
++
++#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION_V1_5 DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
++
++
+ /****************************************************************************/
+ // Structures used by ExternalEncoderControlTable V1.3
+ // ASIC Families: Evergreen, Llano, NI
+@@ -1793,6 +1943,7 @@ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
+ #define ATOM_PPLL_SS_TYPE_V3_P1PLL 0x00
+ #define ATOM_PPLL_SS_TYPE_V3_P2PLL 0x04
+ #define ATOM_PPLL_SS_TYPE_V3_DCPLL 0x08
++#define ATOM_PPLL_SS_TYPE_V3_P0PLL ATOM_PPLL_SS_TYPE_V3_DCPLL
+ #define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK 0x00FF
+ #define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0
+ #define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK 0x0F00
+@@ -2030,12 +2181,77 @@ typedef struct _SET_VOLTAGE_PARAMETERS_V2
+ USHORT usVoltageLevel; // real voltage level
+ }SET_VOLTAGE_PARAMETERS_V2;
+
++
++typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
++{
++ UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
++ UCHAR ucVoltageMode; // Indicate action: Set voltage level
++ USHORT usVoltageLevel; // real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. )
++}SET_VOLTAGE_PARAMETERS_V1_3;
++
++//ucVoltageType
++#define VOLTAGE_TYPE_VDDC 1
++#define VOLTAGE_TYPE_MVDDC 2
++#define VOLTAGE_TYPE_MVDDQ 3
++#define VOLTAGE_TYPE_VDDCI 4
++
++//SET_VOLTAGE_PARAMETERS_V3.ucVoltageMode
++#define ATOM_SET_VOLTAGE 0 //Set voltage Level
++#define ATOM_INIT_VOLTAGE_REGULATOR 3 //Init Regulator
++#define ATOM_SET_VOLTAGE_PHASE 4 //Set Vregulator Phase
++#define ATOM_GET_MAX_VOLTAGE 6 //Get Max Voltage, not used in SetVoltageTable v1.3
++#define ATOM_GET_VOLTAGE_LEVEL 6 //Get Voltage level from vitual voltage ID
++
++// define vitual voltage id in usVoltageLevel
++#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
++#define ATOM_VIRTUAL_VOLTAGE_ID1 0xff02
++#define ATOM_VIRTUAL_VOLTAGE_ID2 0xff03
++#define ATOM_VIRTUAL_VOLTAGE_ID3 0xff04
++
+ typedef struct _SET_VOLTAGE_PS_ALLOCATION
+ {
+ SET_VOLTAGE_PARAMETERS sASICSetVoltage;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+ }SET_VOLTAGE_PS_ALLOCATION;
+
++// New Added from SI for GetVoltageInfoTable, input parameter structure
++typedef struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1
++{
++ UCHAR ucVoltageType; // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
++ UCHAR ucVoltageMode; // Input: Indicate action: Get voltage info
++ USHORT usVoltageLevel; // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id
++ ULONG ulReserved;
++}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1;
++
++// New Added from SI for GetVoltageInfoTable, output parameter structure when ucVotlageMode == ATOM_GET_VOLTAGE_VID
++typedef struct _GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
++{
++ ULONG ulVotlageGpioState;
++ ULONG ulVoltageGPioMask;
++}GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
++
++// New Added from SI for GetVoltageInfoTable, output parameter structure when ucVotlageMode == ATOM_GET_VOLTAGE_STATEx_LEAKAGE_VID
++typedef struct _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
++{
++ USHORT usVoltageLevel;
++ USHORT usVoltageId; // Voltage Id programmed in Voltage Regulator
++ ULONG ulReseved;
++}GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
++
++
++// GetVoltageInfo v1.1 ucVoltageMode
++#define ATOM_GET_VOLTAGE_VID 0x00
++#define ATOM_GET_VOTLAGE_INIT_SEQ 0x03
++#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID 0x04
++// for SI, this state map to 0xff02 voltage state in Power Play table, which is power boost state
++#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
++
++// for SI, this state map to 0xff01 voltage state in Power Play table, which is performance state
++#define ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
++// undefined power state
++#define ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
++#define ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
++
+ /****************************************************************************/
+ // Structures used by TVEncoderControlTable
+ /****************************************************************************/
+@@ -2065,9 +2281,9 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
+ USHORT MultimediaConfigInfo; // Only used by MM Lib,latest version 2.1, not configuable from Bios, need to include the table to build Bios
+ USHORT StandardVESA_Timing; // Only used by Bios
+ USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4
+- USHORT DAC_Info; // Will be obsolete from R600
++ USHORT PaletteData; // Only used by BIOS
+ USHORT LCD_Info; // Shared by various SW components,latest version 1.3, was called LVDS_Info
+- USHORT TMDS_Info; // Will be obsolete from R600
++ USHORT DIGTransmitterInfo; // Internal used by VBIOS only version 3.1
+ USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1
+ USHORT SupportedDevicesInfo; // Will be obsolete from R600
+ USHORT GPIO_I2C_Info; // Shared by various SW components,latest version 1.2 will be used from R600
+@@ -2096,15 +2312,16 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
+ USHORT PowerSourceInfo; // Shared by various SW components, latest versoin 1.1
+ }ATOM_MASTER_LIST_OF_DATA_TABLES;
+
+-// For backward compatible
+-#define LVDS_Info LCD_Info
+-
+ typedef struct _ATOM_MASTER_DATA_TABLE
+ {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
+ }ATOM_MASTER_DATA_TABLE;
+
++// For backward compatible
++#define LVDS_Info LCD_Info
++#define DAC_Info PaletteData
++#define TMDS_Info DIGTransmitterInfo
+
+ /****************************************************************************/
+ // Structure used in MultimediaCapabilityInfoTable
+@@ -2171,7 +2388,9 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
+ typedef struct _ATOM_FIRMWARE_CAPABILITY
+ {
+ #if ATOM_BIG_ENDIAN
+- USHORT Reserved:3;
++ USHORT Reserved:1;
++ USHORT SCL2Redefined:1;
++ USHORT PostWithoutModeSet:1;
+ USHORT HyperMemory_Size:4;
+ USHORT HyperMemory_Support:1;
+ USHORT PPMode_Assigned:1;
+@@ -2193,7 +2412,9 @@ typedef struct _ATOM_FIRMWARE_CAPABILITY
+ USHORT PPMode_Assigned:1;
+ USHORT HyperMemory_Support:1;
+ USHORT HyperMemory_Size:4;
+- USHORT Reserved:3;
++ USHORT PostWithoutModeSet:1;
++ USHORT SCL2Redefined:1;
++ USHORT Reserved:1;
+ #endif
+ }ATOM_FIRMWARE_CAPABILITY;
+
+@@ -2418,7 +2639,8 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_2
+ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
+ ULONG ulReserved4; //Was ulAsicMaximumVoltage
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+- ULONG ulReserved5; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
++ UCHAR ucRemoteDisplayConfig;
++ UCHAR ucReserved5[3]; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
+ ULONG ulReserved6; //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
+ ULONG ulReserved7; //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
+ USHORT usReserved11; //Was usMaxPixelClock; //In 10Khz unit, Max. Pclk used only for DAC
+@@ -2438,6 +2660,11 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_2
+
+ #define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_2
+
++
++// definition of ucRemoteDisplayConfig
++#define REMOTE_DISPLAY_DISABLE 0x00
++#define REMOTE_DISPLAY_ENABLE 0x01
++
+ /****************************************************************************/
+ // Structures used in IntegratedSystemInfoTable
+ /****************************************************************************/
+@@ -2660,8 +2887,9 @@ usMinDownStreamHTLinkWidth: same as above.
+ #define INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND 2
+ #define INTEGRATED_SYSTEM_INFO__AMD_CPU__K8 3
+ #define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH 4
++#define INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI 5
+
+-#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH // this deff reflects max defined CPU code
++#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI // this deff reflects max defined CPU code
+
+ #define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
+ #define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
+@@ -2753,6 +2981,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5
+ #define ASIC_INT_DIG4_ENCODER_ID 0x0b
+ #define ASIC_INT_DIG5_ENCODER_ID 0x0c
+ #define ASIC_INT_DIG6_ENCODER_ID 0x0d
++#define ASIC_INT_DIG7_ENCODER_ID 0x0e
+
+ //define Encoder attribute
+ #define ATOM_ANALOG_ENCODER 0
+@@ -3226,15 +3455,23 @@ typedef struct _ATOM_LCD_INFO_V13
+
+ UCHAR ucPowerSequenceDIGONtoDE_in4Ms;
+ UCHAR ucPowerSequenceDEtoVARY_BL_in4Ms;
+- UCHAR ucPowerSequenceDEtoDIGON_in4Ms;
+ UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms;
++ UCHAR ucPowerSequenceDEtoDIGON_in4Ms;
+
+ UCHAR ucOffDelay_in4Ms;
+ UCHAR ucPowerSequenceVARY_BLtoBLON_in4Ms;
+ UCHAR ucPowerSequenceBLONtoVARY_BL_in4Ms;
+ UCHAR ucReserved1;
+
+- ULONG ulReserved[4];
++ UCHAR ucDPCD_eDP_CONFIGURATION_CAP; // dpcd 0dh
++ UCHAR ucDPCD_MAX_LINK_RATE; // dpcd 01h
++ UCHAR ucDPCD_MAX_LANE_COUNT; // dpcd 02h
++ UCHAR ucDPCD_MAX_DOWNSPREAD; // dpcd 03h
++
++ USHORT usMaxPclkFreqInSingleLink; // Max PixelClock frequency in single link mode.
++ UCHAR uceDPToLVDSRxId;
++ UCHAR ucLcdReservd;
++ ULONG ulReserved[2];
+ }ATOM_LCD_INFO_V13;
+
+ #define ATOM_LCD_INFO_LAST ATOM_LCD_INFO_V13
+@@ -3273,6 +3510,11 @@ typedef struct _ATOM_LCD_INFO_V13
+ //Use this cap bit for a quick reference whether an embadded panel (LCD1 ) is LVDS or eDP.
+ #define LCDPANEL_CAP_V13_eDP 0x4 // = LCDPANEL_CAP_eDP no change comparing to previous version
+
++//uceDPToLVDSRxId
++#define eDP_TO_LVDS_RX_DISABLE 0x00 // no eDP->LVDS translator chip
++#define eDP_TO_LVDS_COMMON_ID 0x01 // common eDP->LVDS translator chip without AMD SW init
++#define eDP_TO_LVDS_RT_ID 0x02 // RT tanslator which require AMD SW init
++
+ typedef struct _ATOM_PATCH_RECORD_MODE
+ {
+ UCHAR ucRecordType;
+@@ -3317,6 +3559,7 @@ typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD
+ #define LCD_CAP_RECORD_TYPE 3
+ #define LCD_FAKE_EDID_PATCH_RECORD_TYPE 4
+ #define LCD_PANEL_RESOLUTION_RECORD_TYPE 5
++#define LCD_EDID_OFFSET_PATCH_RECORD_TYPE 6
+ #define ATOM_RECORD_END_TYPE 0xFF
+
+ /****************************Spread Spectrum Info Table Definitions **********************/
+@@ -3528,6 +3771,7 @@ else //Non VGA case
+
+ CAIL needs to claim an reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in non VGA case.*/
+
++/***********************************************************************************/
+ #define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1
+
+ typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO
+@@ -3818,13 +4062,17 @@ typedef struct _EXT_DISPLAY_PATH
+ ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
+ ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
+ };
+- UCHAR ucReserved;
+- USHORT usReserved[2];
++ UCHAR ucChPNInvert; // bit vector for up to 8 lanes, =0: P and N is not invert, =1 P and N is inverted
++ USHORT usCaps;
++ USHORT usReserved;
+ }EXT_DISPLAY_PATH;
+
+ #define NUMBER_OF_UCHAR_FOR_GUID 16
+ #define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7
+
++//usCaps
++#define EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE 0x01
++
+ typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
+ {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+@@ -3832,7 +4080,9 @@ typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
+ EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
+ UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0.
+ UCHAR uc3DStereoPinId; // use for eDP panel
+- UCHAR Reserved [6]; // for potential expansion
++ UCHAR ucRemoteDisplayConfig;
++ UCHAR uceDPToLVDSRxId;
++ UCHAR Reserved[4]; // for potential expansion
+ }ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
+
+ //Related definitions, all records are different but they have a commond header
+@@ -3977,6 +4227,7 @@ typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD
+ #define GPIO_PIN_STATE_ACTIVE_HIGH 0x1
+
+ // Indexes to GPIO array in GLSync record
++// GLSync record is for Frame Lock/Gen Lock feature.
+ #define ATOM_GPIO_INDEX_GLSYNC_REFCLK 0
+ #define ATOM_GPIO_INDEX_GLSYNC_HSYNC 1
+ #define ATOM_GPIO_INDEX_GLSYNC_VSYNC 2
+@@ -3984,7 +4235,9 @@ typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD
+ #define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT 4
+ #define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5
+ #define ATOM_GPIO_INDEX_GLSYNC_V_RESET 6
+-#define ATOM_GPIO_INDEX_GLSYNC_MAX 7
++#define ATOM_GPIO_INDEX_GLSYNC_SWAP_CNTL 7
++#define ATOM_GPIO_INDEX_GLSYNC_SWAP_SEL 8
++#define ATOM_GPIO_INDEX_GLSYNC_MAX 9
+
+ typedef struct _ATOM_ENCODER_DVO_CF_RECORD
+ {
+@@ -3994,7 +4247,8 @@ typedef struct _ATOM_ENCODER_DVO_CF_RECORD
+ }ATOM_ENCODER_DVO_CF_RECORD;
+
+ // Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
+-#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by this path
++#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by HW encoder
++#define ATOM_ENCODER_CAP_RECORD_HBR2_EN 0x02 // DP1.2 HBR2 setting is qualified and HBR2 can be enabled
+
+ typedef struct _ATOM_ENCODER_CAP_RECORD
+ {
+@@ -4003,11 +4257,13 @@ typedef struct _ATOM_ENCODER_CAP_RECORD
+ USHORT usEncoderCap;
+ struct {
+ #if ATOM_BIG_ENDIAN
+- USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
++ USHORT usReserved:14; // Bit1-15 may be defined for other capability in future
++ USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable
+ USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
+ #else
+ USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
+- USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
++ USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable
++ USHORT usReserved:14; // Bit1-15 may be defined for other capability in future
+ #endif
+ };
+ };
+@@ -4157,6 +4413,7 @@ typedef struct _ATOM_VOLTAGE_CONTROL
+ #define VOLTAGE_CONTROL_ID_VT1556M 0x07
+ #define VOLTAGE_CONTROL_ID_CHL822x 0x08
+ #define VOLTAGE_CONTROL_ID_VT1586M 0x09
++#define VOLTAGE_CONTROL_ID_UP1637 0x0A
+
+ typedef struct _ATOM_VOLTAGE_OBJECT
+ {
+@@ -4193,6 +4450,69 @@ typedef struct _ATOM_LEAKID_VOLTAGE
+ USHORT usVoltage;
+ }ATOM_LEAKID_VOLTAGE;
+
++typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
++ UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
++ UCHAR ucVoltageMode; //Indicate voltage control mode: Init/Set/Leakage/Set phase
++ USHORT usSize; //Size of Object
++}ATOM_VOLTAGE_OBJECT_HEADER_V3;
++
++typedef struct _VOLTAGE_LUT_ENTRY_V2
++{
++ ULONG ulVoltageId; // The Voltage ID which is used to program GPIO register
++ USHORT usVoltageValue; // The corresponding Voltage Value, in mV
++}VOLTAGE_LUT_ENTRY_V2;
++
++typedef struct _LEAKAGE_VOLTAGE_LUT_ENTRY_V2
++{
++ USHORT usVoltageLevel; // The Voltage ID which is used to program GPIO register
++ USHORT usVoltageId;
++ USHORT usLeakageId; // The corresponding Voltage Value, in mV
++}LEAKAGE_VOLTAGE_LUT_ENTRY_V2;
++
++typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
++{
++ ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
++ UCHAR ucVoltageRegulatorId; //Indicate Voltage Regulator Id
++ UCHAR ucVoltageControlI2cLine;
++ UCHAR ucVoltageControlAddress;
++ UCHAR ucVoltageControlOffset;
++ ULONG ulReserved;
++ VOLTAGE_LUT_ENTRY asVolI2cLut[1]; // end with 0xff
++}ATOM_I2C_VOLTAGE_OBJECT_V3;
++
++typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
++{
++ ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
++ UCHAR ucVoltageGpioCntlId; // default is 0 which indicate control through CG VID mode
++ UCHAR ucGpioEntryNum; // indiate the entry numbers of Votlage/Gpio value Look up table
++ UCHAR ucPhaseDelay; // phase delay in unit of micro second
++ UCHAR ucReserved;
++ ULONG ulGpioMaskVal; // GPIO Mask value
++ VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[1];
++}ATOM_GPIO_VOLTAGE_OBJECT_V3;
++
++typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
++{
++ ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
++ UCHAR ucLeakageCntlId; // default is 0
++ UCHAR ucLeakageEntryNum; // indicate the entry number of LeakageId/Voltage Lut table
++ UCHAR ucReserved[2];
++ ULONG ulMaxVoltageLevel;
++ LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];
++}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
++
++typedef union _ATOM_VOLTAGE_OBJECT_V3{
++ ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
++ ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
++ ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
++}ATOM_VOLTAGE_OBJECT_V3;
++
++typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1
++{
++ ATOM_COMMON_TABLE_HEADER sHeader;
++ ATOM_VOLTAGE_OBJECT_V3 asVoltageObj[3]; //Info for Voltage control
++}ATOM_VOLTAGE_OBJECT_INFO_V3_1;
++
+ typedef struct _ATOM_ASIC_PROFILE_VOLTAGE
+ {
+ UCHAR ucProfileId;
+@@ -4305,7 +4625,18 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
+ USHORT usHDMISSpreadRateIn10Hz;
+ USHORT usDVISSPercentage;
+ USHORT usDVISSpreadRateIn10Hz;
+- ULONG ulReserved3[21];
++ ULONG SclkDpmBoostMargin;
++ ULONG SclkDpmThrottleMargin;
++ USHORT SclkDpmTdpLimitPG;
++ USHORT SclkDpmTdpLimitBoost;
++ ULONG ulBoostEngineCLock;
++ UCHAR ulBoostVid_2bit;
++ UCHAR EnableBoost;
++ USHORT GnbTdpLimit;
++ USHORT usMaxLVDSPclkFreqInSingleLink;
++ UCHAR ucLvdsMisc;
++ UCHAR ucLVDSReserved;
++ ULONG ulReserved3[15];
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+ }ATOM_INTEGRATED_SYSTEM_INFO_V6;
+
+@@ -4313,9 +4644,16 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
+ #define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
+ #define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION 0x08
+
+-// ulOtherDisplayMisc
+-#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
++//ucLVDSMisc:
++#define SYS_INFO_LVDSMISC__888_FPDI_MODE 0x01
++#define SYS_INFO_LVDSMISC__DL_CH_SWAP 0x02
++#define SYS_INFO_LVDSMISC__888_BPC 0x04
++#define SYS_INFO_LVDSMISC__OVERRIDE_EN 0x08
++#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW 0x10
+
++// not used any more
++#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW 0x04
++#define SYS_INFO_LVDSMISC__HSYNC_ACTIVE_LOW 0x08
+
+ /**********************************************************************************************************************
+ ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+@@ -4384,7 +4722,208 @@ ucUMAChannelNumber: System memory channel numbers.
+ ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
+ ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
+ ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+-sAvail_SCLK[5]: Arrays to provide available list of SLCK and corresponding voltage, order from low to high
++sAvail_SCLK[5]: Arrays to provide availabe list of SLCK and corresponding voltage, order from low to high
++ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
++ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calcualte data reconnection latency. Unit in 10kHz.
++ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
++ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
++ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
++usPCIEClkSSPercentage: PCIE Clock Spred Spectrum Percentage in unit 0.01%; 100 mean 1%.
++usPCIEClkSSType: PCIE Clock Spred Spectrum Type. 0 for Down spread(default); 1 for Center spread.
++usLvdsSSPercentage: LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
++usLvdsSSpreadRateIn10Hz: LVDS panel ( not include eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
++usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
++usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
++usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
++usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
++usMaxLVDSPclkFreqInSingleLink: Max pixel clock LVDS panel single link, if=0 means VBIOS use default threhold, right now it is 85Mhz
++ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
++ [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swap, =1: lower link and upper link are swapped
++ [bit2] LVDS 888bit per color mode =0: 666 bit per color =1:888 bit per color
++ [bit3] LVDS parameter override enable =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used
++ [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
++**********************************************************************************************************************/
++
++// this Table is used for Liano/Ontario APU
++typedef struct _ATOM_FUSION_SYSTEM_INFO_V1
++{
++ ATOM_INTEGRATED_SYSTEM_INFO_V6 sIntegratedSysInfo;
++ ULONG ulPowerplayTable[128];
++}ATOM_FUSION_SYSTEM_INFO_V1;
++/**********************************************************************************************************************
++ ATOM_FUSION_SYSTEM_INFO_V1 Description
++sIntegratedSysInfo: refer to ATOM_INTEGRATED_SYSTEM_INFO_V6 definition.
++ulPowerplayTable[128]: This 512 bytes memory is used to save ATOM_PPLIB_POWERPLAYTABLE3, starting form ulPowerplayTable[0]
++**********************************************************************************************************************/
++
++// this IntegrateSystemInfoTable is used for Trinity APU
++typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
++{
++ ATOM_COMMON_TABLE_HEADER sHeader;
++ ULONG ulBootUpEngineClock;
++ ULONG ulDentistVCOFreq;
++ ULONG ulBootUpUMAClock;
++ ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
++ ULONG ulBootUpReqDisplayVector;
++ ULONG ulOtherDisplayMisc;
++ ULONG ulGPUCapInfo;
++ ULONG ulSB_MMIO_Base_Addr;
++ USHORT usRequestedPWMFreqInHz;
++ UCHAR ucHtcTmpLmt;
++ UCHAR ucHtcHystLmt;
++ ULONG ulMinEngineClock;
++ ULONG ulSystemConfig;
++ ULONG ulCPUCapInfo;
++ USHORT usNBP0Voltage;
++ USHORT usNBP1Voltage;
++ USHORT usBootUpNBVoltage;
++ USHORT usExtDispConnInfoOffset;
++ USHORT usPanelRefreshRateRange;
++ UCHAR ucMemoryType;
++ UCHAR ucUMAChannelNumber;
++ UCHAR strVBIOSMsg[40];
++ ULONG ulReserved[20];
++ ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
++ ULONG ulGMCRestoreResetTime;
++ ULONG ulMinimumNClk;
++ ULONG ulIdleNClk;
++ ULONG ulDDR_DLL_PowerUpTime;
++ ULONG ulDDR_PLL_PowerUpTime;
++ USHORT usPCIEClkSSPercentage;
++ USHORT usPCIEClkSSType;
++ USHORT usLvdsSSPercentage;
++ USHORT usLvdsSSpreadRateIn10Hz;
++ USHORT usHDMISSPercentage;
++ USHORT usHDMISSpreadRateIn10Hz;
++ USHORT usDVISSPercentage;
++ USHORT usDVISSpreadRateIn10Hz;
++ ULONG SclkDpmBoostMargin;
++ ULONG SclkDpmThrottleMargin;
++ USHORT SclkDpmTdpLimitPG;
++ USHORT SclkDpmTdpLimitBoost;
++ ULONG ulBoostEngineCLock;
++ UCHAR ulBoostVid_2bit;
++ UCHAR EnableBoost;
++ USHORT GnbTdpLimit;
++ USHORT usMaxLVDSPclkFreqInSingleLink;
++ UCHAR ucLvdsMisc;
++ UCHAR ucLVDSReserved;
++ UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
++ UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
++ UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
++ UCHAR ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
++ UCHAR ucLVDSOffToOnDelay_in4Ms;
++ UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
++ UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
++ UCHAR ucLVDSReserved1;
++ ULONG ulLCDBitDepthControlVal;
++ ULONG ulNbpStateMemclkFreq[4];
++ USHORT usNBP2Voltage;
++ USHORT usNBP3Voltage;
++ ULONG ulNbpStateNClkFreq[4];
++ UCHAR ucNBDPMEnable;
++ UCHAR ucReserved[3];
++ UCHAR ucDPMState0VclkFid;
++ UCHAR ucDPMState0DclkFid;
++ UCHAR ucDPMState1VclkFid;
++ UCHAR ucDPMState1DclkFid;
++ UCHAR ucDPMState2VclkFid;
++ UCHAR ucDPMState2DclkFid;
++ UCHAR ucDPMState3VclkFid;
++ UCHAR ucDPMState3DclkFid;
++ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
++}ATOM_INTEGRATED_SYSTEM_INFO_V1_7;
++
++// ulOtherDisplayMisc
++#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
++#define INTEGRATED_SYSTEM_INFO__GET_BOOTUP_DISPLAY_CALLBACK_FUNC_SUPPORT 0x02
++#define INTEGRATED_SYSTEM_INFO__GET_EXPANSION_CALLBACK_FUNC_SUPPORT 0x04
++#define INTEGRATED_SYSTEM_INFO__FAST_BOOT_SUPPORT 0x08
++
++// ulGPUCapInfo
++#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
++#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02
++#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08
++
++/**********************************************************************************************************************
++ ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
++ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. if it is equal 0, then VBIOS use pre-defined bootup engine clock
++ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
++ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
++sDISPCLK_Voltage: Report Display clock voltage requirement.
++
++ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Trinity projects:
++ ATOM_DEVICE_CRT1_SUPPORT 0x0001
++ ATOM_DEVICE_DFP1_SUPPORT 0x0008
++ ATOM_DEVICE_DFP6_SUPPORT 0x0040
++ ATOM_DEVICE_DFP2_SUPPORT 0x0080
++ ATOM_DEVICE_DFP3_SUPPORT 0x0200
++ ATOM_DEVICE_DFP4_SUPPORT 0x0400
++ ATOM_DEVICE_DFP5_SUPPORT 0x0800
++ ATOM_DEVICE_LCD1_SUPPORT 0x0002
++ulOtherDisplayMisc: bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS.
++ =1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
++ bit[1]=0: INT15 callback function Get boot display( ax=4e08, bl=01h) is not supported by SBIOS
++ =1: INT15 callback function Get boot display( ax=4e08, bl=01h) is supported by SBIOS
++ bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is not supported by SBIOS
++ =1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is supported by SBIOS
++ bit[3]=0: VBIOS fast boot is disable
++ =1: VBIOS fast boot is enable. ( VBIOS skip display device detection in every set mode if LCD panel is connect and LID is open)
++ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode use cascade PLL mode.
++ =1: TMDS/HDMI Coherent Mode use signel PLL mode.
++ bit[1]=0: DP mode use cascade PLL mode ( New for Trinity )
++ =1: DP mode use single PLL mode
++ bit[3]=0: Enable AUX HW mode detection logic
++ =1: Disable AUX HW mode detection logic
++
++ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
++
++usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
++ Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
++
++ When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
++ 1. SW uses the GPU BL PWM output to control the BL, in chis case, this non-zero frequency determines what freq GPU should use;
++ VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1,as the result,
++ Changing BL using VBIOS function is functional in both driver and non-driver present environment;
++ and enabling VariBri under the driver environment from PP table is optional.
++
++ 2. SW uses other means to control BL (like DPCD),this non-zero frequency serves as a flag only indicating
++ that BL control from GPU is expected.
++ VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
++ Changing BL using VBIOS function could be functional in both driver and non-driver present environment,but
++ it's per platform
++ and enabling VariBri under the driver environment from PP table is optional.
++
++ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt.
++ Threshold on value to enter HTC_active state.
++ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
++ To calculate threshold off value to exit HTC_active state, which is Threshold on vlaue minus ucHtcHystLmt.
++ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
++ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
++ =1: PCIE Power Gating Enabled
++ Bit[1]=0: DDR-DLL shut-down feature disabled.
++ 1: DDR-DLL shut-down feature enabled.
++ Bit[2]=0: DDR-PLL Power down feature disabled.
++ 1: DDR-PLL Power down feature enabled.
++ulCPUCapInfo: TBD
++usNBP0Voltage: VID for voltage on NB P0 State
++usNBP1Voltage: VID for voltage on NB P1 State
++usNBP2Voltage: VID for voltage on NB P2 State
++usNBP3Voltage: VID for voltage on NB P3 State
++usBootUpNBVoltage: Voltage Index of GNB voltage configured by SBIOS, which is suffcient to support VBIOS DISPCLK requirement.
++usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
++usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requestd by the platform, at least two bits need to be set
++ to indicate a range.
++ SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
++ SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
++ SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
++ SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
++ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
++ucUMAChannelNumber: System memory channel numbers.
++ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
++ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
++ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
++sAvail_SCLK[5]: Arrays to provide availabe list of SLCK and corresponding voltage, order from low to high
+ ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
+ ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calcualte data reconnection latency. Unit in 10kHz.
+ ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+@@ -4398,6 +4937,41 @@ usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%;
+ usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+ usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
+ usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
++usMaxLVDSPclkFreqInSingleLink: Max pixel clock LVDS panel single link, if=0 means VBIOS use default threhold, right now it is 85Mhz
++ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
++ [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swap, =1: lower link and upper link are swapped
++ [bit2] LVDS 888bit per color mode =0: 666 bit per color =1:888 bit per color
++ [bit3] LVDS parameter override enable =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used
++ [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
++ucLVDSPwrOnSeqDIGONtoDE_in4Ms: LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active( DE ).
++ =0 mean use VBIOS default which is 8 ( 32ms ). The LVDS power up sequence is as following: DIGON->DE->VARY_BL->BLON.
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++ucLVDSPwrOnDEtoVARY_BL_in4Ms: LVDS power up sequence time in unit of 4ms., time delay from DE( data enable ) active to Vary Brightness enable signal active( VARY_BL ).
++ =0 mean use VBIOS default which is 90 ( 360ms ). The LVDS power up sequence is as following: DIGON->DE->VARY_BL->BLON.
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ucLVDSPwrOffVARY_BLtoDE_in4Ms: LVDS power down sequence time in unit of 4ms, time delay from data enable ( DE ) signal off to LCDVCC (DIGON) off.
++ =0 mean use VBIOS default delay which is 8 ( 32ms ). The LVDS power down sequence is as following: BLON->VARY_BL->DE->DIGON
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ucLVDSPwrOffDEtoDIGON_in4Ms: LVDS power down sequence time in unit of 4ms, time delay from vary brightness enable signal( VARY_BL) off to data enable ( DE ) signal off.
++ =0 mean use VBIOS default which is 90 ( 360ms ). The LVDS power down sequence is as following: BLON->VARY_BL->DE->DIGON
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ucLVDSOffToOnDelay_in4Ms: LVDS power down sequence time in unit of 4ms. Time delay from DIGON signal off to DIGON signal active.
++ =0 means to use VBIOS default delay which is 125 ( 500ms ).
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ucLVDSPwrOnVARY_BLtoBLON_in4Ms: LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to DLON signal active.
++ =0 means to use VBIOS default delay which is 0 ( 0ms ).
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ucLVDSPwrOffBLONtoVARY_BL_in4Ms: LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
++ =0 means to use VBIOS default delay which is 0 ( 0ms ).
++ This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
++
++ulNbpStateMemclkFreq[4]: system memory clock frequncey in unit of 10Khz in different NB pstate.
++
+ **********************************************************************************************************************/
+
+ /**************************************************************************/
+@@ -4459,6 +5033,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT
+ #define ASIC_INTERNAL_SS_ON_DP 7
+ #define ASIC_INTERNAL_SS_ON_DCPLL 8
+ #define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
++#define ASIC_INTERNAL_VCE_SS 10
+
+ typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
+ {
+@@ -4520,7 +5095,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
+ #define ATOM_DOS_MODE_INFO_DEF 7
+ #define ATOM_I2C_CHANNEL_STATUS_DEF 8
+ #define ATOM_I2C_CHANNEL_STATUS1_DEF 9
+-
++#define ATOM_INTERNAL_TIMER_DEF 10
+
+ // BIOS_0_SCRATCH Definition
+ #define ATOM_S0_CRT1_MONO 0x00000001L
+@@ -4648,6 +5223,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
+ #define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF
+ #define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C
+ #define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGEb3 0x10
++#define ATOM_S2_TMDS_COHERENT_MODEb3 0x10 // used by VBIOS code only, use coherent mode for TMDS/HDMI mode
+ #define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20
+ #define ATOM_S2_ROTATION_STATE_MASKb3 0xC0
+
+@@ -5038,6 +5614,23 @@ typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3
+ USHORT usDeviceId; // Active Device Id for this surface. If no device, set to 0.
+ }ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
+
++typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4
++{
++ USHORT usHight; // Image Hight
++ USHORT usWidth; // Image Width
++ USHORT usGraphPitch;
++ UCHAR ucColorDepth;
++ UCHAR ucPixelFormat;
++ UCHAR ucSurface; // Surface 1 or 2
++ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
++ UCHAR ucModeType;
++ UCHAR ucReserved;
++}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4;
++
++// ucEnable
++#define ATOM_GRAPH_CONTROL_SET_PITCH 0x0f
++#define ATOM_GRAPH_CONTROL_SET_DISP_START 0x10
++
+ typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
+ {
+ ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
+@@ -5057,6 +5650,58 @@ typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS
+ USHORT usY_Size;
+ }GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
+
++typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2
++{
++ union{
++ USHORT usX_Size; //When use as input parameter, usX_Size indicates which CRTC
++ USHORT usSurface;
++ };
++ USHORT usY_Size;
++ USHORT usDispXStart;
++ USHORT usDispYStart;
++}GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2;
++
++
++typedef struct _PALETTE_DATA_CONTROL_PARAMETERS_V3
++{
++ UCHAR ucLutId;
++ UCHAR ucAction;
++ USHORT usLutStartIndex;
++ USHORT usLutLength;
++ USHORT usLutOffsetInVram;
++}PALETTE_DATA_CONTROL_PARAMETERS_V3;
++
++// ucAction:
++#define PALETTE_DATA_AUTO_FILL 1
++#define PALETTE_DATA_READ 2
++#define PALETTE_DATA_WRITE 3
++
++
++typedef struct _INTERRUPT_SERVICE_PARAMETERS_V2
++{
++ UCHAR ucInterruptId;
++ UCHAR ucServiceId;
++ UCHAR ucStatus;
++ UCHAR ucReserved;
++}INTERRUPT_SERVICE_PARAMETER_V2;
++
++// ucInterruptId
++#define HDP1_INTERRUPT_ID 1
++#define HDP2_INTERRUPT_ID 2
++#define HDP3_INTERRUPT_ID 3
++#define HDP4_INTERRUPT_ID 4
++#define HDP5_INTERRUPT_ID 5
++#define HDP6_INTERRUPT_ID 6
++#define SW_INTERRUPT_ID 11
++
++// ucAction
++#define INTERRUPT_SERVICE_GEN_SW_INT 1
++#define INTERRUPT_SERVICE_GET_STATUS 2
++
++ // ucStatus
++#define INTERRUPT_STATUS__INT_TRIGGER 1
++#define INTERRUPT_STATUS__HPD_HIGH 2
++
+ typedef struct _INDIRECT_IO_ACCESS
+ {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+@@ -5189,7 +5834,7 @@ typedef struct _ATOM_INIT_REG_BLOCK{
+
+ #define END_OF_REG_INDEX_BLOCK 0x0ffff
+ #define END_OF_REG_DATA_BLOCK 0x00000000
+-#define ATOM_INIT_REG_MASK_FLAG 0x80
++#define ATOM_INIT_REG_MASK_FLAG 0x80 //Not used in BIOS
+ #define CLOCK_RANGE_HIGHEST 0x00ffffff
+
+ #define VALUE_DWORD SIZEOF ULONG
+@@ -5229,6 +5874,7 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
+ #define _128Mx8 0x51
+ #define _128Mx16 0x52
+ #define _256Mx8 0x61
++#define _256Mx16 0x62
+
+ #define SAMSUNG 0x1
+ #define INFINEON 0x2
+@@ -5585,7 +6231,7 @@ typedef struct _ATOM_VRAM_MODULE_V7
+ ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP
+ USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7
+ USHORT usPrivateReserved; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+- USHORT usReserved;
++ USHORT usEnableChannels; // bit vector which indicate which channels are enabled
+ UCHAR ucExtMemoryID; // Current memory module ID
+ UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
+ UCHAR ucChannelNum; // Number of mem. channels supported in this module
+@@ -5597,7 +6243,8 @@ typedef struct _ATOM_VRAM_MODULE_V7
+ UCHAR ucNPL_RT; // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+- UCHAR ucReserved[3];
++ USHORT usSEQSettingOffset;
++ UCHAR ucReserved;
+ // Memory Module specific values
+ USHORT usEMRS2Value; // EMRS2/MR2 Value.
+ USHORT usEMRS3Value; // EMRS3/MR3 Value.
+@@ -5633,10 +6280,10 @@ typedef struct _ATOM_VRAM_INFO_V3
+ typedef struct _ATOM_VRAM_INFO_V4
+ {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+- USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+- USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+- USHORT usRerseved;
+- UCHAR ucMemDQ7_0ByteRemap; // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
++ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
++ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
++ USHORT usRerseved;
++ UCHAR ucMemDQ7_0ByteRemap; // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
+ ULONG ulMemDQ7_0BitRemap; // each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21]
+ UCHAR ucReservde[4];
+ UCHAR ucNumOfVRAMModule;
+@@ -5648,9 +6295,10 @@ typedef struct _ATOM_VRAM_INFO_V4
+ typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
+ {
+ ATOM_COMMON_TABLE_HEADER sHeader;
+- USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+- USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+- USHORT usReserved[4];
++ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
++ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
++ USHORT usPerBytePresetOffset; // offset of ATOM_INIT_REG_BLOCK structure for Per Byte Offset Preset Settings
++ USHORT usReserved[3];
+ UCHAR ucNumOfVRAMModule; // indicate number of VRAM module
+ UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list
+ UCHAR ucVramModuleVer; // indicate ATOM_VRAM_MODUE version
+@@ -5935,6 +6583,52 @@ typedef struct _ATOM_DISP_OUT_INFO_V2
+ ASIC_ENCODER_INFO asEncoderInfo[1];
+ }ATOM_DISP_OUT_INFO_V2;
+
++
++typedef struct _ATOM_DISP_CLOCK_ID {
++ UCHAR ucPpllId;
++ UCHAR ucPpllAttribute;
++}ATOM_DISP_CLOCK_ID;
++
++// ucPpllAttribute
++#define CLOCK_SOURCE_SHAREABLE 0x01
++#define CLOCK_SOURCE_DP_MODE 0x02
++#define CLOCK_SOURCE_NONE_DP_MODE 0x04
++
++//DispOutInfoTable
++typedef struct _ASIC_TRANSMITTER_INFO_V2
++{
++ USHORT usTransmitterObjId;
++ USHORT usDispClkIdOffset; // point to clock source id list supported by Encoder Object
++ UCHAR ucTransmitterCmdTblId;
++ UCHAR ucConfig;
++ UCHAR ucEncoderID; // available 1st encoder ( default )
++ UCHAR ucOptionEncoderID; // available 2nd encoder ( optional )
++ UCHAR uc2ndEncoderID;
++ UCHAR ucReserved;
++}ASIC_TRANSMITTER_INFO_V2;
++
++typedef struct _ATOM_DISP_OUT_INFO_V3
++{
++ ATOM_COMMON_TABLE_HEADER sHeader;
++ USHORT ptrTransmitterInfo;
++ USHORT ptrEncoderInfo;
++ USHORT ptrMainCallParserFar; // direct address of main parser call in VBIOS binary.
++ USHORT usReserved;
++ UCHAR ucDCERevision;
++ UCHAR ucMaxDispEngineNum;
++ UCHAR ucMaxActiveDispEngineNum;
++ UCHAR ucMaxPPLLNum;
++ UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE
++ UCHAR ucReserved[3];
++ ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alligment only
++}ATOM_DISP_OUT_INFO_V3;
++
++typedef enum CORE_REF_CLK_SOURCE{
++ CLOCK_SRC_XTALIN=0,
++ CLOCK_SRC_XO_IN=1,
++ CLOCK_SRC_XO_IN2=2,
++}CORE_REF_CLK_SOURCE;
++
+ // DispDevicePriorityInfo
+ typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
+ {
+@@ -6070,6 +6764,39 @@ typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+ #define HW_I2C_READ 0
+ #define I2C_2BYTE_ADDR 0x02
+
++/****************************************************************************/
++// Structures used by HW_Misc_OperationTable
++/****************************************************************************/
++typedef struct _ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1
++{
++ UCHAR ucCmd; // Input: To tell which action to take
++ UCHAR ucReserved[3];
++ ULONG ulReserved;
++}ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1;
++
++typedef struct _ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1
++{
++ UCHAR ucReturnCode; // Output: Return value base on action was taken
++ UCHAR ucReserved[3];
++ ULONG ulReserved;
++}ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1;
++
++// Actions code
++#define ATOM_GET_SDI_SUPPORT 0xF0
++
++// Return code
++#define ATOM_UNKNOWN_CMD 0
++#define ATOM_FEATURE_NOT_SUPPORTED 1
++#define ATOM_FEATURE_SUPPORTED 2
++
++typedef struct _ATOM_HW_MISC_OPERATION_PS_ALLOCATION
++{
++ ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1 sInput_Output;
++ PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS sReserved;
++}ATOM_HW_MISC_OPERATION_PS_ALLOCATION;
++
++/****************************************************************************/
++
+ typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
+ {
+ UCHAR ucHWBlkInst; // HW block instance, 0, 1, 2, ...
+@@ -6090,6 +6817,52 @@ typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
+ #define SELECT_CRTC_PIXEL_RATE 7
+ #define SELECT_VGA_BLK 8
+
++// DIGTransmitterInfoTable structure used to program UNIPHY settings
++typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_1{
++ ATOM_COMMON_TABLE_HEADER sHeader;
++ USHORT usDPVsPreEmphSettingOffset; // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock
++ USHORT usPhyAnalogRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info
++ USHORT usPhyAnalogSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range
++ USHORT usPhyPllRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info
++ USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
++}DIG_TRANSMITTER_INFO_HEADER_V3_1;
++
++typedef struct _CLOCK_CONDITION_REGESTER_INFO{
++ USHORT usRegisterIndex;
++ UCHAR ucStartBit;
++ UCHAR ucEndBit;
++}CLOCK_CONDITION_REGESTER_INFO;
++
++typedef struct _CLOCK_CONDITION_SETTING_ENTRY{
++ USHORT usMaxClockFreq;
++ UCHAR ucEncodeMode;
++ UCHAR ucPhySel;
++ ULONG ulAnalogSetting[1];
++}CLOCK_CONDITION_SETTING_ENTRY;
++
++typedef struct _CLOCK_CONDITION_SETTING_INFO{
++ USHORT usEntrySize;
++ CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[1];
++}CLOCK_CONDITION_SETTING_INFO;
++
++typedef struct _PHY_CONDITION_REG_VAL{
++ ULONG ulCondition;
++ ULONG ulRegVal;
++}PHY_CONDITION_REG_VAL;
++
++typedef struct _PHY_CONDITION_REG_INFO{
++ USHORT usRegIndex;
++ USHORT usSize;
++ PHY_CONDITION_REG_VAL asRegVal[1];
++}PHY_CONDITION_REG_INFO;
++
++typedef struct _PHY_ANALOG_SETTING_INFO{
++ UCHAR ucEncodeMode;
++ UCHAR ucPhySel;
++ USHORT usSize;
++ PHY_CONDITION_REG_INFO asAnalogSetting[1];
++}PHY_ANALOG_SETTING_INFO;
++
+ /****************************************************************************/
+ //Portion VI: Definitinos for vbios MC scratch registers that driver used
+ /****************************************************************************/
+@@ -7020,4 +7793,68 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+
+ #pragma pack() // BIOS data must use byte aligment
+
++//
++// AMD ACPI Table
++//
++#pragma pack(1)
++
++typedef struct {
++ ULONG Signature;
++ ULONG TableLength; //Length
++ UCHAR Revision;
++ UCHAR Checksum;
++ UCHAR OemId[6];
++ UCHAR OemTableId[8]; //UINT64 OemTableId;
++ ULONG OemRevision;
++ ULONG CreatorId;
++ ULONG CreatorRevision;
++} AMD_ACPI_DESCRIPTION_HEADER;
++/*
++//EFI_ACPI_DESCRIPTION_HEADER from AcpiCommon.h
++typedef struct {
++ UINT32 Signature; //0x0
++ UINT32 Length; //0x4
++ UINT8 Revision; //0x8
++ UINT8 Checksum; //0x9
++ UINT8 OemId[6]; //0xA
++ UINT64 OemTableId; //0x10
++ UINT32 OemRevision; //0x18
++ UINT32 CreatorId; //0x1C
++ UINT32 CreatorRevision; //0x20
++}EFI_ACPI_DESCRIPTION_HEADER;
++*/
++typedef struct {
++ AMD_ACPI_DESCRIPTION_HEADER SHeader;
++ UCHAR TableUUID[16]; //0x24
++ ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the stucture.
++ ULONG Lib1ImageOffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the stucture.
++ ULONG Reserved[4]; //0x3C
++}UEFI_ACPI_VFCT;
++
++typedef struct {
++ ULONG PCIBus; //0x4C
++ ULONG PCIDevice; //0x50
++ ULONG PCIFunction; //0x54
++ USHORT VendorID; //0x58
++ USHORT DeviceID; //0x5A
++ USHORT SSVID; //0x5C
++ USHORT SSID; //0x5E
++ ULONG Revision; //0x60
++ ULONG ImageLength; //0x64
++}VFCT_IMAGE_HEADER;
++
++
++typedef struct {
++ VFCT_IMAGE_HEADER VbiosHeader;
++ UCHAR VbiosContent[1];
++}GOP_VBIOS_CONTENT;
++
++typedef struct {
++ VFCT_IMAGE_HEADER Lib1Header;
++ UCHAR Lib1Content[1];
++}GOP_LIB1_CONTENT;
++
++#pragma pack()
++
++
+ #endif /* _ATOMBIOS_H */
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 8227e76..28e69e9 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -123,21 +123,6 @@ struct radeon_device;
+ /*
+ * BIOS.
+ */
+-#define ATRM_BIOS_PAGE 4096
+-
+-#if defined(CONFIG_VGA_SWITCHEROO)
+-bool radeon_atrm_supported(struct pci_dev *pdev);
+-int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
+-#else
+-static inline bool radeon_atrm_supported(struct pci_dev *pdev)
+-{
+- return false;
+-}
+-
+-static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
+- return -EINVAL;
+-}
+-#endif
+ bool radeon_get_bios(struct radeon_device *rdev);
+
+
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 9d2c369..38585c5 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -446,7 +446,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ }
+
+ /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
+- if ((dev->pdev->device == 0x9802) &&
++ if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
+ (dev->pdev->subsystem_vendor == 0x1734) &&
+ (dev->pdev->subsystem_device == 0x11bd)) {
+ if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index 9d95792..2a2cf0b 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -30,56 +30,8 @@ static struct radeon_atpx_priv {
+ /* handle for device - and atpx */
+ acpi_handle dhandle;
+ acpi_handle atpx_handle;
+- acpi_handle atrm_handle;
+ } radeon_atpx_priv;
+
+-/* retrieve the ROM in 4k blocks */
+-static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
+- int offset, int len)
+-{
+- acpi_status status;
+- union acpi_object atrm_arg_elements[2], *obj;
+- struct acpi_object_list atrm_arg;
+- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+-
+- atrm_arg.count = 2;
+- atrm_arg.pointer = &atrm_arg_elements[0];
+-
+- atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
+- atrm_arg_elements[0].integer.value = offset;
+-
+- atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
+- atrm_arg_elements[1].integer.value = len;
+-
+- status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
+- if (ACPI_FAILURE(status)) {
+- printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
+- return -ENODEV;
+- }
+-
+- obj = (union acpi_object *)buffer.pointer;
+- memcpy(bios+offset, obj->buffer.pointer, len);
+- kfree(buffer.pointer);
+- return len;
+-}
+-
+-bool radeon_atrm_supported(struct pci_dev *pdev)
+-{
+- /* get the discrete ROM only via ATRM */
+- if (!radeon_atpx_priv.atpx_detected)
+- return false;
+-
+- if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+- return false;
+- return true;
+-}
+-
+-
+-int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
+-{
+- return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
+-}
+-
+ static int radeon_atpx_get_version(acpi_handle handle)
+ {
+ acpi_status status;
+@@ -197,7 +149,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
+
+ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
+ {
+- acpi_handle dhandle, atpx_handle, atrm_handle;
++ acpi_handle dhandle, atpx_handle;
+ acpi_status status;
+
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+@@ -208,13 +160,8 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
+ if (ACPI_FAILURE(status))
+ return false;
+
+- status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+- if (ACPI_FAILURE(status))
+- return false;
+-
+ radeon_atpx_priv.dhandle = dhandle;
+ radeon_atpx_priv.atpx_handle = atpx_handle;
+- radeon_atpx_priv.atrm_handle = atrm_handle;
+ return true;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
+index 229a20f..d306cc8 100644
+--- a/drivers/gpu/drm/radeon/radeon_bios.c
++++ b/drivers/gpu/drm/radeon/radeon_bios.c
+@@ -32,6 +32,7 @@
+
+ #include <linux/vga_switcheroo.h>
+ #include <linux/slab.h>
++#include <linux/acpi.h>
+ /*
+ * BIOS.
+ */
+@@ -98,16 +99,81 @@ static bool radeon_read_bios(struct radeon_device *rdev)
+ return true;
+ }
+
++#ifdef CONFIG_ACPI
+ /* ATRM is used to get the BIOS on the discrete cards in
+ * dual-gpu systems.
+ */
++/* retrieve the ROM in 4k blocks */
++#define ATRM_BIOS_PAGE 4096
++/**
++ * radeon_atrm_call - fetch a chunk of the vbios
++ *
++ * @atrm_handle: acpi ATRM handle
++ * @bios: vbios image pointer
++ * @offset: offset of vbios image data to fetch
++ * @len: length of vbios image data to fetch
++ *
++ * Executes ATRM to fetch a chunk of the discrete
++ * vbios image on PX systems (all asics).
++ * Returns the length of the buffer fetched.
++ */
++static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
++ int offset, int len)
++{
++ acpi_status status;
++ union acpi_object atrm_arg_elements[2], *obj;
++ struct acpi_object_list atrm_arg;
++ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
++
++ atrm_arg.count = 2;
++ atrm_arg.pointer = &atrm_arg_elements[0];
++
++ atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
++ atrm_arg_elements[0].integer.value = offset;
++
++ atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
++ atrm_arg_elements[1].integer.value = len;
++
++ status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
++ if (ACPI_FAILURE(status)) {
++ printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
++ return -ENODEV;
++ }
++
++ obj = (union acpi_object *)buffer.pointer;
++ memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
++ len = obj->buffer.length;
++ kfree(buffer.pointer);
++ return len;
++}
++
+ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+ {
+ int ret;
+ int size = 256 * 1024;
+ int i;
++ struct pci_dev *pdev = NULL;
++ acpi_handle dhandle, atrm_handle;
++ acpi_status status;
++ bool found = false;
++
++ /* ATRM is for the discrete card only */
++ if (rdev->flags & RADEON_IS_IGP)
++ return false;
++
++ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
++ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
++ if (!dhandle)
++ continue;
++
++ status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
++ if (!ACPI_FAILURE(status)) {
++ found = true;
++ break;
++ }
++ }
+
+- if (!radeon_atrm_supported(rdev->pdev))
++ if (!found)
+ return false;
+
+ rdev->bios = kmalloc(size, GFP_KERNEL);
+@@ -117,10 +183,11 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+ }
+
+ for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
+- ret = radeon_atrm_get_bios_chunk(rdev->bios,
+- (i * ATRM_BIOS_PAGE),
+- ATRM_BIOS_PAGE);
+- if (ret <= 0)
++ ret = radeon_atrm_call(atrm_handle,
++ rdev->bios,
++ (i * ATRM_BIOS_PAGE),
++ ATRM_BIOS_PAGE);
++ if (ret < ATRM_BIOS_PAGE)
+ break;
+ }
+
+@@ -130,6 +197,12 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+ }
+ return true;
+ }
++#else
++static inline bool radeon_atrm_get_bios(struct radeon_device *rdev)
++{
++ return false;
++}
++#endif
+
+ static bool ni_read_disabled_bios(struct radeon_device *rdev)
+ {
+@@ -476,6 +549,61 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
+ return legacy_read_disabled_bios(rdev);
+ }
+
++#ifdef CONFIG_ACPI
++static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
++{
++ bool ret = false;
++ struct acpi_table_header *hdr;
++ acpi_size tbl_size;
++ UEFI_ACPI_VFCT *vfct;
++ GOP_VBIOS_CONTENT *vbios;
++ VFCT_IMAGE_HEADER *vhdr;
++
++ if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
++ return false;
++ if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
++ DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
++ goto out_unmap;
++ }
++
++ vfct = (UEFI_ACPI_VFCT *)hdr;
++ if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
++ DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
++ goto out_unmap;
++ }
++
++ vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
++ vhdr = &vbios->VbiosHeader;
++ DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
++ vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
++ vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
++
++ if (vhdr->PCIBus != rdev->pdev->bus->number ||
++ vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
++ vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
++ vhdr->VendorID != rdev->pdev->vendor ||
++ vhdr->DeviceID != rdev->pdev->device) {
++ DRM_INFO("ACPI VFCT table is not for this card\n");
++ goto out_unmap;
++ };
++
++ if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
++ DRM_ERROR("ACPI VFCT image truncated\n");
++ goto out_unmap;
++ }
++
++ rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
++ ret = !!rdev->bios;
++
++out_unmap:
++ return ret;
++}
++#else
++static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
++{
++ return false;
++}
++#endif
+
+ bool radeon_get_bios(struct radeon_device *rdev)
+ {
+@@ -484,6 +612,8 @@ bool radeon_get_bios(struct radeon_device *rdev)
+
+ r = radeon_atrm_get_bios(rdev);
+ if (r == false)
++ r = radeon_acpi_vfct_bios(rdev);
++ if (r == false)
+ r = igp_read_bios_from_vram(rdev);
+ if (r == false)
+ r = radeon_read_bios(rdev);
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 39497c7..f3ae607 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -117,6 +117,7 @@ int radeon_bo_create(struct radeon_device *rdev,
+ return -ENOMEM;
+ }
+
++retry:
+ bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+ if (bo == NULL)
+ return -ENOMEM;
+@@ -129,8 +130,6 @@ int radeon_bo_create(struct radeon_device *rdev,
+ bo->gem_base.driver_private = NULL;
+ bo->surface_reg = -1;
+ INIT_LIST_HEAD(&bo->list);
+-
+-retry:
+ radeon_ttm_placement_from_domain(bo, domain);
+ /* Kernel allocation are uninterruptible */
+ mutex_lock(&rdev->vram_mutex);
+diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
+index b99af34..a2abb8e 100644
+--- a/drivers/hid/hid-chicony.c
++++ b/drivers/hid/hid-chicony.c
+@@ -60,6 +60,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ static const struct hid_device_id ch_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, ch_devices);
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 95430a0..5cc029f 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1398,12 +1398,14 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
+diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
+index 2f0be4c..9e43aac 100644
+--- a/drivers/hid/hid-cypress.c
++++ b/drivers/hid/hid-cypress.c
+@@ -129,6 +129,8 @@ static const struct hid_device_id cp_devices[] = {
+ .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3),
+ .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
++ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4),
++ .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
+ .driver_data = CP_2WHEEL_MOUSE_HACK },
+ { }
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 7db934d..e4317a2 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -196,6 +196,7 @@
+ #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
+ #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
+ #define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
++#define USB_DEVICE_ID_CHICONY_AK1D 0x1125
+
+ #define USB_VENDOR_ID_CHUNGHWAT 0x2247
+ #define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
+@@ -225,6 +226,7 @@
+ #define USB_DEVICE_ID_CYPRESS_BARCODE_1 0xde61
+ #define USB_DEVICE_ID_CYPRESS_BARCODE_2 0xde64
+ #define USB_DEVICE_ID_CYPRESS_BARCODE_3 0xbca1
++#define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81
+ #define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001
+
+ #define USB_VENDOR_ID_DEALEXTREAME 0x10c5
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 0bfa545..c76b051 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -568,24 +568,62 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
+ scmnd->sc_data_direction);
+ }
+
+-static void srp_remove_req(struct srp_target_port *target,
+- struct srp_request *req, s32 req_lim_delta)
++/**
++ * srp_claim_req - Take ownership of the scmnd associated with a request.
++ * @target: SRP target port.
++ * @req: SRP request.
++ * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
++ * ownership of @req->scmnd if it equals @scmnd.
++ *
++ * Return value:
++ * Either NULL or a pointer to the SCSI command the caller became owner of.
++ */
++static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
++ struct srp_request *req,
++ struct scsi_cmnd *scmnd)
+ {
+ unsigned long flags;
+
+- srp_unmap_data(req->scmnd, target, req);
++ spin_lock_irqsave(&target->lock, flags);
++ if (!scmnd) {
++ scmnd = req->scmnd;
++ req->scmnd = NULL;
++ } else if (req->scmnd == scmnd) {
++ req->scmnd = NULL;
++ } else {
++ scmnd = NULL;
++ }
++ spin_unlock_irqrestore(&target->lock, flags);
++
++ return scmnd;
++}
++
++/**
++ * srp_free_req() - Unmap data and add request to the free request list.
++ */
++static void srp_free_req(struct srp_target_port *target,
++ struct srp_request *req, struct scsi_cmnd *scmnd,
++ s32 req_lim_delta)
++{
++ unsigned long flags;
++
++ srp_unmap_data(scmnd, target, req);
++
+ spin_lock_irqsave(&target->lock, flags);
+ target->req_lim += req_lim_delta;
+- req->scmnd = NULL;
+ list_add_tail(&req->list, &target->free_reqs);
+ spin_unlock_irqrestore(&target->lock, flags);
+ }
+
+ static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
+ {
+- req->scmnd->result = DID_RESET << 16;
+- req->scmnd->scsi_done(req->scmnd);
+- srp_remove_req(target, req, 0);
++ struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
++
++ if (scmnd) {
++ scmnd->result = DID_RESET << 16;
++ scmnd->scsi_done(scmnd);
++ srp_free_req(target, req, scmnd, 0);
++ }
+ }
+
+ static int srp_reconnect_target(struct srp_target_port *target)
+@@ -1055,11 +1093,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
+ complete(&target->tsk_mgmt_done);
+ } else {
+ req = &target->req_ring[rsp->tag];
+- scmnd = req->scmnd;
+- if (!scmnd)
++ scmnd = srp_claim_req(target, req, NULL);
++ if (!scmnd) {
+ shost_printk(KERN_ERR, target->scsi_host,
+ "Null scmnd for RSP w/tag %016llx\n",
+ (unsigned long long) rsp->tag);
++
++ spin_lock_irqsave(&target->lock, flags);
++ target->req_lim += be32_to_cpu(rsp->req_lim_delta);
++ spin_unlock_irqrestore(&target->lock, flags);
++
++ return;
++ }
+ scmnd->result = rsp->status;
+
+ if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
+@@ -1074,7 +1119,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
+ else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
+ scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
+
+- srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
++ srp_free_req(target, req, scmnd,
++ be32_to_cpu(rsp->req_lim_delta));
++
+ scmnd->host_scribble = NULL;
+ scmnd->scsi_done(scmnd);
+ }
+@@ -1613,25 +1660,17 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+ {
+ struct srp_target_port *target = host_to_target(scmnd->device->host);
+ struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
+- int ret = SUCCESS;
+
+ shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
+
+- if (!req || target->qp_in_error)
+- return FAILED;
+- if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+- SRP_TSK_ABORT_TASK))
++ if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
+ return FAILED;
++ srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
++ SRP_TSK_ABORT_TASK);
++ srp_free_req(target, req, scmnd, 0);
++ scmnd->result = DID_ABORT << 16;
+
+- if (req->scmnd) {
+- if (!target->tsk_mgmt_status) {
+- srp_remove_req(target, req, 0);
+- scmnd->result = DID_ABORT << 16;
+- } else
+- ret = FAILED;
+- }
+-
+- return ret;
++ return SUCCESS;
+ }
+
+ static int srp_reset_device(struct scsi_cmnd *scmnd)
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index d8646d7..2887f22 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1144,8 +1144,11 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
+ ret = 0;
+ }
+ rdev->sectors = rdev->sb_start;
+- /* Limit to 4TB as metadata cannot record more than that */
+- if (rdev->sectors >= (2ULL << 32))
++ /* Limit to 4TB as metadata cannot record more than that.
++ * (not needed for Linear and RAID0 as metadata doesn't
++ * record this size)
++ */
++ if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
+ rdev->sectors = (2ULL << 32) - 2;
+
+ if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
+@@ -1427,7 +1430,7 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
+ /* Limit to 4TB as metadata cannot record more than that.
+ * 4TB == 2^32 KB, or 2*2^32 sectors.
+ */
+- if (num_sectors >= (2ULL << 32))
++ if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
+ num_sectors = (2ULL << 32) - 2;
+ md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+ rdev->sb_page);
+diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
+index fb68805..027550d 100644
+--- a/drivers/media/dvb/siano/smsusb.c
++++ b/drivers/media/dvb/siano/smsusb.c
+@@ -481,7 +481,7 @@ static int smsusb_resume(struct usb_interface *intf)
+ return 0;
+ }
+
+-static const struct usb_device_id smsusb_id_table[] __devinitconst = {
++static const struct usb_device_id smsusb_id_table[] = {
+ { USB_DEVICE(0x187f, 0x0010),
+ .driver_info = SMS1XXX_BOARD_SIANO_STELLAR },
+ { USB_DEVICE(0x187f, 0x0100),
+diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c
+index 89fec4c..731cd16 100644
+--- a/drivers/media/video/gspca/spca506.c
++++ b/drivers/media/video/gspca/spca506.c
+@@ -685,7 +685,7 @@ static const struct sd_desc sd_desc = {
+ };
+
+ /* -- module initialisation -- */
+-static const struct usb_device_id device_table[] __devinitconst = {
++static const struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x06e1, 0xa190)},
+ /*fixme: may be IntelPCCameraPro BRIDGE_SPCA505
+ {USB_DEVICE(0x0733, 0x0430)}, */
+diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
+index 17bbacb..cc2ae7e 100644
+--- a/drivers/misc/sgi-xp/xpc_uv.c
++++ b/drivers/misc/sgi-xp/xpc_uv.c
+@@ -18,6 +18,8 @@
+ #include <linux/interrupt.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
++#include <linux/cpu.h>
++#include <linux/module.h>
+ #include <linux/err.h>
+ #include <linux/slab.h>
+ #include <asm/uv/uv_hub.h>
+@@ -59,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
+ XPC_NOTIFY_MSG_SIZE_UV)
+ #define XPC_NOTIFY_IRQ_NAME "xpc_notify"
+
++static int xpc_mq_node = -1;
++
+ static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
+ static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
+
+@@ -109,11 +113,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
+ #if defined CONFIG_X86_64
+ mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
+ UV_AFFINITY_CPU);
+- if (mq->irq < 0) {
+- dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
+- -mq->irq);
++ if (mq->irq < 0)
+ return mq->irq;
+- }
+
+ mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
+
+@@ -238,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
+ mq->mmr_blade = uv_cpu_to_blade_id(cpu);
+
+ nid = cpu_to_node(cpu);
+- page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+- pg_order);
++ page = alloc_pages_exact_node(nid,
++ GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
++ pg_order);
+ if (page == NULL) {
+ dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
+ "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
+@@ -1731,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
+ .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
+ };
+
++static int
++xpc_init_mq_node(int nid)
++{
++ int cpu;
++
++ get_online_cpus();
++
++ for_each_cpu(cpu, cpumask_of_node(nid)) {
++ xpc_activate_mq_uv =
++ xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
++ XPC_ACTIVATE_IRQ_NAME,
++ xpc_handle_activate_IRQ_uv);
++ if (!IS_ERR(xpc_activate_mq_uv))
++ break;
++ }
++ if (IS_ERR(xpc_activate_mq_uv)) {
++ put_online_cpus();
++ return PTR_ERR(xpc_activate_mq_uv);
++ }
++
++ for_each_cpu(cpu, cpumask_of_node(nid)) {
++ xpc_notify_mq_uv =
++ xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
++ XPC_NOTIFY_IRQ_NAME,
++ xpc_handle_notify_IRQ_uv);
++ if (!IS_ERR(xpc_notify_mq_uv))
++ break;
++ }
++ if (IS_ERR(xpc_notify_mq_uv)) {
++ xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
++ put_online_cpus();
++ return PTR_ERR(xpc_notify_mq_uv);
++ }
++
++ put_online_cpus();
++ return 0;
++}
++
+ int
+ xpc_init_uv(void)
+ {
++ int nid;
++ int ret = 0;
++
+ xpc_arch_ops = xpc_arch_ops_uv;
+
+ if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
+@@ -1742,21 +1785,21 @@ xpc_init_uv(void)
+ return -E2BIG;
+ }
+
+- xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
+- XPC_ACTIVATE_IRQ_NAME,
+- xpc_handle_activate_IRQ_uv);
+- if (IS_ERR(xpc_activate_mq_uv))
+- return PTR_ERR(xpc_activate_mq_uv);
++ if (xpc_mq_node < 0)
++ for_each_online_node(nid) {
++ ret = xpc_init_mq_node(nid);
+
+- xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
+- XPC_NOTIFY_IRQ_NAME,
+- xpc_handle_notify_IRQ_uv);
+- if (IS_ERR(xpc_notify_mq_uv)) {
+- xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+- return PTR_ERR(xpc_notify_mq_uv);
+- }
++ if (!ret)
++ break;
++ }
++ else
++ ret = xpc_init_mq_node(xpc_mq_node);
+
+- return 0;
++ if (ret < 0)
++ dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
++ -ret);
++
++ return ret;
+ }
+
+ void
+@@ -1765,3 +1808,6 @@ xpc_exit_uv(void)
+ xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
+ xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+ }
++
++module_param(xpc_mq_node, int, 0);
++MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");
+diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
+index e888202..01b104e 100644
+--- a/drivers/net/netconsole.c
++++ b/drivers/net/netconsole.c
+@@ -652,7 +652,6 @@ static int netconsole_netdev_event(struct notifier_block *this,
+ flags);
+ dev_put(nt->np.dev);
+ nt->np.dev = NULL;
+- netconsole_target_put(nt);
+ }
+ nt->enabled = 0;
+ stopped = true;
+diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
+index e6d791c..b4cbc82 100644
+--- a/drivers/net/wireless/ath/ath9k/recv.c
++++ b/drivers/net/wireless/ath/ath9k/recv.c
+@@ -1782,7 +1782,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_hdr *hdr;
+ int retval;
+- bool decrypt_error = false;
+ struct ath_rx_status rs;
+ enum ath9k_rx_qtype qtype;
+ bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
+@@ -1804,6 +1803,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
+ tsf_lower = tsf & 0xffffffff;
+
+ do {
++ bool decrypt_error = false;
+ /* If handling rx interrupt and flush is in progress => exit */
+ if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
+ break;
+diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
+index 9b60968..8a009bc 100644
+--- a/drivers/net/wireless/p54/p54usb.c
++++ b/drivers/net/wireless/p54/p54usb.c
+@@ -42,7 +42,7 @@ MODULE_FIRMWARE("isl3887usb");
+ * whenever you add a new device.
+ */
+
+-static struct usb_device_id p54u_table[] __devinitdata = {
++static struct usb_device_id p54u_table[] = {
+ /* Version 1 devices (pci chip + net2280) */
+ {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */
+ {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */
+diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
+index 4a78f9e..4e98c39 100644
+--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
++++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
+@@ -44,7 +44,7 @@ MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
+ MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver");
+ MODULE_LICENSE("GPL");
+
+-static struct usb_device_id rtl8187_table[] __devinitdata = {
++static struct usb_device_id rtl8187_table[] = {
+ /* Asus */
+ {USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187},
+ /* Belkin */
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index d024f83..68af94c 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -952,6 +952,13 @@ static int pci_pm_poweroff_noirq(struct device *dev)
+ if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
+ pci_prepare_to_sleep(pci_dev);
+
++ /*
++ * The reason for doing this here is the same as for the analogous code
++ * in pci_pm_suspend_noirq().
++ */
++ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
++ pci_write_config_word(pci_dev, PCI_COMMAND, 0);
++
+ return 0;
+ }
+
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index b0859d4..ec5b17f 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -86,6 +86,10 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
+ { KE_KEY, 0x8A, { KEY_PROG1 } },
+ { KE_KEY, 0x95, { KEY_MEDIA } },
+ { KE_KEY, 0x99, { KEY_PHONE } },
++ { KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */
++ { KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */
++ { KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */
++ { KE_KEY, 0xA3, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + HDMI */
+ { KE_KEY, 0xb5, { KEY_CALC } },
+ { KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
+ { KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
+diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
+index 30d2072..33471e1 100644
+--- a/drivers/rapidio/devices/tsi721.c
++++ b/drivers/rapidio/devices/tsi721.c
+@@ -439,6 +439,9 @@ static void tsi721_db_dpc(struct work_struct *work)
+ " info %4.4x\n", DBELL_SID(idb.bytes),
+ DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
+ }
++
++ wr_ptr = ioread32(priv->regs +
++ TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
+ }
+
+ iowrite32(rd_ptr & (IDB_QSIZE - 1),
+@@ -449,6 +452,10 @@ static void tsi721_db_dpc(struct work_struct *work)
+ regval |= TSI721_SR_CHINT_IDBQRCV;
+ iowrite32(regval,
+ priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
++
++ wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
++ if (wr_ptr != rd_ptr)
++ schedule_work(&priv->idb_work);
+ }
+
+ /**
+@@ -2155,7 +2162,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+ {
+ struct tsi721_device *priv;
+- int i, cap;
++ int cap;
+ int err;
+ u32 regval;
+
+@@ -2175,12 +2182,15 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
+ priv->pdev = pdev;
+
+ #ifdef DEBUG
++ {
++ int i;
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n",
+ i, (unsigned long long)pci_resource_start(pdev, i),
+ (unsigned long)pci_resource_len(pdev, i),
+ pci_resource_flags(pdev, i));
+ }
++ }
+ #endif
+ /*
+ * Verify BAR configuration
+diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
+index 971bc8e..11bcb20 100644
+--- a/drivers/rtc/rtc-rs5c348.c
++++ b/drivers/rtc/rtc-rs5c348.c
+@@ -122,9 +122,12 @@ rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK);
+ tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK);
+ if (!pdata->rtc_24h) {
+- tm->tm_hour %= 12;
+- if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM)
++ if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) {
++ tm->tm_hour -= 20;
++ tm->tm_hour %= 12;
+ tm->tm_hour += 12;
++ } else
++ tm->tm_hour %= 12;
+ }
+ tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK);
+ tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK);
+diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
+index 8be5604..0d70f68 100644
+--- a/drivers/staging/speakup/main.c
++++ b/drivers/staging/speakup/main.c
+@@ -1854,7 +1854,7 @@ static void speakup_bits(struct vc_data *vc)
+
+ static int handle_goto(struct vc_data *vc, u_char type, u_char ch, u_short key)
+ {
+- static u_char *goto_buf = "\0\0\0\0\0\0";
++ static u_char goto_buf[8];
+ static int num;
+ int maxlen, go_pos;
+ char *cp;
+diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
+index 27521b6..ae62d57 100644
+--- a/drivers/staging/vt6656/main_usb.c
++++ b/drivers/staging/vt6656/main_usb.c
+@@ -222,7 +222,7 @@ DEVICE_PARAM(b80211hEnable, "802.11h mode");
+ // Static vars definitions
+ //
+
+-static struct usb_device_id vt6656_table[] __devinitdata = {
++static struct usb_device_id vt6656_table[] = {
+ {USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)},
+ {}
+ };
+diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
+index f958eb4..3f0ce2b 100644
+--- a/drivers/staging/winbond/wbusb.c
++++ b/drivers/staging/winbond/wbusb.c
+@@ -25,7 +25,7 @@ MODULE_DESCRIPTION("IS89C35 802.11bg WLAN USB Driver");
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION("0.1");
+
+-static const struct usb_device_id wb35_table[] __devinitconst = {
++static const struct usb_device_id wb35_table[] = {
+ { USB_DEVICE(0x0416, 0x0035) },
+ { USB_DEVICE(0x18E8, 0x6201) },
+ { USB_DEVICE(0x18E8, 0x6206) },
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 94c03d2..597fb9b 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -3509,9 +3509,9 @@ transport_generic_get_mem(struct se_cmd *cmd)
+ return 0;
+
+ out:
+- while (i >= 0) {
+- __free_page(sg_page(&cmd->t_data_sg[i]));
++ while (i > 0) {
+ i--;
++ __free_page(sg_page(&cmd->t_data_sg[i]));
+ }
+ kfree(cmd->t_data_sg);
+ cmd->t_data_sg = NULL;
+diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
+index 5acd24a..086f7fe 100644
+--- a/drivers/tty/serial/pmac_zilog.c
++++ b/drivers/tty/serial/pmac_zilog.c
+@@ -1407,10 +1407,16 @@ static int pmz_verify_port(struct uart_port *port, struct serial_struct *ser)
+ static int pmz_poll_get_char(struct uart_port *port)
+ {
+ struct uart_pmac_port *uap = (struct uart_pmac_port *)port;
++ int tries = 2;
+
+- while ((read_zsreg(uap, R0) & Rx_CH_AV) == 0)
+- udelay(5);
+- return read_zsdata(uap);
++ while (tries) {
++ if ((read_zsreg(uap, R0) & Rx_CH_AV) != 0)
++ return read_zsdata(uap);
++ if (tries--)
++ udelay(5);
++ }
++
++ return NO_POLL_CHAR;
+ }
+
+ static void pmz_poll_put_char(struct uart_port *port, unsigned char c)
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 1094469..dbf7d20 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1043,7 +1043,8 @@ skip_normal_probe:
+ }
+
+
+- if (data_interface->cur_altsetting->desc.bNumEndpoints < 2)
++ if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 ||
++ control_interface->cur_altsetting->desc.bNumEndpoints == 0)
+ return -EINVAL;
+
+ epctrl = &control_interface->cur_altsetting->endpoint[0].desc;
+diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
+index 4e1f0aa..9a2a1ae 100644
+--- a/drivers/usb/gadget/u_ether.c
++++ b/drivers/usb/gadget/u_ether.c
+@@ -669,6 +669,8 @@ static int eth_stop(struct net_device *net)
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->port_usb) {
+ struct gether *link = dev->port_usb;
++ const struct usb_endpoint_descriptor *in;
++ const struct usb_endpoint_descriptor *out;
+
+ if (link->close)
+ link->close(link);
+@@ -682,10 +684,14 @@ static int eth_stop(struct net_device *net)
+ * their own pace; the network stack can handle old packets.
+ * For the moment we leave this here, since it works.
+ */
++ in = link->in_ep->desc;
++ out = link->out_ep->desc;
+ usb_ep_disable(link->in_ep);
+ usb_ep_disable(link->out_ep);
+ if (netif_carrier_ok(net)) {
+ DBG(dev, "host still using in/out endpoints\n");
++ link->in_ep->desc = in;
++ link->out_ep->desc = out;
+ usb_ep_enable(link->in_ep);
+ usb_ep_enable(link->out_ep);
+ }
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index daf5754..07c72a4 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -95,6 +95,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
++ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_VIA)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 05f82e9..f7c0a2a 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -163,7 +163,7 @@ int xhci_reset(struct xhci_hcd *xhci)
+ xhci_writel(xhci, command, &xhci->op_regs->command);
+
+ ret = handshake(xhci, &xhci->op_regs->command,
+- CMD_RESET, 0, 250 * 1000);
++ CMD_RESET, 0, 10 * 1000 * 1000);
+ if (ret)
+ return ret;
+
+@@ -172,7 +172,8 @@ int xhci_reset(struct xhci_hcd *xhci)
+ * xHCI cannot write to any doorbells or operational registers other
+ * than status until the "Controller Not Ready" flag is cleared.
+ */
+- return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
++ return handshake(xhci, &xhci->op_regs->status,
++ STS_CNR, 0, 10 * 1000 * 1000);
+ }
+
+ #ifdef CONFIG_PCI
+diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
+index fc15ad4..723e833 100644
+--- a/drivers/usb/misc/emi62.c
++++ b/drivers/usb/misc/emi62.c
+@@ -259,7 +259,7 @@ wraperr:
+ return err;
+ }
+
+-static const struct usb_device_id id_table[] __devinitconst = {
++static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) },
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 4045e39..b3182bb 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -811,6 +811,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
+ { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
+ { USB_DEVICE(PI_VID, PI_E861_PID) },
++ { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) },
+ { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
+ { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index d27d7d7..54b4258 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -795,6 +795,13 @@
+ #define PI_E861_PID 0x1008 /* E-861 piezo controller USB connection */
+
+ /*
++ * Kondo Kagaku Co.Ltd.
++ * http://www.kondo-robot.com/EN
++ */
++#define KONDO_VID 0x165c
++#define KONDO_USB_SERIAL_PID 0x0002
++
++/*
+ * Bayer Ascensia Contour blood glucose meter USB-converter cable.
+ * http://winglucofacts.com/cables/
+ */
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 5c7d654..b150ed9 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -1191,9 +1191,12 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)
+ }
+
+ spin_lock_irqsave(&mos7840_port->pool_lock, flags);
+- for (i = 0; i < NUM_URBS; ++i)
+- if (mos7840_port->busy[i])
+- chars += URB_TRANSFER_BUFFER_SIZE;
++ for (i = 0; i < NUM_URBS; ++i) {
++ if (mos7840_port->busy[i]) {
++ struct urb *urb = mos7840_port->write_urb_pool[i];
++ chars += urb->transfer_buffer_length;
++ }
++ }
+ spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
+ dbg("%s - returns %d", __func__, chars);
+ return chars;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index d89aac1..113560d 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -80,84 +80,9 @@ static void option_instat_callback(struct urb *urb);
+ #define OPTION_PRODUCT_GTM380_MODEM 0x7201
+
+ #define HUAWEI_VENDOR_ID 0x12D1
+-#define HUAWEI_PRODUCT_E600 0x1001
+-#define HUAWEI_PRODUCT_E220 0x1003
+-#define HUAWEI_PRODUCT_E220BIS 0x1004
+-#define HUAWEI_PRODUCT_E1401 0x1401
+-#define HUAWEI_PRODUCT_E1402 0x1402
+-#define HUAWEI_PRODUCT_E1403 0x1403
+-#define HUAWEI_PRODUCT_E1404 0x1404
+-#define HUAWEI_PRODUCT_E1405 0x1405
+-#define HUAWEI_PRODUCT_E1406 0x1406
+-#define HUAWEI_PRODUCT_E1407 0x1407
+-#define HUAWEI_PRODUCT_E1408 0x1408
+-#define HUAWEI_PRODUCT_E1409 0x1409
+-#define HUAWEI_PRODUCT_E140A 0x140A
+-#define HUAWEI_PRODUCT_E140B 0x140B
+-#define HUAWEI_PRODUCT_E140C 0x140C
+-#define HUAWEI_PRODUCT_E140D 0x140D
+-#define HUAWEI_PRODUCT_E140E 0x140E
+-#define HUAWEI_PRODUCT_E140F 0x140F
+-#define HUAWEI_PRODUCT_E1410 0x1410
+-#define HUAWEI_PRODUCT_E1411 0x1411
+-#define HUAWEI_PRODUCT_E1412 0x1412
+-#define HUAWEI_PRODUCT_E1413 0x1413
+-#define HUAWEI_PRODUCT_E1414 0x1414
+-#define HUAWEI_PRODUCT_E1415 0x1415
+-#define HUAWEI_PRODUCT_E1416 0x1416
+-#define HUAWEI_PRODUCT_E1417 0x1417
+-#define HUAWEI_PRODUCT_E1418 0x1418
+-#define HUAWEI_PRODUCT_E1419 0x1419
+-#define HUAWEI_PRODUCT_E141A 0x141A
+-#define HUAWEI_PRODUCT_E141B 0x141B
+-#define HUAWEI_PRODUCT_E141C 0x141C
+-#define HUAWEI_PRODUCT_E141D 0x141D
+-#define HUAWEI_PRODUCT_E141E 0x141E
+-#define HUAWEI_PRODUCT_E141F 0x141F
+-#define HUAWEI_PRODUCT_E1420 0x1420
+-#define HUAWEI_PRODUCT_E1421 0x1421
+-#define HUAWEI_PRODUCT_E1422 0x1422
+-#define HUAWEI_PRODUCT_E1423 0x1423
+-#define HUAWEI_PRODUCT_E1424 0x1424
+-#define HUAWEI_PRODUCT_E1425 0x1425
+-#define HUAWEI_PRODUCT_E1426 0x1426
+-#define HUAWEI_PRODUCT_E1427 0x1427
+-#define HUAWEI_PRODUCT_E1428 0x1428
+-#define HUAWEI_PRODUCT_E1429 0x1429
+-#define HUAWEI_PRODUCT_E142A 0x142A
+-#define HUAWEI_PRODUCT_E142B 0x142B
+-#define HUAWEI_PRODUCT_E142C 0x142C
+-#define HUAWEI_PRODUCT_E142D 0x142D
+-#define HUAWEI_PRODUCT_E142E 0x142E
+-#define HUAWEI_PRODUCT_E142F 0x142F
+-#define HUAWEI_PRODUCT_E1430 0x1430
+-#define HUAWEI_PRODUCT_E1431 0x1431
+-#define HUAWEI_PRODUCT_E1432 0x1432
+-#define HUAWEI_PRODUCT_E1433 0x1433
+-#define HUAWEI_PRODUCT_E1434 0x1434
+-#define HUAWEI_PRODUCT_E1435 0x1435
+-#define HUAWEI_PRODUCT_E1436 0x1436
+-#define HUAWEI_PRODUCT_E1437 0x1437
+-#define HUAWEI_PRODUCT_E1438 0x1438
+-#define HUAWEI_PRODUCT_E1439 0x1439
+-#define HUAWEI_PRODUCT_E143A 0x143A
+-#define HUAWEI_PRODUCT_E143B 0x143B
+-#define HUAWEI_PRODUCT_E143C 0x143C
+-#define HUAWEI_PRODUCT_E143D 0x143D
+-#define HUAWEI_PRODUCT_E143E 0x143E
+-#define HUAWEI_PRODUCT_E143F 0x143F
+ #define HUAWEI_PRODUCT_K4505 0x1464
+ #define HUAWEI_PRODUCT_K3765 0x1465
+-#define HUAWEI_PRODUCT_E14AC 0x14AC
+-#define HUAWEI_PRODUCT_K3806 0x14AE
+ #define HUAWEI_PRODUCT_K4605 0x14C6
+-#define HUAWEI_PRODUCT_K3770 0x14C9
+-#define HUAWEI_PRODUCT_K3771 0x14CA
+-#define HUAWEI_PRODUCT_K4510 0x14CB
+-#define HUAWEI_PRODUCT_K4511 0x14CC
+-#define HUAWEI_PRODUCT_ETS1220 0x1803
+-#define HUAWEI_PRODUCT_E353 0x1506
+-#define HUAWEI_PRODUCT_E173S 0x1C05
+
+ #define QUANTA_VENDOR_ID 0x0408
+ #define QUANTA_PRODUCT_Q101 0xEA02
+@@ -614,101 +539,123 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */
+- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
++
++
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index 8745637..bf9a9b7 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -373,8 +373,15 @@ static void fb_flashcursor(struct work_struct *work)
+ struct vc_data *vc = NULL;
+ int c;
+ int mode;
++ int ret;
++
++ /* FIXME: we should sort out the unbind locking instead */
++ /* instead we just fail to flash the cursor if we can't get
++ * the lock instead of blocking fbcon deinit */
++ ret = console_trylock();
++ if (ret == 0)
++ return;
+
+- console_lock();
+ if (ops && ops->currcon != -1)
+ vc = vc_cons[ops->currcon].d;
+
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 4115eca..19a4f0b 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -964,7 +964,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head)
+ /*
+ * Initialise the state of a blockdev page's buffers.
+ */
+-static void
++static sector_t
+ init_page_buffers(struct page *page, struct block_device *bdev,
+ sector_t block, int size)
+ {
+@@ -986,33 +986,41 @@ init_page_buffers(struct page *page, struct block_device *bdev,
+ block++;
+ bh = bh->b_this_page;
+ } while (bh != head);
++
++ /*
++ * Caller needs to validate requested block against end of device.
++ */
++ return end_block;
+ }
+
+ /*
+ * Create the page-cache page that contains the requested block.
+ *
+- * This is user purely for blockdev mappings.
++ * This is used purely for blockdev mappings.
+ */
+-static struct page *
++static int
+ grow_dev_page(struct block_device *bdev, sector_t block,
+- pgoff_t index, int size)
++ pgoff_t index, int size, int sizebits)
+ {
+ struct inode *inode = bdev->bd_inode;
+ struct page *page;
+ struct buffer_head *bh;
++ sector_t end_block;
++ int ret = 0; /* Will call free_more_memory() */
+
+ page = find_or_create_page(inode->i_mapping, index,
+ (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
+ if (!page)
+- return NULL;
++ return ret;
+
+ BUG_ON(!PageLocked(page));
+
+ if (page_has_buffers(page)) {
+ bh = page_buffers(page);
+ if (bh->b_size == size) {
+- init_page_buffers(page, bdev, block, size);
+- return page;
++ end_block = init_page_buffers(page, bdev,
++ index << sizebits, size);
++ goto done;
+ }
+ if (!try_to_free_buffers(page))
+ goto failed;
+@@ -1032,15 +1040,14 @@ grow_dev_page(struct block_device *bdev, sector_t block,
+ */
+ spin_lock(&inode->i_mapping->private_lock);
+ link_dev_buffers(page, bh);
+- init_page_buffers(page, bdev, block, size);
++ end_block = init_page_buffers(page, bdev, index << sizebits, size);
+ spin_unlock(&inode->i_mapping->private_lock);
+- return page;
+-
++done:
++ ret = (block < end_block) ? 1 : -ENXIO;
+ failed:
+- BUG();
+ unlock_page(page);
+ page_cache_release(page);
+- return NULL;
++ return ret;
+ }
+
+ /*
+@@ -1050,7 +1057,6 @@ failed:
+ static int
+ grow_buffers(struct block_device *bdev, sector_t block, int size)
+ {
+- struct page *page;
+ pgoff_t index;
+ int sizebits;
+
+@@ -1074,22 +1080,14 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
+ bdevname(bdev, b));
+ return -EIO;
+ }
+- block = index << sizebits;
++
+ /* Create a page with the proper size buffers.. */
+- page = grow_dev_page(bdev, block, index, size);
+- if (!page)
+- return 0;
+- unlock_page(page);
+- page_cache_release(page);
+- return 1;
++ return grow_dev_page(bdev, block, index, size, sizebits);
+ }
+
+ static struct buffer_head *
+ __getblk_slow(struct block_device *bdev, sector_t block, int size)
+ {
+- int ret;
+- struct buffer_head *bh;
+-
+ /* Size must be multiple of hard sectorsize */
+ if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
+ (size < 512 || size > PAGE_SIZE))) {
+@@ -1102,21 +1100,20 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
+ return NULL;
+ }
+
+-retry:
+- bh = __find_get_block(bdev, block, size);
+- if (bh)
+- return bh;
++ for (;;) {
++ struct buffer_head *bh;
++ int ret;
+
+- ret = grow_buffers(bdev, block, size);
+- if (ret == 0) {
+- free_more_memory();
+- goto retry;
+- } else if (ret > 0) {
+ bh = __find_get_block(bdev, block, size);
+ if (bh)
+ return bh;
++
++ ret = grow_buffers(bdev, block, size);
++ if (ret < 0)
++ return NULL;
++ if (ret == 0)
++ free_more_memory();
+ }
+- return NULL;
+ }
+
+ /*
+@@ -1372,10 +1369,6 @@ EXPORT_SYMBOL(__find_get_block);
+ * which corresponds to the passed block_device, block and size. The
+ * returned buffer has its reference count incremented.
+ *
+- * __getblk() cannot fail - it just keeps trying. If you pass it an
+- * illegal block number, __getblk() will happily return a buffer_head
+- * which represents the non-existent block. Very weird.
+- *
+ * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
+ * attempt is failing. FIXME, perhaps?
+ */
+diff --git a/fs/compat.c b/fs/compat.c
+index c987875..e07a3d3 100644
+--- a/fs/compat.c
++++ b/fs/compat.c
+@@ -1174,11 +1174,14 @@ compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec,
+ struct file *file;
+ int fput_needed;
+ ssize_t ret;
++ loff_t pos;
+
+ file = fget_light(fd, &fput_needed);
+ if (!file)
+ return -EBADF;
+- ret = compat_readv(file, vec, vlen, &file->f_pos);
++ pos = file->f_pos;
++ ret = compat_readv(file, vec, vlen, &pos);
++ file->f_pos = pos;
+ fput_light(file, fput_needed);
+ return ret;
+ }
+@@ -1233,11 +1236,14 @@ compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec,
+ struct file *file;
+ int fput_needed;
+ ssize_t ret;
++ loff_t pos;
+
+ file = fget_light(fd, &fput_needed);
+ if (!file)
+ return -EBADF;
+- ret = compat_writev(file, vec, vlen, &file->f_pos);
++ pos = file->f_pos;
++ ret = compat_writev(file, vec, vlen, &pos);
++ file->f_pos = pos;
+ fput_light(file, fput_needed);
+ return ret;
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index a071348..f8d5fce 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -904,6 +904,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
+ ei->i_reserved_meta_blocks = 0;
+ ei->i_allocated_meta_blocks = 0;
+ ei->i_da_metadata_calc_len = 0;
++ ei->i_da_metadata_calc_last_lblock = 0;
+ spin_lock_init(&(ei->i_block_reservation_lock));
+ #ifdef CONFIG_QUOTA
+ ei->i_reserved_quota = 0;
+@@ -3107,6 +3108,10 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
+ ext4_group_t i, ngroups = ext4_get_groups_count(sb);
+ int s, j, count = 0;
+
++ if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC))
++ return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
++ sbi->s_itb_per_group + 2);
++
+ first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
+ (grp * EXT4_BLOCKS_PER_GROUP(sb));
+ last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 0c84100..5242006 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1687,7 +1687,7 @@ static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
+ size_t n;
+ u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
+
+- for (n = 0; n < count; n++) {
++ for (n = 0; n < count; n++, iov++) {
+ if (iov->iov_len > (size_t) max)
+ return -ENOMEM;
+ max -= iov->iov_len;
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index 3db6b82..d774309 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -38,6 +38,8 @@
+ #include <linux/buffer_head.h> /* various write calls */
+ #include <linux/prefetch.h>
+
++#include "../pnfs.h"
++#include "../internal.h"
+ #include "blocklayout.h"
+
+ #define NFSDBG_FACILITY NFSDBG_PNFS_LD
+@@ -814,7 +816,7 @@ nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
+ * GETDEVICEINFO's maxcount
+ */
+ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+- max_pages = max_resp_sz >> PAGE_SHIFT;
++ max_pages = nfs_page_array_len(0, max_resp_sz);
+ dprintk("%s max_resp_sz %u max_pages %d\n",
+ __func__, max_resp_sz, max_pages);
+
+diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
+index c69682a..4e2ee99 100644
+--- a/fs/nfs/blocklayout/extents.c
++++ b/fs/nfs/blocklayout/extents.c
+@@ -153,7 +153,7 @@ static int _preload_range(struct pnfs_inval_markings *marks,
+ count = (int)(end - start) / (int)tree->mtt_step_size;
+
+ /* Pre-malloc what memory we might need */
+- storage = kmalloc(sizeof(*storage) * count, GFP_NOFS);
++ storage = kcalloc(count, sizeof(*storage), GFP_NOFS);
+ if (!storage)
+ return -ENOMEM;
+ for (i = 0; i < count; i++) {
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index ac28990..756f4df 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1103,7 +1103,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
+ struct nfs_fattr *fattr = NULL;
+ int error;
+
+- if (nd->flags & LOOKUP_RCU)
++ if (nd && (nd->flags & LOOKUP_RCU))
+ return -ECHILD;
+
+ parent = dget_parent(dentry);
+@@ -1508,7 +1508,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
+ struct nfs_open_context *ctx;
+ int openflags, ret = 0;
+
+- if (nd->flags & LOOKUP_RCU)
++ if (nd && (nd->flags & LOOKUP_RCU))
+ return -ECHILD;
+
+ inode = dentry->d_inode;
+diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
+index d4bc9ed9..5195fd6 100644
+--- a/fs/nfs/nfs3proc.c
++++ b/fs/nfs/nfs3proc.c
+@@ -68,7 +68,7 @@ do_proc_get_root(struct rpc_clnt *client, struct nfs_fh *fhandle,
+ nfs_fattr_init(info->fattr);
+ status = rpc_call_sync(client, &msg, 0);
+ dprintk("%s: reply fsinfo: %d\n", __func__, status);
+- if (!(info->fattr->valid & NFS_ATTR_FATTR)) {
++ if (status == 0 && !(info->fattr->valid & NFS_ATTR_FATTR)) {
+ msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
+ msg.rpc_resp = info->fattr;
+ status = rpc_call_sync(client, &msg, 0);
+diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
+index ed388aa..bd5d9cf 100644
+--- a/fs/nfs/nfs4filelayoutdev.c
++++ b/fs/nfs/nfs4filelayoutdev.c
+@@ -721,7 +721,7 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_fla
+ * GETDEVICEINFO's maxcount
+ */
+ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+- max_pages = max_resp_sz >> PAGE_SHIFT;
++ max_pages = nfs_page_array_len(0, max_resp_sz);
+ dprintk("%s inode %p max_resp_sz %u max_pages %d\n",
+ __func__, inode, max_resp_sz, max_pages);
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 8000459..d20221d 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5769,11 +5769,58 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
+ dprintk("<-- %s\n", __func__);
+ }
+
++static size_t max_response_pages(struct nfs_server *server)
++{
++ u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
++ return nfs_page_array_len(0, max_resp_sz);
++}
++
++static void nfs4_free_pages(struct page **pages, size_t size)
++{
++ int i;
++
++ if (!pages)
++ return;
++
++ for (i = 0; i < size; i++) {
++ if (!pages[i])
++ break;
++ __free_page(pages[i]);
++ }
++ kfree(pages);
++}
++
++static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
++{
++ struct page **pages;
++ int i;
++
++ pages = kcalloc(size, sizeof(struct page *), gfp_flags);
++ if (!pages) {
++ dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
++ return NULL;
++ }
++
++ for (i = 0; i < size; i++) {
++ pages[i] = alloc_page(gfp_flags);
++ if (!pages[i]) {
++ dprintk("%s: failed to allocate page\n", __func__);
++ nfs4_free_pages(pages, size);
++ return NULL;
++ }
++ }
++
++ return pages;
++}
++
+ static void nfs4_layoutget_release(void *calldata)
+ {
+ struct nfs4_layoutget *lgp = calldata;
++ struct nfs_server *server = NFS_SERVER(lgp->args.inode);
++ size_t max_pages = max_response_pages(server);
+
+ dprintk("--> %s\n", __func__);
++ nfs4_free_pages(lgp->args.layout.pages, max_pages);
+ put_nfs_open_context(lgp->args.ctx);
+ kfree(calldata);
+ dprintk("<-- %s\n", __func__);
+@@ -5785,9 +5832,10 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
+ .rpc_release = nfs4_layoutget_release,
+ };
+
+-int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
++int nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
+ {
+ struct nfs_server *server = NFS_SERVER(lgp->args.inode);
++ size_t max_pages = max_response_pages(server);
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
+@@ -5805,6 +5853,13 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
+
+ dprintk("--> %s\n", __func__);
+
++ lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
++ if (!lgp->args.layout.pages) {
++ nfs4_layoutget_release(lgp);
++ return -ENOMEM;
++ }
++ lgp->args.layout.pglen = max_pages * PAGE_SIZE;
++
+ lgp->res.layoutp = &lgp->args.layout;
+ lgp->res.seq_res.sr_slot = NULL;
+ task = rpc_run_task(&task_setup_data);
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index f881a63..3ad6595 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -575,9 +575,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
+ struct nfs_server *server = NFS_SERVER(ino);
+ struct nfs4_layoutget *lgp;
+ struct pnfs_layout_segment *lseg = NULL;
+- struct page **pages = NULL;
+- int i;
+- u32 max_resp_sz, max_pages;
+
+ dprintk("--> %s\n", __func__);
+
+@@ -586,20 +583,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
+ if (lgp == NULL)
+ return NULL;
+
+- /* allocate pages for xdr post processing */
+- max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+- max_pages = max_resp_sz >> PAGE_SHIFT;
+-
+- pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
+- if (!pages)
+- goto out_err_free;
+-
+- for (i = 0; i < max_pages; i++) {
+- pages[i] = alloc_page(gfp_flags);
+- if (!pages[i])
+- goto out_err_free;
+- }
+-
+ lgp->args.minlength = PAGE_CACHE_SIZE;
+ if (lgp->args.minlength > range->length)
+ lgp->args.minlength = range->length;
+@@ -608,39 +591,19 @@ send_layoutget(struct pnfs_layout_hdr *lo,
+ lgp->args.type = server->pnfs_curr_ld->id;
+ lgp->args.inode = ino;
+ lgp->args.ctx = get_nfs_open_context(ctx);
+- lgp->args.layout.pages = pages;
+- lgp->args.layout.pglen = max_pages * PAGE_SIZE;
+ lgp->lsegpp = &lseg;
+ lgp->gfp_flags = gfp_flags;
+
+ /* Synchronously retrieve layout information from server and
+ * store in lseg.
+ */
+- nfs4_proc_layoutget(lgp);
++ nfs4_proc_layoutget(lgp, gfp_flags);
+ if (!lseg) {
+ /* remember that LAYOUTGET failed and suspend trying */
+ set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
+ }
+
+- /* free xdr pages */
+- for (i = 0; i < max_pages; i++)
+- __free_page(pages[i]);
+- kfree(pages);
+-
+ return lseg;
+-
+-out_err_free:
+- /* free any allocated xdr pages, lgp as it's not used */
+- if (pages) {
+- for (i = 0; i < max_pages; i++) {
+- if (!pages[i])
+- break;
+- __free_page(pages[i]);
+- }
+- kfree(pages);
+- }
+- kfree(lgp);
+- return NULL;
+ }
+
+ /* Initiates a LAYOUTRETURN(FILE) */
+diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
+index 53d593a..c946b1b 100644
+--- a/fs/nfs/pnfs.h
++++ b/fs/nfs/pnfs.h
+@@ -162,7 +162,7 @@ extern int nfs4_proc_getdevicelist(struct nfs_server *server,
+ struct pnfs_devicelist *devlist);
+ extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
+ struct pnfs_device *dev);
+-extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
++extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags);
+ extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
+
+ /* pnfs.c */
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 376cd65..6e85ec6 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -3087,4 +3087,6 @@ static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
+ return res;
+ }
+
++MODULE_ALIAS("nfs4");
++
+ #endif /* CONFIG_NFS_V4 */
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index c6e523a..301391a 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1742,12 +1742,12 @@ int __init nfs_init_writepagecache(void)
+ nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
+ nfs_wdata_cachep);
+ if (nfs_wdata_mempool == NULL)
+- return -ENOMEM;
++ goto out_destroy_write_cache;
+
+ nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
+ nfs_wdata_cachep);
+ if (nfs_commit_mempool == NULL)
+- return -ENOMEM;
++ goto out_destroy_write_mempool;
+
+ /*
+ * NFS congestion size, scale with available memory.
+@@ -1770,6 +1770,12 @@ int __init nfs_init_writepagecache(void)
+ nfs_congestion_kb = 256*1024;
+
+ return 0;
++
++out_destroy_write_mempool:
++ mempool_destroy(nfs_wdata_mempool);
++out_destroy_write_cache:
++ kmem_cache_destroy(nfs_wdata_cachep);
++ return -ENOMEM;
+ }
+
+ void nfs_destroy_writepagecache(void)
+diff --git a/fs/open.c b/fs/open.c
+index e2b5d51..b8485d3 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -882,9 +882,10 @@ static inline int build_open_flags(int flags, int mode, struct open_flags *op)
+ int lookup_flags = 0;
+ int acc_mode;
+
+- if (!(flags & O_CREAT))
+- mode = 0;
+- op->mode = mode;
++ if (flags & O_CREAT)
++ op->mode = (mode & S_IALLUGO) | S_IFREG;
++ else
++ op->mode = 0;
+
+ /* Must never be set by userspace */
+ flags &= ~FMODE_NONOTIFY;
+diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
+index 2da1715..4619247 100644
+--- a/fs/squashfs/super.c
++++ b/fs/squashfs/super.c
+@@ -290,7 +290,7 @@ handle_fragments:
+
+ check_directory_table:
+ /* Sanity check directory_table */
+- if (msblk->directory_table >= next_table) {
++ if (msblk->directory_table > next_table) {
+ err = -EINVAL;
+ goto failed_mount;
+ }
+diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
+index 580a6d3..c04e0db 100644
+--- a/include/asm-generic/mutex-xchg.h
++++ b/include/asm-generic/mutex-xchg.h
+@@ -26,7 +26,13 @@ static inline void
+ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+ {
+ if (unlikely(atomic_xchg(count, 0) != 1))
+- fail_fn(count);
++ /*
++ * We failed to acquire the lock, so mark it contended
++ * to ensure that any waiting tasks are woken up by the
++ * unlock slow path.
++ */
++ if (likely(atomic_xchg(count, -1) != 1))
++ fail_fn(count);
+ }
+
+ /**
+@@ -43,7 +49,8 @@ static inline int
+ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+ {
+ if (unlikely(atomic_xchg(count, 0) != 1))
+- return fail_fn(count);
++ if (likely(atomic_xchg(count, -1) != 1))
++ return fail_fn(count);
+ return 0;
+ }
+
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 4269c3f..93629fc 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -775,6 +775,27 @@ static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size)
+ .bInterfaceSubClass = (sc), \
+ .bInterfaceProtocol = (pr)
+
++/**
++ * USB_VENDOR_AND_INTERFACE_INFO - describe a specific usb vendor with a class of usb interfaces
++ * @vend: the 16 bit USB Vendor ID
++ * @cl: bInterfaceClass value
++ * @sc: bInterfaceSubClass value
++ * @pr: bInterfaceProtocol value
++ *
++ * This macro is used to create a struct usb_device_id that matches a
++ * specific vendor with a specific class of interfaces.
++ *
++ * This is especially useful when explicitly matching devices that have
++ * vendor specific bDeviceClass values, but standards-compliant interfaces.
++ */
++#define USB_VENDOR_AND_INTERFACE_INFO(vend, cl, sc, pr) \
++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
++ | USB_DEVICE_ID_MATCH_VENDOR, \
++ .idVendor = (vend), \
++ .bInterfaceClass = (cl), \
++ .bInterfaceSubClass = (sc), \
++ .bInterfaceProtocol = (pr)
++
+ /* ----------------------------------------------------------------------- */
+
+ /* Stuff for dynamic usb ids */
+diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
+index 5bf0790..31fdc48 100644
+--- a/kernel/audit_tree.c
++++ b/kernel/audit_tree.c
+@@ -250,7 +250,6 @@ static void untag_chunk(struct node *p)
+ spin_unlock(&hash_lock);
+ spin_unlock(&entry->lock);
+ fsnotify_destroy_mark(entry);
+- fsnotify_put_mark(entry);
+ goto out;
+ }
+
+@@ -259,7 +258,7 @@ static void untag_chunk(struct node *p)
+
+ fsnotify_duplicate_mark(&new->mark, entry);
+ if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
+- free_chunk(new);
++ fsnotify_put_mark(&new->mark);
+ goto Fallback;
+ }
+
+@@ -293,7 +292,6 @@ static void untag_chunk(struct node *p)
+ spin_unlock(&hash_lock);
+ spin_unlock(&entry->lock);
+ fsnotify_destroy_mark(entry);
+- fsnotify_put_mark(entry);
+ goto out;
+
+ Fallback:
+@@ -322,7 +320,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
+
+ entry = &chunk->mark;
+ if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
+- free_chunk(chunk);
++ fsnotify_put_mark(entry);
+ return -ENOSPC;
+ }
+
+@@ -332,6 +330,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
+ spin_unlock(&hash_lock);
+ chunk->dead = 1;
+ spin_unlock(&entry->lock);
++ fsnotify_get_mark(entry);
+ fsnotify_destroy_mark(entry);
+ fsnotify_put_mark(entry);
+ return 0;
+@@ -396,7 +395,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
+ fsnotify_duplicate_mark(chunk_entry, old_entry);
+ if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
+ spin_unlock(&old_entry->lock);
+- free_chunk(chunk);
++ fsnotify_put_mark(chunk_entry);
+ fsnotify_put_mark(old_entry);
+ return -ENOSPC;
+ }
+@@ -412,6 +411,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
+ spin_unlock(&chunk_entry->lock);
+ spin_unlock(&old_entry->lock);
+
++ fsnotify_get_mark(chunk_entry);
+ fsnotify_destroy_mark(chunk_entry);
+
+ fsnotify_put_mark(chunk_entry);
+@@ -445,7 +445,6 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
+ spin_unlock(&old_entry->lock);
+ fsnotify_destroy_mark(old_entry);
+ fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
+- fsnotify_put_mark(old_entry); /* and kill it */
+ return 0;
+ }
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index e0431c4..910db7d 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -4355,6 +4355,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+ # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
+ #endif
+
++static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
++{
++ u64 temp = (__force u64) rtime;
++
++ temp *= (__force u64) utime;
++
++ if (sizeof(cputime_t) == 4)
++ temp = div_u64(temp, (__force u32) total);
++ else
++ temp = div64_u64(temp, (__force u64) total);
++
++ return (__force cputime_t) temp;
++}
++
+ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+ {
+ cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
+@@ -4364,13 +4378,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+ */
+ rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
+
+- if (total) {
+- u64 temp = rtime;
+-
+- temp *= utime;
+- do_div(temp, total);
+- utime = (cputime_t)temp;
+- } else
++ if (total)
++ utime = scale_utime(utime, rtime, total);
++ else
+ utime = rtime;
+
+ /*
+@@ -4397,13 +4407,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+ total = cputime_add(cputime.utime, cputime.stime);
+ rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
+
+- if (total) {
+- u64 temp = rtime;
+-
+- temp *= cputime.utime;
+- do_div(temp, total);
+- utime = (cputime_t)temp;
+- } else
++ if (total)
++ utime = scale_utime(cputime.utime, rtime, total);
++ else
+ utime = rtime;
+
+ sig->prev_utime = max(sig->prev_utime, utime);
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 48febd7..86eb848 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1977,10 +1977,10 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
+ * proportional to the fraction of recently scanned pages on
+ * each list that were recently referenced and in active use.
+ */
+- ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
++ ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
+ ap /= reclaim_stat->recent_rotated[0] + 1;
+
+- fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
++ fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
+ fp /= reclaim_stat->recent_rotated[1] + 1;
+ spin_unlock_irq(&zone->lru_lock);
+
+@@ -1993,7 +1993,7 @@ out:
+ unsigned long scan;
+
+ scan = zone_nr_lru_pages(zone, sc, l);
+- if (priority || noswap) {
++ if (priority || noswap || !vmscan_swappiness(sc)) {
+ scan >>= priority;
+ if (!scan && force_scan)
+ scan = SWAP_CLUSTER_MAX;
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 643a41b..6033f02 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -1411,7 +1411,13 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
+ if (conn->type == ACL_LINK) {
+ conn->state = BT_CONFIG;
+ hci_conn_hold(conn);
+- conn->disc_timeout = HCI_DISCONN_TIMEOUT;
++
++ if (!conn->out &&
++ !(conn->ssp_mode && conn->hdev->ssp_mode) &&
++ !hci_find_link_key(hdev, &ev->bdaddr))
++ conn->disc_timeout = HCI_PAIRING_TIMEOUT;
++ else
++ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+ mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
+ } else
+ conn->state = BT_CONNECTED;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 17b5b1c..dd76177 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -862,6 +862,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
+ write_lock_bh(&conn->chan_lock);
+
+ hci_conn_hold(conn->hcon);
++ conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
+
+ bacpy(&bt_sk(sk)->src, conn->src);
+ bacpy(&bt_sk(sk)->dst, conn->dst);
+@@ -2263,12 +2264,14 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
+ while (len >= L2CAP_CONF_OPT_SIZE) {
+ len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+
+- switch (type) {
+- case L2CAP_CONF_RFC:
+- if (olen == sizeof(rfc))
+- memcpy(&rfc, (void *)val, olen);
+- goto done;
+- }
++ if (type != L2CAP_CONF_RFC)
++ continue;
++
++ if (olen != sizeof(rfc))
++ break;
++
++ memcpy(&rfc, (void *)val, olen);
++ goto done;
+ }
+
+ /* Use sane default values in case a misbehaving remote device
+diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
+index 75c3582..fb85d37 100644
+--- a/net/dccp/ccid.h
++++ b/net/dccp/ccid.h
+@@ -246,7 +246,7 @@ static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk,
+ u32 __user *optval, int __user *optlen)
+ {
+ int rc = -ENOPROTOOPT;
+- if (ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL)
++ if (ccid != NULL && ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL)
+ rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len,
+ optval, optlen);
+ return rc;
+@@ -257,7 +257,7 @@ static inline int ccid_hc_tx_getsockopt(struct ccid *ccid, struct sock *sk,
+ u32 __user *optval, int __user *optlen)
+ {
+ int rc = -ENOPROTOOPT;
+- if (ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL)
++ if (ccid != NULL && ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL)
+ rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len,
+ optval, optlen);
+ return rc;
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 9ed2cd0..3282453 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -315,7 +315,6 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
+ */
+ void svc_xprt_enqueue(struct svc_xprt *xprt)
+ {
+- struct svc_serv *serv = xprt->xpt_server;
+ struct svc_pool *pool;
+ struct svc_rqst *rqstp;
+ int cpu;
+@@ -361,8 +360,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
+ rqstp, rqstp->rq_xprt);
+ rqstp->rq_xprt = xprt;
+ svc_xprt_get(xprt);
+- rqstp->rq_reserved = serv->sv_max_mesg;
+- atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+ pool->sp_stats.threads_woken++;
+ wake_up(&rqstp->rq_wait);
+ } else {
+@@ -642,8 +639,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
+ if (xprt) {
+ rqstp->rq_xprt = xprt;
+ svc_xprt_get(xprt);
+- rqstp->rq_reserved = serv->sv_max_mesg;
+- atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+
+ /* As there is a shortage of threads and this request
+ * had to be queued, don't allow the thread to wait so
+@@ -740,6 +735,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
+ else
+ len = xprt->xpt_ops->xpo_recvfrom(rqstp);
+ dprintk("svc: got len=%d\n", len);
++ rqstp->rq_reserved = serv->sv_max_mesg;
++ atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+ }
+ svc_xprt_received(xprt);
+
+@@ -796,7 +793,8 @@ int svc_send(struct svc_rqst *rqstp)
+
+ /* Grab mutex to serialize outgoing data. */
+ mutex_lock(&xprt->xpt_mutex);
+- if (test_bit(XPT_DEAD, &xprt->xpt_flags))
++ if (test_bit(XPT_DEAD, &xprt->xpt_flags)
++ || test_bit(XPT_CLOSE, &xprt->xpt_flags))
+ len = -ENOTCONN;
+ else
+ len = xprt->xpt_ops->xpo_sendto(rqstp);
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 71bed1c..296192c 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -1136,9 +1136,9 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
+ if (len >= 0)
+ svsk->sk_tcplen += len;
+ if (len != want) {
++ svc_tcp_save_pages(svsk, rqstp);
+ if (len < 0 && len != -EAGAIN)
+ goto err_other;
+- svc_tcp_save_pages(svsk, rqstp);
+ dprintk("svc: incomplete TCP record (%d of %d)\n",
+ svsk->sk_tcplen, svsk->sk_reclen);
+ goto err_noclose;
+diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
+index 254ab52..2210b83 100644
+--- a/sound/pci/hda/hda_proc.c
++++ b/sound/pci/hda/hda_proc.c
+@@ -412,7 +412,7 @@ static void print_digital_conv(struct snd_info_buffer *buffer,
+ if (digi1 & AC_DIG1_EMPHASIS)
+ snd_iprintf(buffer, " Preemphasis");
+ if (digi1 & AC_DIG1_COPYRIGHT)
+- snd_iprintf(buffer, " Copyright");
++ snd_iprintf(buffer, " Non-Copyright");
+ if (digi1 & AC_DIG1_NONAUDIO)
+ snd_iprintf(buffer, " Non-Audio");
+ if (digi1 & AC_DIG1_PROFESSIONAL)
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index 35abe3c..b22989e 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -276,6 +276,10 @@ static int _add_switch(struct hda_codec *codec, hda_nid_t nid, const char *pfx,
+ int type = dir ? HDA_INPUT : HDA_OUTPUT;
+ struct snd_kcontrol_new knew =
+ HDA_CODEC_MUTE_MONO(namestr, nid, chan, 0, type);
++ if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_MUTE) == 0) {
++ snd_printdd("Skipping '%s %s Switch' (no mute on node 0x%x)\n", pfx, dirstr[dir], nid);
++ return 0;
++ }
+ sprintf(namestr, "%s %s Switch", pfx, dirstr[dir]);
+ return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
+ }
+@@ -287,6 +291,10 @@ static int _add_volume(struct hda_codec *codec, hda_nid_t nid, const char *pfx,
+ int type = dir ? HDA_INPUT : HDA_OUTPUT;
+ struct snd_kcontrol_new knew =
+ HDA_CODEC_VOLUME_MONO(namestr, nid, chan, 0, type);
++ if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_NUM_STEPS) == 0) {
++ snd_printdd("Skipping '%s %s Volume' (no amp on node 0x%x)\n", pfx, dirstr[dir], nid);
++ return 0;
++ }
+ sprintf(namestr, "%s %s Volume", pfx, dirstr[dir]);
+ return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
+ }
+diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
+index 90117f8..90e5005 100644
+--- a/sound/soc/codecs/wm9712.c
++++ b/sound/soc/codecs/wm9712.c
+@@ -270,7 +270,7 @@ SOC_DAPM_ENUM("Route", wm9712_enum[9]);
+
+ /* Mic select */
+ static const struct snd_kcontrol_new wm9712_mic_src_controls =
+-SOC_DAPM_ENUM("Route", wm9712_enum[7]);
++SOC_DAPM_ENUM("Mic Source Select", wm9712_enum[7]);
+
+ /* diff select */
+ static const struct snd_kcontrol_new wm9712_diff_sel_controls =
+@@ -289,7 +289,9 @@ SND_SOC_DAPM_MUX("Left Capture Select", SND_SOC_NOPM, 0, 0,
+ &wm9712_capture_selectl_controls),
+ SND_SOC_DAPM_MUX("Right Capture Select", SND_SOC_NOPM, 0, 0,
+ &wm9712_capture_selectr_controls),
+-SND_SOC_DAPM_MUX("Mic Select Source", SND_SOC_NOPM, 0, 0,
++SND_SOC_DAPM_MUX("Left Mic Select Source", SND_SOC_NOPM, 0, 0,
++ &wm9712_mic_src_controls),
++SND_SOC_DAPM_MUX("Right Mic Select Source", SND_SOC_NOPM, 0, 0,
+ &wm9712_mic_src_controls),
+ SND_SOC_DAPM_MUX("Differential Source", SND_SOC_NOPM, 0, 0,
+ &wm9712_diff_sel_controls),
+@@ -317,6 +319,7 @@ SND_SOC_DAPM_PGA("Out 3 PGA", AC97_INT_PAGING, 5, 1, NULL, 0),
+ SND_SOC_DAPM_PGA("Line PGA", AC97_INT_PAGING, 2, 1, NULL, 0),
+ SND_SOC_DAPM_PGA("Phone PGA", AC97_INT_PAGING, 1, 1, NULL, 0),
+ SND_SOC_DAPM_PGA("Mic PGA", AC97_INT_PAGING, 0, 1, NULL, 0),
++SND_SOC_DAPM_PGA("Differential Mic", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_MICBIAS("Mic Bias", AC97_INT_PAGING, 10, 1),
+ SND_SOC_DAPM_OUTPUT("MONOOUT"),
+ SND_SOC_DAPM_OUTPUT("HPOUTL"),
+@@ -377,6 +380,18 @@ static const struct snd_soc_dapm_route wm9712_audio_map[] = {
+ {"Mic PGA", NULL, "MIC1"},
+ {"Mic PGA", NULL, "MIC2"},
+
++ /* microphones */
++ {"Differential Mic", NULL, "MIC1"},
++ {"Differential Mic", NULL, "MIC2"},
++ {"Left Mic Select Source", "Mic 1", "MIC1"},
++ {"Left Mic Select Source", "Mic 2", "MIC2"},
++ {"Left Mic Select Source", "Stereo", "MIC1"},
++ {"Left Mic Select Source", "Differential", "Differential Mic"},
++ {"Right Mic Select Source", "Mic 1", "MIC1"},
++ {"Right Mic Select Source", "Mic 2", "MIC2"},
++ {"Right Mic Select Source", "Stereo", "MIC2"},
++ {"Right Mic Select Source", "Differential", "Differential Mic"},
++
+ /* left capture selector */
+ {"Left Capture Select", "Mic", "MIC1"},
+ {"Left Capture Select", "Speaker Mixer", "Speaker Mixer"},
diff --git a/1029_linux-3.2.30.patch b/1029_linux-3.2.30.patch
new file mode 100644
index 00000000..86aea4bb
--- /dev/null
+++ b/1029_linux-3.2.30.patch
@@ -0,0 +1,5552 @@
+diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+index ab22fe6..e39a0c0 100644
+--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
++++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+@@ -10,8 +10,8 @@ Required properties:
+
+ Optional properties:
+ - fsl,card-wired : Indicate the card is wired to host permanently
+-- fsl,cd-internal : Indicate to use controller internal card detection
+-- fsl,wp-internal : Indicate to use controller internal write protection
++- fsl,cd-controller : Indicate to use controller internal card detection
++- fsl,wp-controller : Indicate to use controller internal write protection
+ - cd-gpios : Specify GPIOs for card detection
+ - wp-gpios : Specify GPIOs for write protection
+
+@@ -21,8 +21,8 @@ esdhc@70004000 {
+ compatible = "fsl,imx51-esdhc";
+ reg = <0x70004000 0x4000>;
+ interrupts = <1>;
+- fsl,cd-internal;
+- fsl,wp-internal;
++ fsl,cd-controller;
++ fsl,wp-controller;
+ };
+
+ esdhc@70008000 {
+diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
+index 2871fd5..99d4e44 100644
+--- a/Documentation/i2c/busses/i2c-i801
++++ b/Documentation/i2c/busses/i2c-i801
+@@ -20,6 +20,8 @@ Supported adapters:
+ * Intel Patsburg (PCH)
+ * Intel DH89xxCC (PCH)
+ * Intel Panther Point (PCH)
++ * Intel Lynx Point (PCH)
++ * Intel Lynx Point-LP (PCH)
+ Datasheets: Publicly available at the Intel website
+
+ On Intel Patsburg and later chipsets, both the normal host SMBus controller
+diff --git a/Makefile b/Makefile
+index d96fc2a..9fd7e60 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 29
++SUBLEVEL = 30
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 987c72d..9fdc151 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -2065,6 +2065,7 @@ source "drivers/cpufreq/Kconfig"
+ config CPU_FREQ_IMX
+ tristate "CPUfreq driver for i.MX CPUs"
+ depends on ARCH_MXC && CPU_FREQ
++ select CPU_FREQ_TABLE
+ help
+ This enables the CPUfreq driver for i.MX CPUs.
+
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index dfcf3b0..362c7ca 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -284,10 +284,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
+ zinstall uinstall install: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
+
+-%.dtb:
++%.dtb: scripts
+ $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+
+-dtbs:
++dtbs: scripts
+ $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+
+ # We use MRPROPER_FILES and CLEAN_FILES now
+diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
+index f8766af..4790df2 100644
+--- a/arch/arm/boot/dts/imx51-babbage.dts
++++ b/arch/arm/boot/dts/imx51-babbage.dts
+@@ -29,8 +29,8 @@
+ aips@70000000 { /* aips-1 */
+ spba@70000000 {
+ esdhc@70004000 { /* ESDHC1 */
+- fsl,cd-internal;
+- fsl,wp-internal;
++ fsl,cd-controller;
++ fsl,wp-controller;
+ status = "okay";
+ };
+
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index 8512475..9b419ab 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -232,6 +232,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+ #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+ #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
+
++#define pte_none(pte) (!pte_val(pte))
++#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
++#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
++#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
++#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
++#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
++#define pte_special(pte) (0)
++
++#define pte_present_user(pte) \
++ ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
++ (L_PTE_PRESENT | L_PTE_USER))
++
+ #if __LINUX_ARM_ARCH__ < 6
+ static inline void __sync_icache_dcache(pte_t pteval)
+ {
+@@ -243,25 +255,15 @@ extern void __sync_icache_dcache(pte_t pteval);
+ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval)
+ {
+- if (addr >= TASK_SIZE)
+- set_pte_ext(ptep, pteval, 0);
+- else {
++ unsigned long ext = 0;
++
++ if (addr < TASK_SIZE && pte_present_user(pteval)) {
+ __sync_icache_dcache(pteval);
+- set_pte_ext(ptep, pteval, PTE_EXT_NG);
++ ext |= PTE_EXT_NG;
+ }
+-}
+
+-#define pte_none(pte) (!pte_val(pte))
+-#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
+-#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
+-#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
+-#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
+-#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
+-#define pte_special(pte) (0)
+-
+-#define pte_present_user(pte) \
+- ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
+- (L_PTE_PRESENT | L_PTE_USER))
++ set_pte_ext(ptep, pteval, ext);
++}
+
+ #define PTE_BIT_FUNC(fn,op) \
+ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
+index 814a52a9..2bc1a8e 100644
+--- a/arch/arm/kernel/hw_breakpoint.c
++++ b/arch/arm/kernel/hw_breakpoint.c
+@@ -160,6 +160,12 @@ static int debug_arch_supported(void)
+ arch >= ARM_DEBUG_ARCH_V7_1;
+ }
+
++/* Can we determine the watchpoint access type from the fsr? */
++static int debug_exception_updates_fsr(void)
++{
++ return 0;
++}
++
+ /* Determine number of WRP registers available. */
+ static int get_num_wrp_resources(void)
+ {
+@@ -620,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
+ info->address &= ~alignment_mask;
+ info->ctrl.len <<= offset;
+
+- /*
+- * Currently we rely on an overflow handler to take
+- * care of single-stepping the breakpoint when it fires.
+- * In the case of userspace breakpoints on a core with V7 debug,
+- * we can use the mismatch feature as a poor-man's hardware
+- * single-step, but this only works for per-task breakpoints.
+- */
+- if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
+- !core_has_mismatch_brps() || !bp->hw.bp_target)) {
+- pr_warning("overflow handler required but none found\n");
+- ret = -EINVAL;
++ if (!bp->overflow_handler) {
++ /*
++ * Mismatch breakpoints are required for single-stepping
++ * breakpoints.
++ */
++ if (!core_has_mismatch_brps())
++ return -EINVAL;
++
++ /* We don't allow mismatch breakpoints in kernel space. */
++ if (arch_check_bp_in_kernelspace(bp))
++ return -EPERM;
++
++ /*
++ * Per-cpu breakpoints are not supported by our stepping
++ * mechanism.
++ */
++ if (!bp->hw.bp_target)
++ return -EINVAL;
++
++ /*
++ * We only support specific access types if the fsr
++ * reports them.
++ */
++ if (!debug_exception_updates_fsr() &&
++ (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
++ info->ctrl.type == ARM_BREAKPOINT_STORE))
++ return -EINVAL;
+ }
++
+ out:
+ return ret;
+ }
+@@ -707,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+ goto unlock;
+
+ /* Check that the access type matches. */
+- access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
+- HW_BREAKPOINT_R;
+- if (!(access & hw_breakpoint_type(wp)))
+- goto unlock;
++ if (debug_exception_updates_fsr()) {
++ access = (fsr & ARM_FSR_ACCESS_MASK) ?
++ HW_BREAKPOINT_W : HW_BREAKPOINT_R;
++ if (!(access & hw_breakpoint_type(wp)))
++ goto unlock;
++ }
+
+ /* We have a winner. */
+ info->trigger = addr;
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 8380bd1..7ac5dfd 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -380,20 +380,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+ #endif
+ instr = *(u32 *) pc;
+ } else if (thumb_mode(regs)) {
+- get_user(instr, (u16 __user *)pc);
++ if (get_user(instr, (u16 __user *)pc))
++ goto die_sig;
+ if (is_wide_instruction(instr)) {
+ unsigned int instr2;
+- get_user(instr2, (u16 __user *)pc+1);
++ if (get_user(instr2, (u16 __user *)pc+1))
++ goto die_sig;
+ instr <<= 16;
+ instr |= instr2;
+ }
+- } else {
+- get_user(instr, (u32 __user *)pc);
++ } else if (get_user(instr, (u32 __user *)pc)) {
++ goto die_sig;
+ }
+
+ if (call_undef_hook(regs, instr) == 0)
+ return;
+
++die_sig:
+ #ifdef CONFIG_DEBUG_USER
+ if (user_debug & UDBG_UNDEFINED) {
+ printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
+diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
+index 1620b15..cb105bf8 100644
+--- a/arch/arm/mach-dove/common.c
++++ b/arch/arm/mach-dove/common.c
+@@ -92,7 +92,7 @@ void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)
+ {
+ orion_ge00_init(eth_data, &dove_mbus_dram_info,
+ DOVE_GE00_PHYS_BASE, IRQ_DOVE_GE00_SUM,
+- 0, get_tclk());
++ 0, get_tclk(), 1600);
+ }
+
+ /*****************************************************************************
+diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
+index 20ed2d5..f8f7437 100644
+--- a/arch/arm/mach-imx/hotplug.c
++++ b/arch/arm/mach-imx/hotplug.c
+@@ -42,22 +42,6 @@ static inline void cpu_enter_lowpower(void)
+ : "cc");
+ }
+
+-static inline void cpu_leave_lowpower(void)
+-{
+- unsigned int v;
+-
+- asm volatile(
+- "mrc p15, 0, %0, c1, c0, 0\n"
+- " orr %0, %0, %1\n"
+- " mcr p15, 0, %0, c1, c0, 0\n"
+- " mrc p15, 0, %0, c1, c0, 1\n"
+- " orr %0, %0, %2\n"
+- " mcr p15, 0, %0, c1, c0, 1\n"
+- : "=&r" (v)
+- : "Ir" (CR_C), "Ir" (0x40)
+- : "cc");
+-}
+-
+ /*
+ * platform-specific code to shutdown a CPU
+ *
+@@ -67,11 +51,10 @@ void platform_cpu_die(unsigned int cpu)
+ {
+ cpu_enter_lowpower();
+ imx_enable_cpu(cpu, false);
+- cpu_do_idle();
+- cpu_leave_lowpower();
+
+- /* We should never return from idle */
+- panic("cpu %d unexpectedly exit from shutdown\n", cpu);
++ /* spin here until hardware takes it down */
++ while (1)
++ ;
+ }
+
+ int platform_cpu_disable(unsigned int cpu)
+diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
+index c5dbbb3..06faa97 100644
+--- a/arch/arm/mach-kirkwood/common.c
++++ b/arch/arm/mach-kirkwood/common.c
+@@ -88,7 +88,7 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
+
+ orion_ge00_init(eth_data, &kirkwood_mbus_dram_info,
+ GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
+- IRQ_KIRKWOOD_GE00_ERR, kirkwood_tclk);
++ IRQ_KIRKWOOD_GE00_ERR, kirkwood_tclk, 1600);
+ }
+
+
+@@ -102,7 +102,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
+
+ orion_ge01_init(eth_data, &kirkwood_mbus_dram_info,
+ GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
+- IRQ_KIRKWOOD_GE01_ERR, kirkwood_tclk);
++ IRQ_KIRKWOOD_GE01_ERR, kirkwood_tclk, 1600);
+ }
+
+
+diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
+index d90e244..570ee4d 100644
+--- a/arch/arm/mach-mv78xx0/common.c
++++ b/arch/arm/mach-mv78xx0/common.c
+@@ -202,7 +202,8 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
+ {
+ orion_ge00_init(eth_data, &mv78xx0_mbus_dram_info,
+ GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
+- IRQ_MV78XX0_GE_ERR, get_tclk());
++ IRQ_MV78XX0_GE_ERR, get_tclk(),
++ MV643XX_TX_CSUM_DEFAULT_LIMIT);
+ }
+
+
+@@ -213,7 +214,8 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
+ {
+ orion_ge01_init(eth_data, &mv78xx0_mbus_dram_info,
+ GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
+- NO_IRQ, get_tclk());
++ NO_IRQ, get_tclk(),
++ MV643XX_TX_CSUM_DEFAULT_LIMIT);
+ }
+
+
+diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
+index 53b68b8..20260db 100644
+--- a/arch/arm/mach-orion5x/common.c
++++ b/arch/arm/mach-orion5x/common.c
+@@ -95,7 +95,8 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
+ {
+ orion_ge00_init(eth_data, &orion5x_mbus_dram_info,
+ ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM,
+- IRQ_ORION5X_ETH_ERR, orion5x_tclk);
++ IRQ_ORION5X_ETH_ERR, orion5x_tclk,
++ MV643XX_TX_CSUM_DEFAULT_LIMIT);
+ }
+
+
+diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
+index 1a8d4aa..8fda9f7 100644
+--- a/arch/arm/mm/flush.c
++++ b/arch/arm/mm/flush.c
+@@ -236,8 +236,6 @@ void __sync_icache_dcache(pte_t pteval)
+ struct page *page;
+ struct address_space *mapping;
+
+- if (!pte_present_user(pteval))
+- return;
+ if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
+ /* only flush non-aliasing VIPT caches for exec mappings */
+ return;
+diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
+index af3b92b..f9adbbb 100644
+--- a/arch/arm/plat-omap/dmtimer.c
++++ b/arch/arm/plat-omap/dmtimer.c
+@@ -236,7 +236,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_enable);
+
+ void omap_dm_timer_disable(struct omap_dm_timer *timer)
+ {
+- pm_runtime_put(&timer->pdev->dev);
++ pm_runtime_put_sync(&timer->pdev->dev);
+ }
+ EXPORT_SYMBOL_GPL(omap_dm_timer_disable);
+
+diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
+index 11dce87..8a6886a 100644
+--- a/arch/arm/plat-orion/common.c
++++ b/arch/arm/plat-orion/common.c
+@@ -263,10 +263,12 @@ void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
+ unsigned long mapbase,
+ unsigned long irq,
+ unsigned long irq_err,
+- int tclk)
++ int tclk,
++ unsigned int tx_csum_limit)
+ {
+ fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
+ mapbase + 0x2000, SZ_16K - 1, irq_err);
++ orion_ge00_shared_data.tx_csum_limit = tx_csum_limit;
+ ge_complete(&orion_ge00_shared_data, mbus_dram_info, tclk,
+ orion_ge00_resources, irq, &orion_ge00_shared,
+ eth_data, &orion_ge00);
+@@ -317,10 +319,12 @@ void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
+ unsigned long mapbase,
+ unsigned long irq,
+ unsigned long irq_err,
+- int tclk)
++ int tclk,
++ unsigned int tx_csum_limit)
+ {
+ fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
+ mapbase + 0x2000, SZ_16K - 1, irq_err);
++ orion_ge01_shared_data.tx_csum_limit = tx_csum_limit;
+ ge_complete(&orion_ge01_shared_data, mbus_dram_info, tclk,
+ orion_ge01_resources, irq, &orion_ge01_shared,
+ eth_data, &orion_ge01);
+diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
+index a2c0e31..b637dae 100644
+--- a/arch/arm/plat-orion/include/plat/common.h
++++ b/arch/arm/plat-orion/include/plat/common.h
+@@ -41,14 +41,16 @@ void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
+ unsigned long mapbase,
+ unsigned long irq,
+ unsigned long irq_err,
+- int tclk);
++ int tclk,
++ unsigned int tx_csum_limit);
+
+ void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
+ struct mbus_dram_target_info *mbus_dram_info,
+ unsigned long mapbase,
+ unsigned long irq,
+ unsigned long irq_err,
+- int tclk);
++ int tclk,
++ unsigned int tx_csum_limit);
+
+ void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
+ struct mbus_dram_target_info *mbus_dram_info,
+diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
+index 8a90b6a..1eedf8d 100644
+--- a/arch/arm/plat-s3c24xx/dma.c
++++ b/arch/arm/plat-s3c24xx/dma.c
+@@ -431,7 +431,7 @@ s3c2410_dma_canload(struct s3c2410_dma_chan *chan)
+ * when necessary.
+ */
+
+-int s3c2410_dma_enqueue(unsigned int channel, void *id,
++int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
+ dma_addr_t data, int size)
+ {
+ struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
+diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
+index 4054b31..c4b779b 100644
+--- a/arch/parisc/include/asm/atomic.h
++++ b/arch/parisc/include/asm/atomic.h
+@@ -247,7 +247,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+
+ #define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
+
+-#define ATOMIC_INIT(i) ((atomic_t) { (i) })
++#define ATOMIC_INIT(i) { (i) }
+
+ #define smp_mb__before_atomic_dec() smp_mb()
+ #define smp_mb__after_atomic_dec() smp_mb()
+@@ -256,7 +256,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+
+ #ifdef CONFIG_64BIT
+
+-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
++#define ATOMIC64_INIT(i) { (i) }
+
+ static __inline__ s64
+ __atomic64_add_return(s64 i, atomic64_t *v)
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index 7c5324f..cc20b0a 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -79,6 +79,7 @@ int main(void)
+ DEFINE(SIGSEGV, SIGSEGV);
+ DEFINE(NMI_MASK, NMI_MASK);
+ DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
++ DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
+ #else
+ DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
+ #endif /* CONFIG_PPC64 */
+diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
+index 2cc451a..6856062 100644
+--- a/arch/powerpc/kernel/dbell.c
++++ b/arch/powerpc/kernel/dbell.c
+@@ -28,6 +28,8 @@ void doorbell_setup_this_cpu(void)
+
+ void doorbell_cause_ipi(int cpu, unsigned long data)
+ {
++ /* Order previous accesses vs. msgsnd, which is treated as a store */
++ mb();
+ ppc_msgsnd(PPC_DBELL, 0, data);
+ }
+
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index d834425..654fc53 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -380,6 +380,12 @@ _GLOBAL(ret_from_fork)
+ li r3,0
+ b syscall_exit
+
++ .section ".toc","aw"
++DSCR_DEFAULT:
++ .tc dscr_default[TC],dscr_default
++
++ .section ".text"
++
+ /*
+ * This routine switches between two different tasks. The process
+ * state of one is saved on its kernel stack. Then the state
+@@ -519,9 +525,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
+ mr r1,r8 /* start using new stack pointer */
+ std r7,PACAKSAVE(r13)
+
+- ld r6,_CCR(r1)
+- mtcrf 0xFF,r6
+-
+ #ifdef CONFIG_ALTIVEC
+ BEGIN_FTR_SECTION
+ ld r0,THREAD_VRSAVE(r4)
+@@ -530,14 +533,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ #endif /* CONFIG_ALTIVEC */
+ #ifdef CONFIG_PPC64
+ BEGIN_FTR_SECTION
++ lwz r6,THREAD_DSCR_INHERIT(r4)
++ ld r7,DSCR_DEFAULT@toc(2)
+ ld r0,THREAD_DSCR(r4)
+- cmpd r0,r25
+- beq 1f
++ cmpwi r6,0
++ bne 1f
++ ld r0,0(r7)
++1: cmpd r0,r25
++ beq 2f
+ mtspr SPRN_DSCR,r0
+-1:
++2:
+ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
+ #endif
+
++ ld r6,_CCR(r1)
++ mtcrf 0xFF,r6
++
+ /* r3-r13 are destroyed -- Cort */
+ REST_8GPRS(14, r1)
+ REST_10GPRS(22, r1)
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 6457574..d687e3f 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -778,16 +778,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
+ #endif /* CONFIG_PPC_STD_MMU_64 */
+ #ifdef CONFIG_PPC64
+ if (cpu_has_feature(CPU_FTR_DSCR)) {
+- if (current->thread.dscr_inherit) {
+- p->thread.dscr_inherit = 1;
+- p->thread.dscr = current->thread.dscr;
+- } else if (0 != dscr_default) {
+- p->thread.dscr_inherit = 1;
+- p->thread.dscr = dscr_default;
+- } else {
+- p->thread.dscr_inherit = 0;
+- p->thread.dscr = 0;
+- }
++ p->thread.dscr_inherit = current->thread.dscr_inherit;
++ p->thread.dscr = current->thread.dscr;
+ }
+ #endif
+
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index 6df7090..fe04b4a 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -214,8 +214,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
+ struct cpu_messages *info = &per_cpu(ipi_message, cpu);
+ char *message = (char *)&info->messages;
+
++ /*
++ * Order previous accesses before accesses in the IPI handler.
++ */
++ smp_mb();
+ message[msg] = 1;
+- mb();
++ /*
++ * cause_ipi functions are required to include a full barrier
++ * before doing whatever causes the IPI.
++ */
+ smp_ops->cause_ipi(cpu, info->data);
+ }
+
+@@ -227,7 +234,7 @@ irqreturn_t smp_ipi_demux(void)
+ mb(); /* order any irq clear */
+
+ do {
+- all = xchg_local(&info->messages, 0);
++ all = xchg(&info->messages, 0);
+
+ #ifdef __BIG_ENDIAN
+ if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
+diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
+index ce035c1..55be64d 100644
+--- a/arch/powerpc/kernel/sysfs.c
++++ b/arch/powerpc/kernel/sysfs.c
+@@ -192,6 +192,14 @@ static ssize_t show_dscr_default(struct sysdev_class *class,
+ return sprintf(buf, "%lx\n", dscr_default);
+ }
+
++static void update_dscr(void *dummy)
++{
++ if (!current->thread.dscr_inherit) {
++ current->thread.dscr = dscr_default;
++ mtspr(SPRN_DSCR, dscr_default);
++ }
++}
++
+ static ssize_t __used store_dscr_default(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, const char *buf,
+ size_t count)
+@@ -204,6 +212,8 @@ static ssize_t __used store_dscr_default(struct sysdev_class *class,
+ return -EINVAL;
+ dscr_default = val;
+
++ on_each_cpu(update_dscr, NULL, 1);
++
+ return count;
+ }
+
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 5459d14..82dcd4d 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -942,8 +942,9 @@ static int emulate_instruction(struct pt_regs *regs)
+ cpu_has_feature(CPU_FTR_DSCR)) {
+ PPC_WARN_EMULATED(mtdscr, regs);
+ rd = (instword >> 21) & 0x1f;
+- mtspr(SPRN_DSCR, regs->gpr[rd]);
++ current->thread.dscr = regs->gpr[rd];
+ current->thread.dscr_inherit = 1;
++ mtspr(SPRN_DSCR, current->thread.dscr);
+ return 0;
+ }
+ #endif
+diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
+index 9518d36..5c76bf7 100644
+--- a/arch/powerpc/sysdev/xics/icp-hv.c
++++ b/arch/powerpc/sysdev/xics/icp-hv.c
+@@ -27,33 +27,53 @@ static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
+ {
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
++ unsigned int ret = XICS_IRQ_SPURIOUS;
+
+ rc = plpar_hcall(H_XIRR, retbuf, cppr);
+- if (rc != H_SUCCESS)
+- panic(" bad return code xirr - rc = %lx\n", rc);
+- return (unsigned int)retbuf[0];
++ if (rc == H_SUCCESS) {
++ ret = (unsigned int)retbuf[0];
++ } else {
++ pr_err("%s: bad return code xirr cppr=0x%x returned %ld\n",
++ __func__, cppr, rc);
++ WARN_ON_ONCE(1);
++ }
++
++ return ret;
+ }
+
+ static inline void icp_hv_set_xirr(unsigned int value)
+ {
+ long rc = plpar_hcall_norets(H_EOI, value);
+- if (rc != H_SUCCESS)
+- panic("bad return code EOI - rc = %ld, value=%x\n", rc, value);
++ if (rc != H_SUCCESS) {
++ pr_err("%s: bad return code eoi xirr=0x%x returned %ld\n",
++ __func__, value, rc);
++ WARN_ON_ONCE(1);
++ }
+ }
+
+ static inline void icp_hv_set_cppr(u8 value)
+ {
+ long rc = plpar_hcall_norets(H_CPPR, value);
+- if (rc != H_SUCCESS)
+- panic("bad return code cppr - rc = %lx\n", rc);
++ if (rc != H_SUCCESS) {
++ pr_err("%s: bad return code cppr cppr=0x%x returned %ld\n",
++ __func__, value, rc);
++ WARN_ON_ONCE(1);
++ }
+ }
+
+ static inline void icp_hv_set_qirr(int n_cpu , u8 value)
+ {
+- long rc = plpar_hcall_norets(H_IPI, get_hard_smp_processor_id(n_cpu),
+- value);
+- if (rc != H_SUCCESS)
+- panic("bad return code qirr - rc = %lx\n", rc);
++ int hw_cpu = get_hard_smp_processor_id(n_cpu);
++ long rc;
++
++ /* Make sure all previous accesses are ordered before IPI sending */
++ mb();
++ rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
++ if (rc != H_SUCCESS) {
++ pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
++ "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
++ WARN_ON_ONCE(1);
++ }
+ }
+
+ static void icp_hv_eoi(struct irq_data *d)
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index b2c7179..bb104b4 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -78,9 +78,16 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
+ memblock_x86_reserve_range(start, start + size, "XEN EXTRA");
+
+ xen_max_p2m_pfn = PFN_DOWN(start + size);
++ for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
++ unsigned long mfn = pfn_to_mfn(pfn);
++
++ if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
++ continue;
++ WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
++ pfn, mfn);
+
+- for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
+ __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
++ }
+ }
+
+ static unsigned long __init xen_release_chunk(unsigned long start,
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index fb65915..608257a 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -386,6 +386,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
+ { PCI_DEVICE(0x1b4b, 0x917a),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
++ { PCI_DEVICE(0x1b4b, 0x9192),
++ .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
+ { PCI_DEVICE(0x1b4b, 0x91a3),
+ .driver_data = board_ahci_yes_fbs },
+
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index 8323fc3..3f1799b 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -1625,10 +1625,8 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+- if (!req->flags) {
+- DRM_ERROR("no operation set\n");
++ if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
+ return -EINVAL;
+- }
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
+@@ -1641,7 +1639,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
+
+ if (req->flags & DRM_MODE_CURSOR_BO) {
+ if (!crtc->funcs->cursor_set) {
+- DRM_ERROR("crtc does not support cursor\n");
+ ret = -ENXIO;
+ goto out;
+ }
+@@ -1654,7 +1651,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
+ if (crtc->funcs->cursor_move) {
+ ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
+ } else {
+- DRM_ERROR("crtc does not support cursor\n");
+ ret = -EFAULT;
+ goto out;
+ }
+@@ -1692,14 +1688,11 @@ int drm_mode_addfb(struct drm_device *dev,
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+- if ((config->min_width > r->width) || (r->width > config->max_width)) {
+- DRM_ERROR("mode new framebuffer width not within limits\n");
++ if ((config->min_width > r->width) || (r->width > config->max_width))
+ return -EINVAL;
+- }
+- if ((config->min_height > r->height) || (r->height > config->max_height)) {
+- DRM_ERROR("mode new framebuffer height not within limits\n");
++
++ if ((config->min_height > r->height) || (r->height > config->max_height))
+ return -EINVAL;
+- }
+
+ mutex_lock(&dev->mode_config.mutex);
+
+@@ -1756,7 +1749,6 @@ int drm_mode_rmfb(struct drm_device *dev,
+ obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
+ /* TODO check that we really get a framebuffer back. */
+ if (!obj) {
+- DRM_ERROR("mode invalid framebuffer id\n");
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -1767,7 +1759,6 @@ int drm_mode_rmfb(struct drm_device *dev,
+ found = 1;
+
+ if (!found) {
+- DRM_ERROR("tried to remove a fb that we didn't own\n");
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -1814,7 +1805,6 @@ int drm_mode_getfb(struct drm_device *dev,
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj) {
+- DRM_ERROR("invalid framebuffer id\n");
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -1850,7 +1840,6 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+ if (!obj) {
+- DRM_ERROR("invalid framebuffer id\n");
+ ret = -EINVAL;
+ goto out_err1;
+ }
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index a1ee634..0c1a99b 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -66,6 +66,8 @@
+ #define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
+ /* use +hsync +vsync for detailed mode */
+ #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
++/* Force reduced-blanking timings for detailed modes */
++#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
+
+ struct detailed_mode_closure {
+ struct drm_connector *connector;
+@@ -85,6 +87,9 @@ static struct edid_quirk {
+ int product_id;
+ u32 quirks;
+ } edid_quirk_list[] = {
++ /* ASUS VW222S */
++ { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
++
+ /* Acer AL1706 */
+ { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
+ /* Acer F51 */
+@@ -120,6 +125,9 @@ static struct edid_quirk {
+ /* Samsung SyncMaster 22[5-6]BW */
+ { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
+ { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
++
++ /* ViewSonic VA2026w */
++ { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+ };
+
+ /*** DDC fetch and block validation ***/
+@@ -863,12 +871,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+ "Wrong Hsync/Vsync pulse width\n");
+ return NULL;
+ }
++
++ if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
++ mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
++ if (!mode)
++ return NULL;
++
++ goto set_size;
++ }
++
+ mode = drm_mode_create(dev);
+ if (!mode)
+ return NULL;
+
+- mode->type = DRM_MODE_TYPE_DRIVER;
+-
+ if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+ timing->pixel_clock = cpu_to_le16(1088);
+
+@@ -892,8 +907,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+
+ drm_mode_do_interlace_quirk(mode, pt);
+
+- drm_mode_set_name(mode);
+-
+ if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+ pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
+ }
+@@ -903,6 +916,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+ mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+
++set_size:
+ mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
+ mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
+
+@@ -916,6 +930,9 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+ mode->height_mm = edid->height_cm * 10;
+ }
+
++ mode->type = DRM_MODE_TYPE_DRIVER;
++ drm_mode_set_name(mode);
++
+ return mode;
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 578ddfc..c8b5bc1 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -2006,10 +2006,22 @@ static int i915_driver_irq_postinstall(struct drm_device *dev)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+- hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+- hotplug_en |= SDVOB_HOTPLUG_INT_EN;
++ if (IS_G4X(dev)) {
++ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
++ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
++ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
++ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
++ } else if (IS_GEN4(dev)) {
++ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
++ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
++ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
++ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
++ } else {
++ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
++ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
++ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
++ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
++ }
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index fd53122..4a5e662 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -1419,14 +1419,20 @@
+ #define DPC_HOTPLUG_INT_STATUS (1 << 28)
+ #define HDMID_HOTPLUG_INT_STATUS (1 << 27)
+ #define DPD_HOTPLUG_INT_STATUS (1 << 27)
++/* CRT/TV common between gen3+ */
+ #define CRT_HOTPLUG_INT_STATUS (1 << 11)
+ #define TV_HOTPLUG_INT_STATUS (1 << 10)
+ #define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
+ #define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
+ #define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
+ #define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
+-#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
+-#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
++/* SDVO is different across gen3/4 */
++#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
++#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
++#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
++#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
++#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
++#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
+
+ /* SDVO port control */
+ #define SDVOB 0x61140
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 3eed270..6c3fb44 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -1072,8 +1072,8 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+ {
+ u32 val = I915_READ(reg);
+- WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
+- "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
++ WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
++ "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
+ reg, pipe_name(pipe));
+ }
+
+@@ -1089,13 +1089,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+
+ reg = PCH_ADPA;
+ val = I915_READ(reg);
+- WARN(adpa_pipe_enabled(dev_priv, val, pipe),
++ WARN(adpa_pipe_enabled(dev_priv, pipe, val),
+ "PCH VGA enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ reg = PCH_LVDS;
+ val = I915_READ(reg);
+- WARN(lvds_pipe_enabled(dev_priv, val, pipe),
++ WARN(lvds_pipe_enabled(dev_priv, pipe, val),
+ "PCH LVDS enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+@@ -1437,7 +1437,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+ {
+ u32 val = I915_READ(reg);
+- if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
++ if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
+ DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
+ reg, pipe);
+ I915_WRITE(reg, val & ~PORT_ENABLE);
+@@ -1459,12 +1459,12 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
+
+ reg = PCH_ADPA;
+ val = I915_READ(reg);
+- if (adpa_pipe_enabled(dev_priv, val, pipe))
++ if (adpa_pipe_enabled(dev_priv, pipe, val))
+ I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
+
+ reg = PCH_LVDS;
+ val = I915_READ(reg);
+- if (lvds_pipe_enabled(dev_priv, val, pipe)) {
++ if (lvds_pipe_enabled(dev_priv, pipe, val)) {
+ DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
+ I915_WRITE(reg, val & ~LVDS_PORT_EN);
+ POSTING_READ(reg);
+@@ -2852,16 +2852,14 @@ static void intel_clear_scanline_wait(struct drm_device *dev)
+
+ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+ {
+- struct drm_i915_gem_object *obj;
+- struct drm_i915_private *dev_priv;
++ struct drm_device *dev = crtc->dev;
+
+ if (crtc->fb == NULL)
+ return;
+
+- obj = to_intel_framebuffer(crtc->fb)->obj;
+- dev_priv = crtc->dev->dev_private;
+- wait_event(dev_priv->pending_flip_queue,
+- atomic_read(&obj->pending_flip) == 0);
++ mutex_lock(&dev->struct_mutex);
++ intel_finish_fb(crtc->fb);
++ mutex_unlock(&dev->struct_mutex);
+ }
+
+ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+@@ -3322,23 +3320,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_device *dev = crtc->dev;
+
+- /* Flush any pending WAITs before we disable the pipe. Note that
+- * we need to drop the struct_mutex in order to acquire it again
+- * during the lowlevel dpms routines around a couple of the
+- * operations. It does not look trivial nor desirable to move
+- * that locking higher. So instead we leave a window for the
+- * submission of further commands on the fb before we can actually
+- * disable it. This race with userspace exists anyway, and we can
+- * only rely on the pipe being disabled by userspace after it
+- * receives the hotplug notification and has flushed any pending
+- * batches.
+- */
+- if (crtc->fb) {
+- mutex_lock(&dev->struct_mutex);
+- intel_finish_fb(crtc->fb);
+- mutex_unlock(&dev->struct_mutex);
+- }
+-
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (crtc->fb) {
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index ceec71b..f07bde2 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -752,7 +752,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ .ident = "Hewlett-Packard t5745",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+- DMI_MATCH(DMI_BOARD_NAME, "hp t5745"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
+ },
+ },
+ {
+@@ -760,7 +760,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ .ident = "Hewlett-Packard st5747",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+- DMI_MATCH(DMI_BOARD_NAME, "hp st5747"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
+ },
+ },
+ {
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index a8d8ee5..bbf247c 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -2514,6 +2514,7 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ struct intel_sdvo *intel_sdvo;
++ u32 hotplug_mask;
+ int i;
+
+ intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
+@@ -2544,10 +2545,17 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ }
+ }
+
+- if (IS_SDVOB(sdvo_reg))
+- dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
+- else
+- dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
++ hotplug_mask = 0;
++ if (IS_G4X(dev)) {
++ hotplug_mask = IS_SDVOB(sdvo_reg) ?
++ SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X;
++ } else if (IS_GEN4(dev)) {
++ hotplug_mask = IS_SDVOB(sdvo_reg) ?
++ SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965;
++ } else {
++ hotplug_mask = IS_SDVOB(sdvo_reg) ?
++ SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
++ }
+
+ drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
+
+@@ -2555,14 +2563,6 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
+ goto err;
+
+- /* Set up hotplug command - note paranoia about contents of reply.
+- * We assume that the hardware is in a sane state, and only touch
+- * the bits we think we understand.
+- */
+- intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
+- &intel_sdvo->hotplug_active, 2);
+- intel_sdvo->hotplug_active[0] &= ~0x3;
+-
+ if (intel_sdvo_output_setup(intel_sdvo,
+ intel_sdvo->caps.output_flags) != true) {
+ DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
+@@ -2570,6 +2570,12 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ goto err;
+ }
+
++ /* Only enable the hotplug irq if we need it, to work around noisy
++ * hotplug lines.
++ */
++ if (intel_sdvo->hotplug_active[0])
++ dev_priv->hotplug_supported_mask |= hotplug_mask;
++
+ intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
+
+ /* Set the input timing to the screen. Assume always input 0. */
+diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
+index b12fd2c..6adef06 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_display.c
++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
+@@ -381,7 +381,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ args->size = args->pitch * args->height;
+ args->size = roundup(args->size, PAGE_SIZE);
+
+- ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
++ ret = nouveau_gem_new(dev, args->size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, &bo);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 757c549..ceffd20 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -1446,14 +1446,98 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
+ }
+ }
+
++/**
++ * radeon_get_pll_use_mask - look up a mask of which pplls are in use
++ *
++ * @crtc: drm crtc
++ *
++ * Returns the mask of which PPLLs (Pixel PLLs) are in use.
++ */
++static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_crtc *test_crtc;
++ struct radeon_crtc *radeon_test_crtc;
++ u32 pll_in_use = 0;
++
++ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
++ if (crtc == test_crtc)
++ continue;
++
++ radeon_test_crtc = to_radeon_crtc(test_crtc);
++ if (radeon_test_crtc->pll_id != ATOM_PPLL_INVALID)
++ pll_in_use |= (1 << radeon_test_crtc->pll_id);
++ }
++ return pll_in_use;
++}
++
++/**
++ * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
++ *
++ * @crtc: drm crtc
++ *
++ * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
++ * also in DP mode. For DP, a single PPLL can be used for all DP
++ * crtcs/encoders.
++ */
++static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_encoder *test_encoder;
++ struct radeon_crtc *radeon_test_crtc;
++
++ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
++ if (test_encoder->crtc && (test_encoder->crtc != crtc)) {
++ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
++ /* for DP use the same PLL for all */
++ radeon_test_crtc = to_radeon_crtc(test_encoder->crtc);
++ if (radeon_test_crtc->pll_id != ATOM_PPLL_INVALID)
++ return radeon_test_crtc->pll_id;
++ }
++ }
++ }
++ return ATOM_PPLL_INVALID;
++}
++
++/**
++ * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
++ *
++ * @crtc: drm crtc
++ *
++ * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
++ * a single PPLL can be used for all DP crtcs/encoders. For non-DP
++ * monitors a dedicated PPLL must be used. If a particular board has
++ * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
++ * as there is no need to program the PLL itself. If we are not able to
++ * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
++ * avoid messing up an existing monitor.
++ *
++ * Asic specific PLL information
++ *
++ * DCE 6.1
++ * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
++ * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
++ *
++ * DCE 6.0
++ * - PPLL0 is available to all UNIPHY (DP only)
++ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
++ *
++ * DCE 5.0
++ * - DCPLL is available to all UNIPHY (DP only)
++ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
++ *
++ * DCE 3.0/4.0/4.1
++ * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
++ *
++ */
+ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+ {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *test_encoder;
+- struct drm_crtc *test_crtc;
+- uint32_t pll_in_use = 0;
++ u32 pll_in_use;
++ int pll;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+@@ -1461,35 +1545,39 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+ /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
+ * depending on the asic:
+ * DCE4: PPLL or ext clock
+- * DCE5: DCPLL or ext clock
++ * DCE5: PPLL, DCPLL, or ext clock
+ *
+ * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
+ * PPLL/DCPLL programming and only program the DP DTO for the
+ * crtc virtual pixel clock.
+ */
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
+- if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk)
++ if (rdev->clock.dp_extclk)
++ /* skip PPLL programming if using ext clock */
+ return ATOM_PPLL_INVALID;
++ else if (ASIC_IS_DCE5(rdev))
++ /* use DCPLL for all DP */
++ return ATOM_DCPLL;
++ else {
++ /* use the same PPLL for all DP monitors */
++ pll = radeon_get_shared_dp_ppll(crtc);
++ if (pll != ATOM_PPLL_INVALID)
++ return pll;
++ }
+ }
++ break;
+ }
+ }
+-
+- /* otherwise, pick one of the plls */
+- list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+- struct radeon_crtc *radeon_test_crtc;
+-
+- if (crtc == test_crtc)
+- continue;
+-
+- radeon_test_crtc = to_radeon_crtc(test_crtc);
+- if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
+- (radeon_test_crtc->pll_id <= ATOM_PPLL2))
+- pll_in_use |= (1 << radeon_test_crtc->pll_id);
+- }
+- if (!(pll_in_use & 1))
++ /* all other cases */
++ pll_in_use = radeon_get_pll_use_mask(crtc);
++ if (!(pll_in_use & (1 << ATOM_PPLL2)))
++ return ATOM_PPLL2;
++ if (!(pll_in_use & (1 << ATOM_PPLL1)))
+ return ATOM_PPLL1;
+- return ATOM_PPLL2;
++ DRM_ERROR("unable to allocate a PPLL\n");
++ return ATOM_PPLL_INVALID;
+ } else
++ /* use PPLL1 or PPLL2 */
+ return radeon_crtc->crtc_id;
+
+ }
+@@ -1578,10 +1666,25 @@ static void atombios_crtc_commit(struct drm_crtc *crtc)
+ static void atombios_crtc_disable(struct drm_crtc *crtc)
+ {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
++ struct drm_device *dev = crtc->dev;
++ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_atom_ss ss;
++ int i;
+
+ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+
++ for (i = 0; i < rdev->num_crtc; i++) {
++ if (rdev->mode_info.crtcs[i] &&
++ rdev->mode_info.crtcs[i]->enabled &&
++ i != radeon_crtc->crtc_id &&
++ radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) {
++ /* one other crtc is using this pll don't turn
++ * off the pll
++ */
++ goto done;
++ }
++ }
++
+ switch (radeon_crtc->pll_id) {
+ case ATOM_PPLL1:
+ case ATOM_PPLL2:
+@@ -1592,7 +1695,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
+ default:
+ break;
+ }
+- radeon_crtc->pll_id = -1;
++done:
++ radeon_crtc->pll_id = ATOM_PPLL_INVALID;
+ }
+
+ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
+@@ -1641,6 +1745,6 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
+ else
+ radeon_crtc->crtc_offset = 0;
+ }
+- radeon_crtc->pll_id = -1;
++ radeon_crtc->pll_id = ATOM_PPLL_INVALID;
+ drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
+ }
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 5351ee1..382e141 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1344,6 +1344,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++ struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
++ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_connector *radeon_connector = NULL;
+ struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
+@@ -1355,12 +1357,38 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+- /* some early dce3.2 boards have a bug in their transmitter control table */
+- if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) ||
+- ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
++ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
++ if (!connector)
++ dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
++ else
++ dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
++
++ /* setup and enable the encoder */
++ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
++ atombios_dig_encoder_setup(encoder,
++ ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
++ dig->panel_mode);
++ if (ext_encoder) {
++ if (ASIC_IS_DCE41(rdev))
++ atombios_external_encoder_setup(encoder, ext_encoder,
++ EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
++ }
++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
++ } else if (ASIC_IS_DCE4(rdev)) {
++ /* setup and enable the encoder */
++ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
++ /* enable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+- else
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
++ } else {
++ /* setup and enable the encoder and transmitter */
++ atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
++ /* some early dce3.2 boards have a bug in their transmitter control table */
++ if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730))
++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
++ }
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ atombios_set_edp_panel_power(connector,
+@@ -1377,10 +1405,19 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+- if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
++ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
++ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+- else
++ } else if (ASIC_IS_DCE4(rdev)) {
++ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
++ } else {
++ /* disable the encoder and transmitter */
++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
++ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
++ atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
++ }
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+@@ -1805,10 +1842,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+- struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+ radeon_encoder->pixel_clock = adjusted_mode->clock;
+
++ /* need to call this here rather than in prepare() since we need some crtc info */
++ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
++
+ if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
+ if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
+ atombios_yuv_setup(encoder, true);
+@@ -1827,38 +1866,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+- if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+-
+- if (!connector)
+- dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+- else
+- dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
+-
+- /* setup and enable the encoder */
+- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+- atombios_dig_encoder_setup(encoder,
+- ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
+- dig->panel_mode);
+- } else if (ASIC_IS_DCE4(rdev)) {
+- /* disable the transmitter */
+- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+- /* setup and enable the encoder */
+- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+-
+- /* enable the transmitter */
+- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+- } else {
+- /* disable the encoder and transmitter */
+- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+- atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+-
+- /* setup and enable the encoder and transmitter */
+- atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
+- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+- }
++ /* handled in dpms */
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+@@ -1879,14 +1887,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ break;
+ }
+
+- if (ext_encoder) {
+- if (ASIC_IS_DCE41(rdev))
+- atombios_external_encoder_setup(encoder, ext_encoder,
+- EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+- else
+- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+- }
+-
+ atombios_apply_encoder_quirks(encoder, adjusted_mode);
+
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+@@ -2059,7 +2059,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
+ }
+
+ radeon_atom_output_lock(encoder, true);
+- radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+@@ -2080,6 +2079,7 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
+
+ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
+ {
++ /* need to call this here as we need the crtc set up */
+ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+ radeon_atom_output_lock(encoder, false);
+ }
+@@ -2120,14 +2120,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+- if (ASIC_IS_DCE4(rdev))
+- /* disable the transmitter */
+- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+- else {
+- /* disable the encoder and transmitter */
+- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+- atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+- }
++ /* handled in dpms */
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 9231564..c5762e3 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -761,7 +761,7 @@ int radeon_device_init(struct radeon_device *rdev,
+ if (rdev->flags & RADEON_IS_AGP)
+ rdev->need_dma32 = true;
+ if ((rdev->flags & RADEON_IS_PCI) &&
+- (rdev->family < CHIP_RS400))
++ (rdev->family <= CHIP_RS740))
+ rdev->need_dma32 = true;
+
+ dma_bits = rdev->need_dma32 ? 32 : 40;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index dff8fc7..033fc96 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -178,6 +178,7 @@ static struct pci_device_id vmw_pci_id_list[] = {
+ {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
+ {0, 0, 0}
+ };
++MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
+
+ static int enable_fbdev;
+
+@@ -1088,6 +1089,11 @@ static struct drm_driver driver = {
+ .master_drop = vmw_master_drop,
+ .open = vmw_driver_open,
+ .postclose = vmw_postclose,
++
++ .dumb_create = vmw_dumb_create,
++ .dumb_map_offset = vmw_dumb_map_offset,
++ .dumb_destroy = vmw_dumb_destroy,
++
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index dc27970..0e3fa7d 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -641,6 +641,16 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
+ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
++int vmw_dumb_create(struct drm_file *file_priv,
++ struct drm_device *dev,
++ struct drm_mode_create_dumb *args);
++
++int vmw_dumb_map_offset(struct drm_file *file_priv,
++ struct drm_device *dev, uint32_t handle,
++ uint64_t *offset);
++int vmw_dumb_destroy(struct drm_file *file_priv,
++ struct drm_device *dev,
++ uint32_t handle);
+ /**
+ * Overlay control - vmwgfx_overlay.c
+ */
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+index 1c7f09e..0795d17 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+@@ -1950,3 +1950,76 @@ err_ref:
+ vmw_resource_unreference(&res);
+ return ret;
+ }
++
++
++int vmw_dumb_create(struct drm_file *file_priv,
++ struct drm_device *dev,
++ struct drm_mode_create_dumb *args)
++{
++ struct vmw_private *dev_priv = vmw_priv(dev);
++ struct vmw_master *vmaster = vmw_master(file_priv->master);
++ struct vmw_user_dma_buffer *vmw_user_bo;
++ struct ttm_buffer_object *tmp;
++ int ret;
++
++ args->pitch = args->width * ((args->bpp + 7) / 8);
++ args->size = args->pitch * args->height;
++
++ vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
++ if (vmw_user_bo == NULL)
++ return -ENOMEM;
++
++ ret = ttm_read_lock(&vmaster->lock, true);
++ if (ret != 0) {
++ kfree(vmw_user_bo);
++ return ret;
++ }
++
++ ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
++ &vmw_vram_sys_placement, true,
++ &vmw_user_dmabuf_destroy);
++ if (ret != 0)
++ goto out_no_dmabuf;
++
++ tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
++ ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
++ &vmw_user_bo->base,
++ false,
++ ttm_buffer_type,
++ &vmw_user_dmabuf_release, NULL);
++ if (unlikely(ret != 0))
++ goto out_no_base_object;
++
++ args->handle = vmw_user_bo->base.hash.key;
++
++out_no_base_object:
++ ttm_bo_unref(&tmp);
++out_no_dmabuf:
++ ttm_read_unlock(&vmaster->lock);
++ return ret;
++}
++
++int vmw_dumb_map_offset(struct drm_file *file_priv,
++ struct drm_device *dev, uint32_t handle,
++ uint64_t *offset)
++{
++ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++ struct vmw_dma_buffer *out_buf;
++ int ret;
++
++ ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
++ if (ret != 0)
++ return -EINVAL;
++
++ *offset = out_buf->base.addr_space_offset;
++ vmw_dmabuf_unreference(&out_buf);
++ return 0;
++}
++
++int vmw_dumb_destroy(struct drm_file *file_priv,
++ struct drm_device *dev,
++ uint32_t handle)
++{
++ return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
++ handle, TTM_REF_USAGE);
++}
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index d21f6d0..b5cc078 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -350,6 +350,7 @@ config HID_MULTITOUCH
+ - Lumio CrystalTouch panels
+ - MosArt dual-touch panels
+ - PenMount dual touch panels
++ - PixArt optical touch screen
+ - Pixcir dual touch panels
+ - eGalax dual-touch panels, including the Joojoo and Wetab tablets
+ - Stantum multitouch panels
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 5cc029f..0c8bea9 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1507,6 +1507,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_PCI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index e4317a2..ab75a4e 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -593,6 +593,11 @@
+ #define USB_VENDOR_ID_PI_ENGINEERING 0x05f3
+ #define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL 0xff
+
++#define USB_VENDOR_ID_PIXART 0x093a
++#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN 0x8001
++#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1 0x8002
++#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2 0x8003
++
+ #define USB_VENDOR_ID_PLAYDOTCOM 0x0b43
+ #define USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII 0x0003
+
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 995fc4c..13af0f1 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -93,6 +93,7 @@ struct mt_class {
+ #define MT_CLS_DUAL_INRANGE_CONTACTID 0x0005
+ #define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0006
+ #define MT_CLS_DUAL_NSMU_CONTACTID 0x0007
++#define MT_CLS_INRANGE_CONTACTNUMBER 0x0009
+
+ /* vendor specific classes */
+ #define MT_CLS_3M 0x0101
+@@ -155,6 +156,9 @@ struct mt_class mt_classes[] = {
+ .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
+ MT_QUIRK_SLOT_IS_CONTACTID,
+ .maxcontacts = 2 },
++ { .name = MT_CLS_INRANGE_CONTACTNUMBER,
++ .quirks = MT_QUIRK_VALID_IS_INRANGE |
++ MT_QUIRK_SLOT_IS_CONTACTNUMBER },
+
+ /*
+ * vendor specific classes
+@@ -744,6 +748,17 @@ static const struct hid_device_id mt_devices[] = {
+ HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT,
+ USB_DEVICE_ID_PENMOUNT_PCI) },
+
++ /* PixArt optical touch screen */
++ { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
++ HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
++ USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN) },
++ { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
++ HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
++ USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1) },
++ { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
++ HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
++ USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2) },
++
+ /* PixCir-based panels */
+ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
+ HID_USB_DEVICE(USB_VENDOR_ID_HANVON,
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 1fe6b80..afb73af 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -68,6 +68,10 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
+diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
+index 00e9851..83d2fbd6 100644
+--- a/drivers/hwmon/asus_atk0110.c
++++ b/drivers/hwmon/asus_atk0110.c
+@@ -34,6 +34,12 @@ static const struct dmi_system_id __initconst atk_force_new_if[] = {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58")
+ }
++ }, {
++ /* Old interface reads the same sensor for fan0 and fan1 */
++ .ident = "Asus M5A78L",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "M5A78L")
++ }
+ },
+ { }
+ };
+diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
+index 0018c7d..1a174f0 100644
+--- a/drivers/hwmon/twl4030-madc-hwmon.c
++++ b/drivers/hwmon/twl4030-madc-hwmon.c
+@@ -44,12 +44,13 @@ static ssize_t madc_read(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+ {
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+- struct twl4030_madc_request req;
++ struct twl4030_madc_request req = {
++ .channels = 1 << attr->index,
++ .method = TWL4030_MADC_SW2,
++ .type = TWL4030_MADC_WAIT,
++ };
+ long val;
+
+- req.channels = (1 << attr->index);
+- req.method = TWL4030_MADC_SW2;
+- req.func_cb = NULL;
+ val = twl4030_madc_conversion(&req);
+ if (val < 0)
+ return val;
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index a3afac4..60f593c 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -103,6 +103,8 @@ config I2C_I801
+ Patsburg (PCH)
+ DH89xxCC (PCH)
+ Panther Point (PCH)
++ Lynx Point (PCH)
++ Lynx Point-LP (PCH)
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-i801.
+@@ -349,9 +351,13 @@ config I2C_DAVINCI
+ devices such as DaVinci NIC.
+ For details please see http://www.ti.com/davinci
+
++config I2C_DESIGNWARE_CORE
++ tristate
++
+ config I2C_DESIGNWARE_PLATFORM
+ tristate "Synopsys DesignWare Platfrom"
+ depends on HAVE_CLK
++ select I2C_DESIGNWARE_CORE
+ help
+ If you say yes to this option, support will be included for the
+ Synopsys DesignWare I2C adapter. Only master mode is supported.
+@@ -362,6 +368,7 @@ config I2C_DESIGNWARE_PLATFORM
+ config I2C_DESIGNWARE_PCI
+ tristate "Synopsys DesignWare PCI"
+ depends on PCI
++ select I2C_DESIGNWARE_CORE
+ help
+ If you say yes to this option, support will be included for the
+ Synopsys DesignWare I2C adapter. Only master mode is supported.
+diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
+index fba6da6..d6b8779 100644
+--- a/drivers/i2c/busses/Makefile
++++ b/drivers/i2c/busses/Makefile
+@@ -33,10 +33,11 @@ obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
+ obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
+ obj-$(CONFIG_I2C_CPM) += i2c-cpm.o
+ obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o
++obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o
+ obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o
+-i2c-designware-platform-objs := i2c-designware-platdrv.o i2c-designware-core.o
++i2c-designware-platform-objs := i2c-designware-platdrv.o
+ obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o
+-i2c-designware-pci-objs := i2c-designware-pcidrv.o i2c-designware-core.o
++i2c-designware-pci-objs := i2c-designware-pcidrv.o
+ obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o
+ obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o
+ obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
+diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
+index df87992..6193349 100644
+--- a/drivers/i2c/busses/i2c-designware-core.c
++++ b/drivers/i2c/busses/i2c-designware-core.c
+@@ -25,6 +25,7 @@
+ * ----------------------------------------------------------------------------
+ *
+ */
++#include <linux/export.h>
+ #include <linux/clk.h>
+ #include <linux/errno.h>
+ #include <linux/err.h>
+@@ -305,6 +306,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
+ dw_writel(dev, dev->master_cfg , DW_IC_CON);
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_init);
+
+ /*
+ * Waiting for bus not busy
+@@ -557,12 +559,14 @@ done:
+
+ return ret;
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_xfer);
+
+ u32 i2c_dw_func(struct i2c_adapter *adap)
+ {
+ struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
+ return dev->functionality;
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_func);
+
+ static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
+ {
+@@ -667,17 +671,20 @@ tx_aborted:
+
+ return IRQ_HANDLED;
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_isr);
+
+ void i2c_dw_enable(struct dw_i2c_dev *dev)
+ {
+ /* Enable the adapter */
+ dw_writel(dev, 1, DW_IC_ENABLE);
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_enable);
+
+ u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev)
+ {
+ return dw_readl(dev, DW_IC_ENABLE);
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_is_enabled);
+
+ void i2c_dw_disable(struct dw_i2c_dev *dev)
+ {
+@@ -688,18 +695,22 @@ void i2c_dw_disable(struct dw_i2c_dev *dev)
+ dw_writel(dev, 0, DW_IC_INTR_MASK);
+ dw_readl(dev, DW_IC_CLR_INTR);
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_disable);
+
+ void i2c_dw_clear_int(struct dw_i2c_dev *dev)
+ {
+ dw_readl(dev, DW_IC_CLR_INTR);
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_clear_int);
+
+ void i2c_dw_disable_int(struct dw_i2c_dev *dev)
+ {
+ dw_writel(dev, 0, DW_IC_INTR_MASK);
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_disable_int);
+
+ u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
+ {
+ return dw_readl(dev, DW_IC_COMP_PARAM_1);
+ }
++EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param);
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index ab26840d..817d025 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -51,6 +51,8 @@
+ Patsburg (PCH) IDF 0x1d72 32 hard yes yes yes
+ DH89xxCC (PCH) 0x2330 32 hard yes yes yes
+ Panther Point (PCH) 0x1e22 32 hard yes yes yes
++ Lynx Point (PCH) 0x8c22 32 hard yes yes yes
++ Lynx Point-LP (PCH) 0x9c22 32 hard yes yes yes
+
+ Features supported by this driver:
+ Software PEC no
+@@ -145,6 +147,8 @@
+ #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22
+ #define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330
+ #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
++#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
++#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
+
+ struct i801_priv {
+ struct i2c_adapter adapter;
+@@ -633,6 +637,8 @@ static const struct pci_device_id i801_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS) },
+ { 0, }
+ };
+
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index b4cfc6c..d4ec371 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -177,6 +177,20 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
+ },
+ },
+ {
++ /* Gigabyte T1005 - defines wrong chassis type ("Other") */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
++ },
++ },
++ {
++ /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
++ },
++ },
++ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"),
+diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
+index d497db0..509135f 100644
+--- a/drivers/isdn/isdnloop/isdnloop.c
++++ b/drivers/isdn/isdnloop/isdnloop.c
+@@ -16,7 +16,6 @@
+ #include <linux/sched.h>
+ #include "isdnloop.h"
+
+-static char *revision = "$Revision: 1.11.6.7 $";
+ static char *isdnloop_id = "loop0";
+
+ MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card");
+@@ -1494,17 +1493,6 @@ isdnloop_addcard(char *id1)
+ static int __init
+ isdnloop_init(void)
+ {
+- char *p;
+- char rev[10];
+-
+- if ((p = strchr(revision, ':'))) {
+- strcpy(rev, p + 1);
+- p = strchr(rev, '$');
+- *p = 0;
+- } else
+- strcpy(rev, " ??? ");
+- printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev);
+-
+ if (isdnloop_id)
+ return (isdnloop_addcard(isdnloop_id));
+
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 34416d4..74793af 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -1339,7 +1339,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+ /* complete ongoing async transfer before issuing discard */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
+- if (req->cmd_flags & REQ_SECURE)
++ if (req->cmd_flags & REQ_SECURE &&
++ !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
+ ret = mmc_blk_issue_secdiscard_rq(mq, req);
+ else
+ ret = mmc_blk_issue_discard_rq(mq, req);
+@@ -1614,6 +1615,8 @@ static int mmc_add_disk(struct mmc_blk_data *md)
+ return ret;
+ }
+
++#define CID_MANFID_SAMSUNG 0x15
++
+ static const struct mmc_fixup blk_fixups[] =
+ {
+ MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+@@ -1644,6 +1647,28 @@ static const struct mmc_fixup blk_fixups[] =
+ MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+
++ /*
++ * On these Samsung MoviNAND parts, performing secure erase or
++ * secure trim can result in unrecoverable corruption due to a
++ * firmware bug.
++ */
++ MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
++ MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
++ MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
++ MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
++ MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
++ MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
++ MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
++ MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
++ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
++
+ END_FIXUP
+ };
+
+diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
+index 99b449d..f201bed 100644
+--- a/drivers/mmc/host/mxs-mmc.c
++++ b/drivers/mmc/host/mxs-mmc.c
+@@ -279,11 +279,11 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
+ writel(stat & MXS_MMC_IRQ_BITS,
+ host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+
++ spin_unlock(&host->lock);
++
+ if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
+ mmc_signal_sdio_irq(host->mmc);
+
+- spin_unlock(&host->lock);
+-
+ if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
+ cmd->error = -ETIMEDOUT;
+ else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
+@@ -628,10 +628,6 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+ host->base + HW_SSP_CTRL0 + MXS_SET_ADDR);
+ writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
+ host->base + HW_SSP_CTRL1 + MXS_SET_ADDR);
+-
+- if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ)
+- mmc_signal_sdio_irq(host->mmc);
+-
+ } else {
+ writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
+ host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR);
+@@ -640,6 +636,10 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
++
++ if (enable && readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ)
++ mmc_signal_sdio_irq(host->mmc);
++
+ }
+
+ static const struct mmc_host_ops mxs_mmc_ops = {
+diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
+index c3b08f1..62ca03a 100644
+--- a/drivers/mmc/host/sdhci-esdhc.h
++++ b/drivers/mmc/host/sdhci-esdhc.h
+@@ -48,14 +48,14 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
+ int div = 1;
+ u32 temp;
+
++ if (clock == 0)
++ goto out;
++
+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+ temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
+ | ESDHC_CLOCK_MASK);
+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
+
+- if (clock == 0)
+- goto out;
+-
+ while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
+ pre_div *= 2;
+
+diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
+index 890754c..95b29f5 100644
+--- a/drivers/mtd/ubi/vtbl.c
++++ b/drivers/mtd/ubi/vtbl.c
+@@ -346,7 +346,7 @@ retry:
+ */
+ err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec,
+ vid_hdr, 0);
+- kfree(new_seb);
++ kmem_cache_free(si->scan_leb_slab, new_seb);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+
+@@ -359,7 +359,7 @@ write_error:
+ list_add(&new_seb->u.list, &si->erase);
+ goto retry;
+ }
+- kfree(new_seb);
++ kmem_cache_free(si->scan_leb_slab, new_seb);
+ out_free:
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
+index 330140e..9bcc39a 100644
+--- a/drivers/net/can/mcp251x.c
++++ b/drivers/net/can/mcp251x.c
+@@ -83,6 +83,11 @@
+ #define INSTRUCTION_LOAD_TXB(n) (0x40 + 2 * (n))
+ #define INSTRUCTION_READ_RXB(n) (((n) == 0) ? 0x90 : 0x94)
+ #define INSTRUCTION_RESET 0xC0
++#define RTS_TXB0 0x01
++#define RTS_TXB1 0x02
++#define RTS_TXB2 0x04
++#define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07))
++
+
+ /* MPC251x registers */
+ #define CANSTAT 0x0e
+@@ -397,6 +402,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
+ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
+ int tx_buf_idx)
+ {
++ struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+ u32 sid, eid, exide, rtr;
+ u8 buf[SPI_TRANSFER_BUF_LEN];
+
+@@ -418,7 +424,10 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
+ buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc;
+ memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc);
+ mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx);
+- mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx), TXBCTRL_TXREQ);
++
++ /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */
++ priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
++ mcp251x_spi_trans(priv->spi, 1);
+ }
+
+ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index 83199fd..d0722a7 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -1041,7 +1041,7 @@ static int gfar_probe(struct platform_device *ofdev)
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
+ dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
++ dev->features |= NETIF_F_HW_VLAN_RX;
+ }
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index b1cd41b..021463b 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -472,14 +472,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
+ }
+
+ if (adapter->rx_queue.queue_addr != NULL) {
+- if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
+- dma_unmap_single(dev,
+- adapter->rx_queue.queue_dma,
+- adapter->rx_queue.queue_len,
+- DMA_BIDIRECTIONAL);
+- adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
+- }
+- kfree(adapter->rx_queue.queue_addr);
++ dma_free_coherent(dev, adapter->rx_queue.queue_len,
++ adapter->rx_queue.queue_addr,
++ adapter->rx_queue.queue_dma);
+ adapter->rx_queue.queue_addr = NULL;
+ }
+
+@@ -556,10 +551,13 @@ static int ibmveth_open(struct net_device *netdev)
+ goto err_out;
+ }
+
++ dev = &adapter->vdev->dev;
++
+ adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
+ rxq_entries;
+- adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
+- GFP_KERNEL);
++ adapter->rx_queue.queue_addr =
++ dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
++ &adapter->rx_queue.queue_dma, GFP_KERNEL);
+
+ if (!adapter->rx_queue.queue_addr) {
+ netdev_err(netdev, "unable to allocate rx queue pages\n");
+@@ -567,19 +565,13 @@ static int ibmveth_open(struct net_device *netdev)
+ goto err_out;
+ }
+
+- dev = &adapter->vdev->dev;
+-
+ adapter->buffer_list_dma = dma_map_single(dev,
+ adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
+ adapter->filter_list_dma = dma_map_single(dev,
+ adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
+- adapter->rx_queue.queue_dma = dma_map_single(dev,
+- adapter->rx_queue.queue_addr,
+- adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
+
+ if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
+- (dma_mapping_error(dev, adapter->filter_list_dma)) ||
+- (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
++ (dma_mapping_error(dev, adapter->filter_list_dma))) {
+ netdev_err(netdev, "unable to map filter or buffer list "
+ "pages\n");
+ rc = -ENOMEM;
+diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
+index f478a22..8e362bb 100644
+--- a/drivers/net/ethernet/intel/e1000e/e1000.h
++++ b/drivers/net/ethernet/intel/e1000e/e1000.h
+@@ -302,6 +302,7 @@ struct e1000_adapter {
+ */
+ struct e1000_ring *tx_ring /* One per active queue */
+ ____cacheline_aligned_in_smp;
++ u32 tx_fifo_limit;
+
+ struct napi_struct napi;
+
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 64d3f98..0182649 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -3386,6 +3386,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
+ }
+
+ /*
++ * Alignment of Tx data is on an arbitrary byte boundary with the
++ * maximum size per Tx descriptor limited only to the transmit
++ * allocation of the packet buffer minus 96 bytes with an upper
++ * limit of 24KB due to receive synchronization limitations.
++ */
++ adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
++ 24 << 10);
++
++ /*
+ * Disable Adaptive Interrupt Moderation if 2 full packets cannot
+ * fit in receive buffer and early-receive not supported.
+ */
+@@ -4647,13 +4656,9 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
+ return 1;
+ }
+
+-#define E1000_MAX_PER_TXD 8192
+-#define E1000_MAX_TXD_PWR 12
+-
+ static int e1000_tx_map(struct e1000_adapter *adapter,
+ struct sk_buff *skb, unsigned int first,
+- unsigned int max_per_txd, unsigned int nr_frags,
+- unsigned int mss)
++ unsigned int max_per_txd, unsigned int nr_frags)
+ {
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct pci_dev *pdev = adapter->pdev;
+@@ -4882,20 +4887,19 @@ static int e1000_maybe_stop_tx(struct net_device *netdev, int size)
+ {
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
++ BUG_ON(size > adapter->tx_ring->count);
++
+ if (e1000_desc_unused(adapter->tx_ring) >= size)
+ return 0;
+ return __e1000_maybe_stop_tx(netdev, size);
+ }
+
+-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
+ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+ {
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ unsigned int first;
+- unsigned int max_per_txd = E1000_MAX_PER_TXD;
+- unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
+ unsigned int tx_flags = 0;
+ unsigned int len = skb_headlen(skb);
+ unsigned int nr_frags;
+@@ -4915,18 +4919,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
+ }
+
+ mss = skb_shinfo(skb)->gso_size;
+- /*
+- * The controller does a simple calculation to
+- * make sure there is enough room in the FIFO before
+- * initiating the DMA for each buffer. The calc is:
+- * 4 = ceil(buffer len/mss). To make sure we don't
+- * overrun the FIFO, adjust the max buffer len if mss
+- * drops.
+- */
+ if (mss) {
+ u8 hdr_len;
+- max_per_txd = min(mss << 2, max_per_txd);
+- max_txd_pwr = fls(max_per_txd) - 1;
+
+ /*
+ * TSO Workaround for 82571/2/3 Controllers -- if skb->data
+@@ -4956,12 +4950,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
+ count++;
+ count++;
+
+- count += TXD_USE_COUNT(len, max_txd_pwr);
++ count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ for (f = 0; f < nr_frags; f++)
+- count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+- max_txd_pwr);
++ count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
++ adapter->tx_fifo_limit);
+
+ if (adapter->hw.mac.tx_pkt_filtering)
+ e1000_transfer_dhcp_info(adapter, skb);
+@@ -5000,12 +4994,15 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
+ tx_flags |= E1000_TX_FLAGS_IPV4;
+
+ /* if count is 0 then mapping error has occurred */
+- count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
++ count = e1000_tx_map(adapter, skb, first, adapter->tx_fifo_limit,
++ nr_frags);
+ if (count) {
+ e1000_tx_queue(adapter, tx_flags, count);
+ /* Make sure there is space in the ring for the next send. */
+- e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
+-
++ e1000_maybe_stop_tx(netdev,
++ (MAX_SKB_FRAGS *
++ DIV_ROUND_UP(PAGE_SIZE,
++ adapter->tx_fifo_limit) + 2));
+ } else {
+ dev_kfree_skb_any(skb);
+ tx_ring->buffer_info[first].time_stamp = 0;
+@@ -6150,8 +6147,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
+ adapter->hw.phy.autoneg_advertised = 0x2f;
+
+ /* ring size defaults */
+- adapter->rx_ring->count = 256;
+- adapter->tx_ring->count = 256;
++ adapter->rx_ring->count = E1000_DEFAULT_RXD;
++ adapter->tx_ring->count = E1000_DEFAULT_TXD;
+
+ /*
+ * Initial Wake on LAN setting - If APM wake is enabled in
+diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
+index d5731f1..a6611f1 100644
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -1383,6 +1383,11 @@ static int efx_probe_all(struct efx_nic *efx)
+ goto fail2;
+ }
+
++ BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
++ if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
++ rc = -EINVAL;
++ goto fail3;
++ }
+ efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+ rc = efx_probe_channels(efx);
+ if (rc)
+@@ -1973,6 +1978,7 @@ static int efx_register_netdev(struct efx_nic *efx)
+ net_dev->irq = efx->pci_dev->irq;
+ net_dev->netdev_ops = &efx_netdev_ops;
+ SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
++ net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
+
+ /* Clear MAC statistics */
+ efx->mac_op->update_stats(efx);
+diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
+index 4764793..1355245 100644
+--- a/drivers/net/ethernet/sfc/efx.h
++++ b/drivers/net/ethernet/sfc/efx.h
+@@ -34,6 +34,7 @@ extern netdev_tx_t
+ efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+ extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+ extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
++extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+
+ /* RX */
+ extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+@@ -56,10 +57,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+ #define EFX_MAX_EVQ_SIZE 16384UL
+ #define EFX_MIN_EVQ_SIZE 512UL
+
+-/* The smallest [rt]xq_entries that the driver supports. Callers of
+- * efx_wake_queue() assume that they can subsequently send at least one
+- * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
+-#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
++/* Maximum number of TCP segments we support for soft-TSO */
++#define EFX_TSO_MAX_SEGS 100
++
++/* The smallest [rt]xq_entries that the driver supports. RX minimum
++ * is a bit arbitrary. For TX, we must have space for at least 2
++ * TSO skbs.
++ */
++#define EFX_RXQ_MIN_ENT 128U
++#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
+
+ /* Filters */
+ extern int efx_probe_filters(struct efx_nic *efx);
+diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
+index f3cd96d..90158c9 100644
+--- a/drivers/net/ethernet/sfc/ethtool.c
++++ b/drivers/net/ethernet/sfc/ethtool.c
+@@ -690,21 +690,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring)
+ {
+ struct efx_nic *efx = netdev_priv(net_dev);
++ u32 txq_entries;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
+ ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
+ ring->tx_pending > EFX_MAX_DMAQ_SIZE)
+ return -EINVAL;
+
+- if (ring->rx_pending < EFX_MIN_RING_SIZE ||
+- ring->tx_pending < EFX_MIN_RING_SIZE) {
++ if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
+ netif_err(efx, drv, efx->net_dev,
+- "TX and RX queues cannot be smaller than %ld\n",
+- EFX_MIN_RING_SIZE);
++ "RX queues cannot be smaller than %u\n",
++ EFX_RXQ_MIN_ENT);
+ return -EINVAL;
+ }
+
+- return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
++ txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
++ if (txq_entries != ring->tx_pending)
++ netif_warn(efx, drv, efx->net_dev,
++ "increasing TX queue size to minimum of %u\n",
++ txq_entries);
++
++ return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
+ }
+
+ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
+diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
+index 5fb24d3..66ece48 100644
+--- a/drivers/net/ethernet/sfc/nic.h
++++ b/drivers/net/ethernet/sfc/nic.h
+@@ -65,6 +65,9 @@ enum {
+ #define FALCON_GMAC_LOOPBACKS \
+ (1 << LOOPBACK_GMAC)
+
++/* Alignment of PCIe DMA boundaries (4KB) */
++#define EFX_PAGE_SIZE 4096
++
+ /**
+ * struct falcon_board_type - board operations and type information
+ * @id: Board type id, as found in NVRAM
+diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
+index df88c543..807d515 100644
+--- a/drivers/net/ethernet/sfc/tx.c
++++ b/drivers/net/ethernet/sfc/tx.c
+@@ -115,6 +115,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
+ return len;
+ }
+
++unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
++{
++ /* Header and payload descriptor for each output segment, plus
++ * one for every input fragment boundary within a segment
++ */
++ unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
++
++ /* Possibly one more per segment for the alignment workaround */
++ if (EFX_WORKAROUND_5391(efx))
++ max_descs += EFX_TSO_MAX_SEGS;
++
++ /* Possibly more for PCIe page boundaries within input fragments */
++ if (PAGE_SIZE > EFX_PAGE_SIZE)
++ max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
++ DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
++
++ return max_descs;
++}
++
+ /*
+ * Add a socket buffer to a TX queue
+ *
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index f8a6853..ad6a9d9 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -189,7 +189,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ if (sk_pppox(po)->sk_state & PPPOX_DEAD)
+ goto tx_error;
+
+- rt = ip_route_output_ports(&init_net, &fl4, NULL,
++ rt = ip_route_output_ports(sock_net(sk), &fl4, NULL,
+ opt->dst_addr.sin_addr.s_addr,
+ opt->src_addr.sin_addr.s_addr,
+ 0, 0, IPPROTO_GRE,
+@@ -468,7 +468,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ po->chan.private = sk;
+ po->chan.ops = &pptp_chan_ops;
+
+- rt = ip_route_output_ports(&init_net, &fl4, sk,
++ rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
+ opt->dst_addr.sin_addr.s_addr,
+ opt->src_addr.sin_addr.s_addr,
+ 0, 0,
+diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+index a1670e3..93e6179 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
++++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+@@ -232,6 +232,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
+ struct iwl_priv *priv = file->private_data;
+ size_t bufsz;
+
++ if (!iwl_is_ready_rf(priv->shrd))
++ return -EAGAIN;
++
+ /* default is to dump the entire data segment */
+ if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+ priv->dbgfs_sram_offset = 0x800000;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+index 5c29281..8533ba2 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+@@ -303,7 +303,7 @@ int iwl_queue_space(const struct iwl_queue *q);
+ ******************************************************/
+ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
+ char **buf, bool display);
+-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
++int iwl_dump_fh(struct iwl_trans *trans, char **buf);
+ void iwl_dump_csr(struct iwl_trans *trans);
+
+ /*****************************************************
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+index 1daf01e..17fb25d 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+@@ -678,7 +678,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
+
+ iwl_dump_nic_error_log(trans);
+ iwl_dump_csr(trans);
+- iwl_dump_fh(trans, NULL, false);
++ iwl_dump_fh(trans, NULL);
+ iwl_dump_nic_event_log(trans, false, NULL, false);
+ #ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS)
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+index 4661a64..75da4bc 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+@@ -1541,13 +1541,9 @@ static const char *get_fh_string(int cmd)
+ }
+ }
+
+-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
++int iwl_dump_fh(struct iwl_trans *trans, char **buf)
+ {
+ int i;
+-#ifdef CONFIG_IWLWIFI_DEBUG
+- int pos = 0;
+- size_t bufsz = 0;
+-#endif
+ static const u32 fh_tbl[] = {
+ FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+@@ -1559,29 +1555,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
+ FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_ERROR_REG
+ };
+-#ifdef CONFIG_IWLWIFI_DEBUG
+- if (display) {
+- bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
++
++#ifdef CONFIG_IWLWIFI_DEBUGFS
++ if (buf) {
++ int pos = 0;
++ size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
++
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
++
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "FH register values:\n");
+- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
++
++ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(bus(trans), fh_tbl[i]));
+- }
++
+ return pos;
+ }
+ #endif
++
+ IWL_ERR(trans, "FH register values:\n");
+- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
++ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
+ IWL_ERR(trans, " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(bus(trans), fh_tbl[i]));
+- }
++
+ return 0;
+ }
+
+@@ -1929,11 +1931,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
+ size_t count, loff_t *ppos)
+ {
+ struct iwl_trans *trans = file->private_data;
+- char *buf;
++ char *buf = NULL;
+ int pos = 0;
+ ssize_t ret = -EFAULT;
+
+- ret = pos = iwl_dump_fh(trans, &buf, true);
++ ret = pos = iwl_dump_fh(trans, &buf);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf,
+ count, ppos, buf, pos);
+diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
+index 3a6b402..0ea85f4 100644
+--- a/drivers/net/wireless/rt2x00/rt2400pci.c
++++ b/drivers/net/wireless/rt2x00/rt2400pci.c
+@@ -1611,6 +1611,7 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+ {
+ int retval;
++ u32 reg;
+
+ /*
+ * Allocate eeprom data.
+@@ -1624,6 +1625,14 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+ return retval;
+
+ /*
++ * Enable rfkill polling by setting GPIO direction of the
++ * rfkill switch GPIO pin correctly.
++ */
++ rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
++ rt2x00_set_field32(&reg, GPIOCSR_BIT8, 1);
++ rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg);
++
++ /*
+ * Initialize hw specifications.
+ */
+ retval = rt2400pci_probe_hw_mode(rt2x00dev);
+diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
+index d3a4a68..7564ae9 100644
+--- a/drivers/net/wireless/rt2x00/rt2400pci.h
++++ b/drivers/net/wireless/rt2x00/rt2400pci.h
+@@ -670,6 +670,7 @@
+ #define GPIOCSR_BIT5 FIELD32(0x00000020)
+ #define GPIOCSR_BIT6 FIELD32(0x00000040)
+ #define GPIOCSR_BIT7 FIELD32(0x00000080)
++#define GPIOCSR_BIT8 FIELD32(0x00000100)
+
+ /*
+ * BBPPCSR: BBP Pin control register.
+diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
+index dcc0e1f..aa10c48 100644
+--- a/drivers/net/wireless/rt2x00/rt2500pci.c
++++ b/drivers/net/wireless/rt2x00/rt2500pci.c
+@@ -1929,6 +1929,7 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+ {
+ int retval;
++ u32 reg;
+
+ /*
+ * Allocate eeprom data.
+@@ -1942,6 +1943,14 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+ return retval;
+
+ /*
++ * Enable rfkill polling by setting GPIO direction of the
++ * rfkill switch GPIO pin correctly.
++ */
++ rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg);
++ rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1);
++ rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg);
++
++ /*
+ * Initialize hw specifications.
+ */
+ retval = rt2500pci_probe_hw_mode(rt2x00dev);
+diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
+index 53c5f87..22ed6df 100644
+--- a/drivers/net/wireless/rt2x00/rt2500usb.c
++++ b/drivers/net/wireless/rt2x00/rt2500usb.c
+@@ -283,7 +283,7 @@ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
+ u16 reg;
+
+ rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
+- return rt2x00_get_field32(reg, MAC_CSR19_BIT7);
++ return rt2x00_get_field16(reg, MAC_CSR19_BIT7);
+ }
+
+ #ifdef CONFIG_RT2X00_LIB_LEDS
+@@ -1768,6 +1768,7 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
+ {
+ int retval;
++ u16 reg;
+
+ /*
+ * Allocate eeprom data.
+@@ -1781,6 +1782,14 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
+ return retval;
+
+ /*
++ * Enable rfkill polling by setting GPIO direction of the
++ * rfkill switch GPIO pin correctly.
++ */
++ rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg);
++ rt2x00_set_field16(&reg, MAC_CSR19_BIT8, 0);
++ rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg);
++
++ /*
+ * Initialize hw specifications.
+ */
+ retval = rt2500usb_probe_hw_mode(rt2x00dev);
+diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
+index b493306..196bd51 100644
+--- a/drivers/net/wireless/rt2x00/rt2500usb.h
++++ b/drivers/net/wireless/rt2x00/rt2500usb.h
+@@ -189,14 +189,15 @@
+ * MAC_CSR19: GPIO control register.
+ */
+ #define MAC_CSR19 0x0426
+-#define MAC_CSR19_BIT0 FIELD32(0x0001)
+-#define MAC_CSR19_BIT1 FIELD32(0x0002)
+-#define MAC_CSR19_BIT2 FIELD32(0x0004)
+-#define MAC_CSR19_BIT3 FIELD32(0x0008)
+-#define MAC_CSR19_BIT4 FIELD32(0x0010)
+-#define MAC_CSR19_BIT5 FIELD32(0x0020)
+-#define MAC_CSR19_BIT6 FIELD32(0x0040)
+-#define MAC_CSR19_BIT7 FIELD32(0x0080)
++#define MAC_CSR19_BIT0 FIELD16(0x0001)
++#define MAC_CSR19_BIT1 FIELD16(0x0002)
++#define MAC_CSR19_BIT2 FIELD16(0x0004)
++#define MAC_CSR19_BIT3 FIELD16(0x0008)
++#define MAC_CSR19_BIT4 FIELD16(0x0010)
++#define MAC_CSR19_BIT5 FIELD16(0x0020)
++#define MAC_CSR19_BIT6 FIELD16(0x0040)
++#define MAC_CSR19_BIT7 FIELD16(0x0080)
++#define MAC_CSR19_BIT8 FIELD16(0x0100)
+
+ /*
+ * MAC_CSR20: LED control register.
+diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
+index 837b460..518157d 100644
+--- a/drivers/net/wireless/rt2x00/rt2800pci.c
++++ b/drivers/net/wireless/rt2x00/rt2800pci.c
+@@ -935,6 +935,7 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
+ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+ {
+ int retval;
++ u32 reg;
+
+ /*
+ * Allocate eeprom data.
+@@ -948,6 +949,14 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+ return retval;
+
+ /*
++ * Enable rfkill polling by setting GPIO direction of the
++ * rfkill switch GPIO pin correctly.
++ */
++ rt2x00pci_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
++ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
++ rt2x00pci_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
++
++ /*
+ * Initialize hw specifications.
+ */
+ retval = rt2800_probe_hw_mode(rt2x00dev);
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index ae7528b..b66a61b 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -621,8 +621,16 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
+ skb_pull(entry->skb, RXINFO_DESC_SIZE);
+
+ /*
+- * FIXME: we need to check for rx_pkt_len validity
++ * Check for rx_pkt_len validity. Return if invalid, leaving
++ * rxdesc->size zeroed out by the upper level.
+ */
++ if (unlikely(rx_pkt_len == 0 ||
++ rx_pkt_len > entry->queue->data_size)) {
++ ERROR(entry->queue->rt2x00dev,
++ "Bad frame size %d, forcing to 0\n", rx_pkt_len);
++ return;
++ }
++
+ rxd = (__le32 *)(entry->skb->data + rx_pkt_len);
+
+ /*
+@@ -690,6 +698,7 @@ static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
+ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
+ {
+ int retval;
++ u32 reg;
+
+ /*
+ * Allocate eeprom data.
+@@ -703,6 +712,14 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
+ return retval;
+
+ /*
++ * Enable rfkill polling by setting GPIO direction of the
++ * rfkill switch GPIO pin correctly.
++ */
++ rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
++ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1);
++ rt2x00usb_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
++
++ /*
+ * Initialize hw specifications.
+ */
+ retval = rt2800_probe_hw_mode(rt2x00dev);
+@@ -1111,6 +1128,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x1690, 0x0744) },
+ { USB_DEVICE(0x1690, 0x0761) },
+ { USB_DEVICE(0x1690, 0x0764) },
++ /* ASUS */
++ { USB_DEVICE(0x0b05, 0x179d) },
+ /* Cisco */
+ { USB_DEVICE(0x167b, 0x4001) },
+ /* EnGenius */
+@@ -1163,7 +1182,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x0b05, 0x1760) },
+ { USB_DEVICE(0x0b05, 0x1761) },
+ { USB_DEVICE(0x0b05, 0x1790) },
+- { USB_DEVICE(0x0b05, 0x179d) },
+ /* AzureWave */
+ { USB_DEVICE(0x13d3, 0x3262) },
+ { USB_DEVICE(0x13d3, 0x3284) },
+diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
+index 21b529b..f099b30 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
+@@ -624,7 +624,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
+ */
+ if (unlikely(rxdesc.size == 0 ||
+ rxdesc.size > entry->queue->data_size)) {
+- WARNING(rt2x00dev, "Wrong frame size %d max %d.\n",
++ ERROR(rt2x00dev, "Wrong frame size %d max %d.\n",
+ rxdesc.size, entry->queue->data_size);
+ dev_kfree_skb(entry->skb);
+ goto renew_skb;
+diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
+index d69f88c..3e058e5 100644
+--- a/drivers/net/wireless/rt2x00/rt61pci.c
++++ b/drivers/net/wireless/rt2x00/rt61pci.c
+@@ -2832,6 +2832,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+ {
+ int retval;
++ u32 reg;
+
+ /*
+ * Disable power saving.
+@@ -2850,6 +2851,14 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
+ return retval;
+
+ /*
++ * Enable rfkill polling by setting GPIO direction of the
++ * rfkill switch GPIO pin correctly.
++ */
++ rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
++ rt2x00_set_field32(&reg, MAC_CSR13_BIT13, 1);
++ rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg);
++
++ /*
+ * Initialize hw specifications.
+ */
+ retval = rt61pci_probe_hw_mode(rt2x00dev);
+diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
+index e3cd6db..8f3da5a 100644
+--- a/drivers/net/wireless/rt2x00/rt61pci.h
++++ b/drivers/net/wireless/rt2x00/rt61pci.h
+@@ -372,6 +372,7 @@ struct hw_pairwise_ta_entry {
+ #define MAC_CSR13_BIT10 FIELD32(0x00000400)
+ #define MAC_CSR13_BIT11 FIELD32(0x00000800)
+ #define MAC_CSR13_BIT12 FIELD32(0x00001000)
++#define MAC_CSR13_BIT13 FIELD32(0x00002000)
+
+ /*
+ * MAC_CSR14: LED control register.
+diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
+index cfb19db..2ad468d 100644
+--- a/drivers/net/wireless/rt2x00/rt73usb.c
++++ b/drivers/net/wireless/rt2x00/rt73usb.c
+@@ -2177,6 +2177,7 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
+ {
+ int retval;
++ u32 reg;
+
+ /*
+ * Allocate eeprom data.
+@@ -2190,6 +2191,14 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
+ return retval;
+
+ /*
++ * Enable rfkill polling by setting GPIO direction of the
++ * rfkill switch GPIO pin correctly.
++ */
++ rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg);
++ rt2x00_set_field32(&reg, MAC_CSR13_BIT15, 0);
++ rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg);
++
++ /*
+ * Initialize hw specifications.
+ */
+ retval = rt73usb_probe_hw_mode(rt2x00dev);
+diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
+index 9f6b470..df1cc11 100644
+--- a/drivers/net/wireless/rt2x00/rt73usb.h
++++ b/drivers/net/wireless/rt2x00/rt73usb.h
+@@ -282,6 +282,9 @@ struct hw_pairwise_ta_entry {
+ #define MAC_CSR13_BIT10 FIELD32(0x00000400)
+ #define MAC_CSR13_BIT11 FIELD32(0x00000800)
+ #define MAC_CSR13_BIT12 FIELD32(0x00001000)
++#define MAC_CSR13_BIT13 FIELD32(0x00002000)
++#define MAC_CSR13_BIT14 FIELD32(0x00004000)
++#define MAC_CSR13_BIT15 FIELD32(0x00008000)
+
+ /*
+ * MAC_CSR14: LED control register.
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 29a994f..7c471eb 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -4125,7 +4125,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ spin_lock_init(&instance->cmd_pool_lock);
+ spin_lock_init(&instance->hba_lock);
+ spin_lock_init(&instance->completion_lock);
+- spin_lock_init(&poll_aen_lock);
+
+ mutex_init(&instance->aen_mutex);
+ mutex_init(&instance->reset_mutex);
+@@ -5520,6 +5519,8 @@ static int __init megasas_init(void)
+ printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
+ MEGASAS_EXT_VERSION);
+
++ spin_lock_init(&poll_aen_lock);
++
+ support_poll_for_event = 2;
+ support_device_change = 1;
+
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
+index e903077..98cb5e6 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -2353,10 +2353,13 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+ }
+
+ /* command line tunables for max controller queue depth */
+- if (max_queue_depth != -1)
+- max_request_credit = (max_queue_depth < facts->RequestCredit)
+- ? max_queue_depth : facts->RequestCredit;
+- else
++ if (max_queue_depth != -1 && max_queue_depth != 0) {
++ max_request_credit = min_t(u16, max_queue_depth +
++ ioc->hi_priority_depth + ioc->internal_depth,
++ facts->RequestCredit);
++ if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
++ max_request_credit = MAX_HBA_QUEUE_DEPTH;
++ } else
+ max_request_credit = min_t(u16, facts->RequestCredit,
+ MAX_HBA_QUEUE_DEPTH);
+
+@@ -2431,7 +2434,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
+ /* set the scsi host can_queue depth
+ * with some internal commands that could be outstanding
+ */
+- ioc->shost->can_queue = ioc->scsiio_depth - (2);
++ ioc->shost->can_queue = ioc->scsiio_depth;
+ dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: "
+ "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));
+
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 456b131..c83571e 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -41,6 +41,8 @@
+
+ #include <trace/events/scsi.h>
+
++static void scsi_eh_done(struct scsi_cmnd *scmd);
++
+ #define SENSE_TIMEOUT (10*HZ)
+
+ /*
+@@ -240,6 +242,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
+ if (! scsi_command_normalize_sense(scmd, &sshdr))
+ return FAILED; /* no valid sense data */
+
++ if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
++ /*
++ * nasty: for mid-layer issued TURs, we need to return the
++ * actual sense data without any recovery attempt. For eh
++ * issued ones, we need to try to recover and interpret
++ */
++ return SUCCESS;
++
+ if (scsi_sense_is_deferred(&sshdr))
+ return NEEDS_RETRY;
+
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index a48b59c..c6c80c9 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -776,6 +776,16 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
+ sdev->model = (char *) (sdev->inquiry + 16);
+ sdev->rev = (char *) (sdev->inquiry + 32);
+
++ if (strncmp(sdev->vendor, "ATA ", 8) == 0) {
++ /*
++ * sata emulation layer device. This is a hack to work around
++ * the SATL power management specifications which state that
++ * when the SATL detects the device has gone into standby
++ * mode, it shall respond with NOT READY.
++ */
++ sdev->allow_restart = 1;
++ }
++
+ if (*bflags & BLIST_ISROM) {
+ sdev->type = TYPE_ROM;
+ sdev->removable = 1;
+diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
+index 3141dc8..a48fe88 100644
+--- a/drivers/staging/comedi/drivers/das08.c
++++ b/drivers/staging/comedi/drivers/das08.c
+@@ -385,7 +385,7 @@ static const struct das08_board_struct das08_boards[] = {
+ .ai = das08_ai_rinsn,
+ .ai_nbits = 16,
+ .ai_pg = das08_pg_none,
+- .ai_encoding = das08_encode12,
++ .ai_encoding = das08_encode16,
+ .ao = das08jr_ao_winsn,
+ .ao_nbits = 16,
+ .di = das08jr_di_rbits,
+@@ -655,7 +655,7 @@ static int das08jr_ao_winsn(struct comedi_device *dev,
+ int chan;
+
+ lsb = data[0] & 0xff;
+- msb = (data[0] >> 8) & 0xf;
++ msb = (data[0] >> 8) & 0xff;
+
+ chan = CR_CHAN(insn->chanspec);
+
+diff --git a/drivers/staging/rtl8712/recv_linux.c b/drivers/staging/rtl8712/recv_linux.c
+index 0e26d5f..495ee12 100644
+--- a/drivers/staging/rtl8712/recv_linux.c
++++ b/drivers/staging/rtl8712/recv_linux.c
+@@ -117,13 +117,8 @@ void r8712_recv_indicatepkt(struct _adapter *padapter,
+ if (skb == NULL)
+ goto _recv_indicatepkt_drop;
+ skb->data = precv_frame->u.hdr.rx_data;
+-#ifdef NET_SKBUFF_DATA_USES_OFFSET
+- skb->tail = (sk_buff_data_t)(precv_frame->u.hdr.rx_tail -
+- precv_frame->u.hdr.rx_head);
+-#else
+- skb->tail = (sk_buff_data_t)precv_frame->u.hdr.rx_tail;
+-#endif
+ skb->len = precv_frame->u.hdr.len;
++ skb_set_tail_pointer(skb, skb->len);
+ if ((pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
+index c0edf97..08021f4 100644
+--- a/drivers/staging/vt6656/dpc.c
++++ b/drivers/staging/vt6656/dpc.c
+@@ -200,7 +200,7 @@ s_vProcessRxMACHeader (
+ } else if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_RFC1042[0])) {
+ cbHeaderSize += 6;
+ pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize);
+- if ((*pwType == cpu_to_le16(ETH_P_IPX)) ||
++ if ((*pwType == cpu_to_be16(ETH_P_IPX)) ||
+ (*pwType == cpu_to_le16(0xF380))) {
+ cbHeaderSize -= 8;
+ pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize);
+diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
+index 9b64b10..fe21868 100644
+--- a/drivers/staging/vt6656/rxtx.c
++++ b/drivers/staging/vt6656/rxtx.c
+@@ -1701,7 +1701,7 @@ s_bPacketToWirelessUsb(
+ // 802.1H
+ if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) {
+ if (pDevice->dwDiagRefCount == 0) {
+- if ((psEthHeader->wType == cpu_to_le16(ETH_P_IPX)) ||
++ if ((psEthHeader->wType == cpu_to_be16(ETH_P_IPX)) ||
+ (psEthHeader->wType == cpu_to_le16(0xF380))) {
+ memcpy((PBYTE) (pbyPayloadHead),
+ abySNAP_Bridgetunnel, 6);
+@@ -2840,10 +2840,10 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
+ Packet_Type = skb->data[ETH_HLEN+1];
+ Descriptor_type = skb->data[ETH_HLEN+1+1+2];
+ Key_info = (skb->data[ETH_HLEN+1+1+2+1] << 8)|(skb->data[ETH_HLEN+1+1+2+2]);
+- if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) {
+- /* 802.1x OR eapol-key challenge frame transfer */
+- if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
+- (Packet_Type == 3)) {
++ if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) {
++ /* 802.1x OR eapol-key challenge frame transfer */
++ if (((Protocol_Version == 1) || (Protocol_Version == 2)) &&
++ (Packet_Type == 3)) {
+ bTxeapol_key = TRUE;
+ if(!(Key_info & BIT3) && //WPA or RSN group-key challenge
+ (Key_info & BIT8) && (Key_info & BIT9)) { //send 2/2 key
+@@ -2989,19 +2989,19 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
+ }
+ }
+
+- if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) {
+- if (pDevice->byBBType != BB_TYPE_11A) {
+- pDevice->wCurrentRate = RATE_1M;
+- pDevice->byACKRate = RATE_1M;
+- pDevice->byTopCCKBasicRate = RATE_1M;
+- pDevice->byTopOFDMBasicRate = RATE_6M;
+- } else {
+- pDevice->wCurrentRate = RATE_6M;
+- pDevice->byACKRate = RATE_6M;
+- pDevice->byTopCCKBasicRate = RATE_1M;
+- pDevice->byTopOFDMBasicRate = RATE_6M;
+- }
+- }
++ if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) {
++ if (pDevice->byBBType != BB_TYPE_11A) {
++ pDevice->wCurrentRate = RATE_1M;
++ pDevice->byACKRate = RATE_1M;
++ pDevice->byTopCCKBasicRate = RATE_1M;
++ pDevice->byTopOFDMBasicRate = RATE_6M;
++ } else {
++ pDevice->wCurrentRate = RATE_6M;
++ pDevice->byACKRate = RATE_6M;
++ pDevice->byTopCCKBasicRate = RATE_1M;
++ pDevice->byTopOFDMBasicRate = RATE_6M;
++ }
++ }
+
+ DBG_PRT(MSG_LEVEL_DEBUG,
+ KERN_INFO "dma_tx: pDevice->wCurrentRate = %d\n",
+@@ -3017,7 +3017,7 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
+
+ if (bNeedEncryption == TRUE) {
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ntohs Pkt Type=%04x\n", ntohs(pDevice->sTxEthHeader.wType));
+- if ((pDevice->sTxEthHeader.wType) == cpu_to_le16(ETH_P_PAE)) {
++ if ((pDevice->sTxEthHeader.wType) == cpu_to_be16(ETH_P_PAE)) {
+ bNeedEncryption = FALSE;
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Pkt Type=%04x\n", (pDevice->sTxEthHeader.wType));
+ if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
+diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
+index 16ad9fe..4306475 100644
+--- a/drivers/staging/zcache/zcache-main.c
++++ b/drivers/staging/zcache/zcache-main.c
+@@ -1223,13 +1223,12 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
+ void *pampd, struct tmem_pool *pool,
+ struct tmem_oid *oid, uint32_t index)
+ {
+- int ret = 0;
+-
+ BUG_ON(!is_ephemeral(pool));
+- zbud_decompress((struct page *)(data), pampd);
++ if (zbud_decompress((struct page *)(data), pampd) < 0)
++ return -EINVAL;
+ zbud_free_and_delist((struct zbud_hdr *)pampd);
+ atomic_dec(&zcache_curr_eph_pampd_count);
+- return ret;
++ return 0;
+ }
+
+ /*
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 163fc90..8e68f79 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -130,6 +130,7 @@
+ #define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */
+ #define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */
+ #define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */
++#define UFCR_DCEDTE (1<<6) /* DCE/DTE mode select */
+ #define UFCR_RFDIV (7<<7) /* Reference freq divider mask */
+ #define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7)
+ #define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */
+@@ -635,22 +636,11 @@ static void imx_break_ctl(struct uart_port *port, int break_state)
+ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
+ {
+ unsigned int val;
+- unsigned int ufcr_rfdiv;
+-
+- /* set receiver / transmitter trigger level.
+- * RFDIV is set such way to satisfy requested uartclk value
+- */
+- val = TXTL << 10 | RXTL;
+- ufcr_rfdiv = (clk_get_rate(sport->clk) + sport->port.uartclk / 2)
+- / sport->port.uartclk;
+-
+- if(!ufcr_rfdiv)
+- ufcr_rfdiv = 1;
+-
+- val |= UFCR_RFDIV_REG(ufcr_rfdiv);
+
++ /* set receiver / transmitter trigger level */
++ val = readl(sport->port.membase + UFCR) & (UFCR_RFDIV | UFCR_DCEDTE);
++ val |= TXTL << UFCR_TXTL_SHF | RXTL;
+ writel(val, sport->port.membase + UFCR);
+-
+ return 0;
+ }
+
+@@ -725,6 +715,7 @@ static int imx_startup(struct uart_port *port)
+ }
+ }
+
++ spin_lock_irqsave(&sport->port.lock, flags);
+ /*
+ * Finally, clear and enable interrupts
+ */
+@@ -778,7 +769,6 @@ static int imx_startup(struct uart_port *port)
+ /*
+ * Enable modem status interrupts
+ */
+- spin_lock_irqsave(&sport->port.lock,flags);
+ imx_enable_ms(&sport->port);
+ spin_unlock_irqrestore(&sport->port.lock,flags);
+
+@@ -808,10 +798,13 @@ static void imx_shutdown(struct uart_port *port)
+ {
+ struct imx_port *sport = (struct imx_port *)port;
+ unsigned long temp;
++ unsigned long flags;
+
++ spin_lock_irqsave(&sport->port.lock, flags);
+ temp = readl(sport->port.membase + UCR2);
+ temp &= ~(UCR2_TXEN);
+ writel(temp, sport->port.membase + UCR2);
++ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ if (USE_IRDA(sport)) {
+ struct imxuart_platform_data *pdata;
+@@ -840,12 +833,14 @@ static void imx_shutdown(struct uart_port *port)
+ * Disable all interrupts, port and break condition.
+ */
+
++ spin_lock_irqsave(&sport->port.lock, flags);
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
+ if (USE_IRDA(sport))
+ temp &= ~(UCR1_IREN);
+
+ writel(temp, sport->port.membase + UCR1);
++ spin_unlock_irqrestore(&sport->port.lock, flags);
+ }
+
+ static void
+@@ -1119,6 +1114,9 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
+ {
+ struct imx_port *sport = imx_ports[co->index];
+ unsigned int old_ucr1, old_ucr2, ucr1;
++ unsigned long flags;
++
++ spin_lock_irqsave(&sport->port.lock, flags);
+
+ /*
+ * First, save UCR1/2 and then disable interrupts
+@@ -1145,6 +1143,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
+
+ writel(old_ucr1, sport->port.membase + UCR1);
+ writel(old_ucr2, sport->port.membase + UCR2);
++
++ spin_unlock_irqrestore(&sport->port.lock, flags);
+ }
+
+ /*
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 32d3adc..8b2a9d8 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -96,6 +96,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { USB_DEVICE(0x04b4, 0x0526), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
++ /* Microchip Joss Optical infrared touchboard device */
++ { USB_DEVICE(0x04d8, 0x000c), .driver_info =
++ USB_QUIRK_CONFIG_INTF_STRINGS },
++
+ /* Samsung Android phone modem - ID conflict with SPH-I500 */
+ { USB_DEVICE(0x04e8, 0x6601), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
+index fef1db3..2023733 100644
+--- a/drivers/usb/host/ehci-q.c
++++ b/drivers/usb/host/ehci-q.c
+@@ -128,9 +128,17 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
+ else {
+ qtd = list_entry (qh->qtd_list.next,
+ struct ehci_qtd, qtd_list);
+- /* first qtd may already be partially processed */
+- if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
++ /*
++ * first qtd may already be partially processed.
++ * If we come here during unlink, the QH overlay region
++ * might have reference to the just unlinked qtd. The
++ * qtd is updated in qh_completions(). Update the QH
++ * overlay here.
++ */
++ if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) {
++ qh->hw->hw_qtd_next = qtd->hw_next;
+ qtd = NULL;
++ }
+ }
+
+ if (qtd)
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 833b3c6..d0ec2f0 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -75,7 +75,9 @@
+ #define NB_PIF0_PWRDOWN_1 0x01100013
+
+ #define USB_INTEL_XUSB2PR 0xD0
++#define USB_INTEL_USB2PRM 0xD4
+ #define USB_INTEL_USB3_PSSEN 0xD8
++#define USB_INTEL_USB3PRM 0xDC
+
+ static struct amd_chipset_info {
+ struct pci_dev *nb_dev;
+@@ -772,10 +774,18 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+ return;
+ }
+
+- ports_available = 0xffffffff;
++ /* Read USB3PRM, the USB 3.0 Port Routing Mask Register
++ * Indicate the ports that can be changed from OS.
++ */
++ pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM,
++ &ports_available);
++
++ dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n",
++ ports_available);
++
+ /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
+- * Register, to turn on SuperSpeed terminations for all
+- * available ports.
++ * Register, to turn on SuperSpeed terminations for the
++ * switchable ports.
+ */
+ pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
+ cpu_to_le32(ports_available));
+@@ -785,7 +795,16 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+ dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
+ "under xHCI: 0x%x\n", ports_available);
+
+- ports_available = 0xffffffff;
++ /* Read XUSB2PRM, xHCI USB 2.0 Port Routing Mask Register
++ * Indicate the USB 2.0 ports to be controlled by the xHCI host.
++ */
++
++ pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
++ &ports_available);
++
++ dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xCHI: 0x%x\n",
++ ports_available);
++
+ /* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
+ * switch the USB 2.0 power and data lines over to the xHCI
+ * host.
+@@ -800,6 +819,13 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+ }
+ EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
+
++void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
++{
++ pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
++ pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
++}
++EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
++
+ /**
+ * PCI Quirks for xHCI.
+ *
+@@ -815,12 +841,12 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
+ void __iomem *op_reg_base;
+ u32 val;
+ int timeout;
++ int len = pci_resource_len(pdev, 0);
+
+ if (!mmio_resource_enabled(pdev, 0))
+ return;
+
+- base = ioremap_nocache(pci_resource_start(pdev, 0),
+- pci_resource_len(pdev, 0));
++ base = ioremap_nocache(pci_resource_start(pdev, 0), len);
+ if (base == NULL)
+ return;
+
+@@ -830,9 +856,17 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
+ */
+ ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
+ do {
++ if ((ext_cap_offset + sizeof(val)) > len) {
++ /* We're reading garbage from the controller */
++ dev_warn(&pdev->dev,
++ "xHCI controller failing to respond");
++ return;
++ }
++
+ if (!ext_cap_offset)
+ /* We've reached the end of the extended capabilities */
+ goto hc_init;
++
+ val = readl(base + ext_cap_offset);
+ if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
+ break;
+@@ -863,9 +897,10 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
+ /* Disable any BIOS SMIs and clear all SMI events*/
+ writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+
++hc_init:
+ if (usb_is_intel_switchable_xhci(pdev))
+ usb_enable_xhci_ports(pdev);
+-hc_init:
++
+ op_reg_base = base + XHCI_HC_LENGTH(readl(base));
+
+ /* Wait for the host controller to be ready before writing any
+diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
+index b1002a8..7f69a39 100644
+--- a/drivers/usb/host/pci-quirks.h
++++ b/drivers/usb/host/pci-quirks.h
+@@ -10,10 +10,12 @@ void usb_amd_quirk_pll_disable(void);
+ void usb_amd_quirk_pll_enable(void);
+ bool usb_is_intel_switchable_xhci(struct pci_dev *pdev);
+ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
++void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
+ #else
+ static inline void usb_amd_quirk_pll_disable(void) {}
+ static inline void usb_amd_quirk_pll_enable(void) {}
+ static inline void usb_amd_dev_put(void) {}
++static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
+ #endif /* CONFIG_PCI */
+
+ #endif /* __LINUX_USB_PCI_QUIRKS_H */
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index fd8a2c2..978860b 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -469,11 +469,48 @@ static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
+ * when this bit is set.
+ */
+ pls |= USB_PORT_STAT_CONNECTION;
++ } else {
++ /*
++ * If CAS bit isn't set but the Port is already at
++ * Compliance Mode, fake a connection so the USB core
++ * notices the Compliance state and resets the port.
++ * This resolves an issue generated by the SN65LVPE502CP
++ * in which sometimes the port enters compliance mode
++ * caused by a delay on the host-device negotiation.
++ */
++ if (pls == USB_SS_PORT_LS_COMP_MOD)
++ pls |= USB_PORT_STAT_CONNECTION;
+ }
++
+ /* update status field */
+ *status |= pls;
+ }
+
++/*
++ * Function for Compliance Mode Quirk.
++ *
++ * This Function verifies if all xhc USB3 ports have entered U0, if so,
++ * the compliance mode timer is deleted. A port won't enter
++ * compliance mode if it has previously entered U0.
++ */
++void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
++{
++ u32 all_ports_seen_u0 = ((1 << xhci->num_usb3_ports)-1);
++ bool port_in_u0 = ((status & PORT_PLS_MASK) == XDEV_U0);
++
++ if (!(xhci->quirks & XHCI_COMP_MODE_QUIRK))
++ return;
++
++ if ((xhci->port_status_u0 != all_ports_seen_u0) && port_in_u0) {
++ xhci->port_status_u0 |= 1 << wIndex;
++ if (xhci->port_status_u0 == all_ports_seen_u0) {
++ del_timer_sync(&xhci->comp_mode_recovery_timer);
++ xhci_dbg(xhci, "All USB3 ports have entered U0 already!\n");
++ xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted.\n");
++ }
++ }
++}
++
+ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+ {
+@@ -618,6 +655,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ /* Update Port Link State for super speed ports*/
+ if (hcd->speed == HCD_USB3) {
+ xhci_hub_report_link_state(&status, temp);
++ /*
++ * Verify if all USB3 Ports Have entered U0 already.
++ * Delete Compliance Mode Timer if so.
++ */
++ xhci_del_comp_mod_timer(xhci, temp, wIndex);
+ }
+ if (bus_state->port_c_suspend & (1 << wIndex))
+ status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 07c72a4..bddcbfc 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -90,6 +90,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
+ xhci->limit_active_eps = 64;
+ xhci->quirks |= XHCI_SW_BW_CHECKING;
++ /*
++ * PPT desktop boards DH77EB and DH77DF will power back on after
++ * a few seconds of being shutdown. The fix for this is to
++ * switch the ports from xHCI to EHCI on shutdown. We can't use
++ * DMI information to find those particular boards (since each
++ * vendor will change the board name), so we have to key off all
++ * PPT chipsets.
++ */
++ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index fb0981e..c7c530c 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -145,25 +145,34 @@ static void next_trb(struct xhci_hcd *xhci,
+ */
+ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+ {
+- union xhci_trb *next = ++(ring->dequeue);
+ unsigned long long addr;
+
+ ring->deq_updates++;
+- /* Update the dequeue pointer further if that was a link TRB or we're at
+- * the end of an event ring segment (which doesn't have link TRBS)
+- */
+- while (last_trb(xhci, ring, ring->deq_seg, next)) {
+- if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
+- ring->cycle_state = (ring->cycle_state ? 0 : 1);
+- if (!in_interrupt())
+- xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
+- ring,
+- (unsigned int) ring->cycle_state);
++
++ do {
++ /*
++ * Update the dequeue pointer further if that was a link TRB or
++ * we're at the end of an event ring segment (which doesn't have
++ * link TRBS)
++ */
++ if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
++ if (consumer && last_trb_on_last_seg(xhci, ring,
++ ring->deq_seg, ring->dequeue)) {
++ if (!in_interrupt())
++ xhci_dbg(xhci, "Toggle cycle state "
++ "for ring %p = %i\n",
++ ring,
++ (unsigned int)
++ ring->cycle_state);
++ ring->cycle_state = (ring->cycle_state ? 0 : 1);
++ }
++ ring->deq_seg = ring->deq_seg->next;
++ ring->dequeue = ring->deq_seg->trbs;
++ } else {
++ ring->dequeue++;
+ }
+- ring->deq_seg = ring->deq_seg->next;
+- ring->dequeue = ring->deq_seg->trbs;
+- next = ring->dequeue;
+- }
++ } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
++
+ addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
+ }
+
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index f7c0a2a..09872ee 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -26,6 +26,7 @@
+ #include <linux/module.h>
+ #include <linux/moduleparam.h>
+ #include <linux/slab.h>
++#include <linux/dmi.h>
+
+ #include "xhci.h"
+
+@@ -387,6 +388,95 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+
+ #endif
+
++static void compliance_mode_recovery(unsigned long arg)
++{
++ struct xhci_hcd *xhci;
++ struct usb_hcd *hcd;
++ u32 temp;
++ int i;
++
++ xhci = (struct xhci_hcd *)arg;
++
++ for (i = 0; i < xhci->num_usb3_ports; i++) {
++ temp = xhci_readl(xhci, xhci->usb3_ports[i]);
++ if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
++ /*
++ * Compliance Mode Detected. Letting USB Core
++ * handle the Warm Reset
++ */
++ xhci_dbg(xhci, "Compliance Mode Detected->Port %d!\n",
++ i + 1);
++ xhci_dbg(xhci, "Attempting Recovery routine!\n");
++ hcd = xhci->shared_hcd;
++
++ if (hcd->state == HC_STATE_SUSPENDED)
++ usb_hcd_resume_root_hub(hcd);
++
++ usb_hcd_poll_rh_status(hcd);
++ }
++ }
++
++ if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
++ mod_timer(&xhci->comp_mode_recovery_timer,
++ jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
++}
++
++/*
++ * Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver
++ * that causes ports behind that hardware to enter compliance mode sometimes.
++ * The quirk creates a timer that polls every 2 seconds the link state of
++ * each host controller's port and recovers it by issuing a Warm reset
++ * if Compliance mode is detected, otherwise the port will become "dead" (no
++ * device connections or disconnections will be detected anymore). Becasue no
++ * status event is generated when entering compliance mode (per xhci spec),
++ * this quirk is needed on systems that have the failing hardware installed.
++ */
++static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
++{
++ xhci->port_status_u0 = 0;
++ init_timer(&xhci->comp_mode_recovery_timer);
++
++ xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
++ xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
++ xhci->comp_mode_recovery_timer.expires = jiffies +
++ msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
++
++ set_timer_slack(&xhci->comp_mode_recovery_timer,
++ msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
++ add_timer(&xhci->comp_mode_recovery_timer);
++ xhci_dbg(xhci, "Compliance Mode Recovery Timer Initialized.\n");
++}
++
++/*
++ * This function identifies the systems that have installed the SN65LVPE502CP
++ * USB3.0 re-driver and that need the Compliance Mode Quirk.
++ * Systems:
++ * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
++ */
++static bool compliance_mode_recovery_timer_quirk_check(void)
++{
++ const char *dmi_product_name, *dmi_sys_vendor;
++
++ dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
++ dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
++
++ if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
++ return false;
++
++ if (strstr(dmi_product_name, "Z420") ||
++ strstr(dmi_product_name, "Z620") ||
++ strstr(dmi_product_name, "Z820"))
++ return true;
++
++ return false;
++}
++
++static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
++{
++ return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
++}
++
++
+ /*
+ * Initialize memory for HCD and xHC (one-time init).
+ *
+@@ -410,6 +500,12 @@ int xhci_init(struct usb_hcd *hcd)
+ retval = xhci_mem_init(xhci, GFP_KERNEL);
+ xhci_dbg(xhci, "Finished xhci_init\n");
+
++ /* Initializing Compliance Mode Recovery Data If Needed */
++ if (compliance_mode_recovery_timer_quirk_check()) {
++ xhci->quirks |= XHCI_COMP_MODE_QUIRK;
++ compliance_mode_recovery_timer_init(xhci);
++ }
++
+ return retval;
+ }
+
+@@ -618,6 +714,11 @@ void xhci_stop(struct usb_hcd *hcd)
+ del_timer_sync(&xhci->event_ring_timer);
+ #endif
+
++ /* Deleting Compliance Mode Recovery Timer */
++ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
++ (!(xhci_all_ports_seen_u0(xhci))))
++ del_timer_sync(&xhci->comp_mode_recovery_timer);
++
+ if (xhci->quirks & XHCI_AMD_PLL_FIX)
+ usb_amd_dev_put();
+
+@@ -648,6 +749,9 @@ void xhci_shutdown(struct usb_hcd *hcd)
+ {
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
++ if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
++ usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
++
+ spin_lock_irq(&xhci->lock);
+ xhci_halt(xhci);
+ spin_unlock_irq(&xhci->lock);
+@@ -791,6 +895,16 @@ int xhci_suspend(struct xhci_hcd *xhci)
+ }
+ spin_unlock_irq(&xhci->lock);
+
++ /*
++ * Deleting Compliance Mode Recovery Timer because the xHCI Host
++ * is about to be suspended.
++ */
++ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
++ (!(xhci_all_ports_seen_u0(xhci)))) {
++ del_timer_sync(&xhci->comp_mode_recovery_timer);
++ xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted!\n");
++ }
++
+ /* step 5: remove core well power */
+ /* synchronize irq when using MSI-X */
+ xhci_msix_sync_irqs(xhci);
+@@ -923,6 +1037,16 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ usb_hcd_resume_root_hub(hcd);
+ usb_hcd_resume_root_hub(xhci->shared_hcd);
+ }
++
++ /*
++ * If system is subject to the Quirk, Compliance Mode Timer needs to
++ * be re-initialized Always after a system resume. Ports are subject
++ * to suffer the Compliance Mode issue again. It doesn't matter if
++ * ports have entered previously to U0 before system's suspension.
++ */
++ if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
++ compliance_mode_recovery_timer_init(xhci);
++
+ return retval;
+ }
+ #endif /* CONFIG_PM */
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 7a56805..44d518a 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1471,6 +1471,8 @@ struct xhci_hcd {
+ #define XHCI_SW_BW_CHECKING (1 << 8)
+ #define XHCI_AMD_0x96_HOST (1 << 9)
+ #define XHCI_TRUST_TX_LENGTH (1 << 10)
++#define XHCI_SPURIOUS_REBOOT (1 << 13)
++#define XHCI_COMP_MODE_QUIRK (1 << 14)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+ /* There are two roothubs to keep track of bus suspend info for */
+@@ -1487,6 +1489,11 @@ struct xhci_hcd {
+ unsigned sw_lpm_support:1;
+ /* support xHCI 1.0 spec USB2 hardware LPM */
+ unsigned hw_lpm_support:1;
++ /* Compliance Mode Recovery Data */
++ struct timer_list comp_mode_recovery_timer;
++ u32 port_status_u0;
++/* Compliance Mode Timer Triggered every 2 seconds */
++#define COMP_MODE_RCVRY_MSECS 2000
+ };
+
+ /* convert between an HCD pointer and the corresponding EHCI_HCD */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index b3182bb..7324bea 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -704,6 +704,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_NZR_SEM_USB_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) },
+ { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) },
+@@ -804,13 +805,32 @@ static struct usb_device_id id_table_combined [] = {
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+- { USB_DEVICE(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID,
++ USB_CLASS_VENDOR_SPEC,
++ USB_SUBCLASS_VENDOR_SPEC, 0x00) },
+ { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
+ { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
+ { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
++ { USB_DEVICE(FTDI_VID, PI_C865_PID) },
++ { USB_DEVICE(FTDI_VID, PI_C857_PID) },
++ { USB_DEVICE(PI_VID, PI_C866_PID) },
++ { USB_DEVICE(PI_VID, PI_C663_PID) },
++ { USB_DEVICE(PI_VID, PI_C725_PID) },
++ { USB_DEVICE(PI_VID, PI_E517_PID) },
++ { USB_DEVICE(PI_VID, PI_C863_PID) },
+ { USB_DEVICE(PI_VID, PI_E861_PID) },
++ { USB_DEVICE(PI_VID, PI_C867_PID) },
++ { USB_DEVICE(PI_VID, PI_E609_PID) },
++ { USB_DEVICE(PI_VID, PI_E709_PID) },
++ { USB_DEVICE(PI_VID, PI_100F_PID) },
++ { USB_DEVICE(PI_VID, PI_1011_PID) },
++ { USB_DEVICE(PI_VID, PI_1012_PID) },
++ { USB_DEVICE(PI_VID, PI_1013_PID) },
++ { USB_DEVICE(PI_VID, PI_1014_PID) },
++ { USB_DEVICE(PI_VID, PI_1015_PID) },
++ { USB_DEVICE(PI_VID, PI_1016_PID) },
+ { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) },
+ { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
+ { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 54b4258..06f6fd2 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -75,6 +75,9 @@
+ #define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
+ #define FTDI_OPENDCC_GBM_PID 0xBFDC
+
++/* NZR SEM 16+ USB (http://www.nzr.de) */
++#define FTDI_NZR_SEM_USB_PID 0xC1E0 /* NZR SEM-LOG16+ */
++
+ /*
+ * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com)
+ */
+@@ -539,7 +542,10 @@
+ /*
+ * Microchip Technology, Inc.
+ *
+- * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are also used by:
++ * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are
++ * used by single function CDC ACM class based firmware demo
++ * applications. The VID/PID has also been used in firmware
++ * emulating FTDI serial chips by:
+ * Hornby Elite - Digital Command Control Console
+ * http://www.hornby.com/hornby-dcc/controllers/
+ */
+@@ -791,8 +797,27 @@
+ * Physik Instrumente
+ * http://www.physikinstrumente.com/en/products/
+ */
++/* These two devices use the VID of FTDI */
++#define PI_C865_PID 0xe0a0 /* PI C-865 Piezomotor Controller */
++#define PI_C857_PID 0xe0a1 /* PI Encoder Trigger Box */
++
+ #define PI_VID 0x1a72 /* Vendor ID */
+-#define PI_E861_PID 0x1008 /* E-861 piezo controller USB connection */
++#define PI_C866_PID 0x1000 /* PI C-866 Piezomotor Controller */
++#define PI_C663_PID 0x1001 /* PI C-663 Mercury-Step */
++#define PI_C725_PID 0x1002 /* PI C-725 Piezomotor Controller */
++#define PI_E517_PID 0x1005 /* PI E-517 Digital Piezo Controller Operation Module */
++#define PI_C863_PID 0x1007 /* PI C-863 */
++#define PI_E861_PID 0x1008 /* PI E-861 Piezomotor Controller */
++#define PI_C867_PID 0x1009 /* PI C-867 Piezomotor Controller */
++#define PI_E609_PID 0x100D /* PI E-609 Digital Piezo Controller */
++#define PI_E709_PID 0x100E /* PI E-709 Digital Piezo Controller */
++#define PI_100F_PID 0x100F /* PI Digital Piezo Controller */
++#define PI_1011_PID 0x1011 /* PI Digital Piezo Controller */
++#define PI_1012_PID 0x1012 /* PI Motion Controller */
++#define PI_1013_PID 0x1013 /* PI Motion Controller */
++#define PI_1014_PID 0x1014 /* PI Device */
++#define PI_1015_PID 0x1015 /* PI Device */
++#define PI_1016_PID 0x1016 /* PI Digital Servo Module */
+
+ /*
+ * Kondo Kagaku Co.Ltd.
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 113560d..c068b4d 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1090,6 +1090,10 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
++ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
++
+ { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
+ { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
+ { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
+diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
+index 70aa47d..f7c1753 100644
+--- a/drivers/video/omap2/omapfb/omapfb-main.c
++++ b/drivers/video/omap2/omapfb/omapfb-main.c
+@@ -1183,7 +1183,7 @@ static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green,
+ break;
+
+ if (regno < 16) {
+- u16 pal;
++ u32 pal;
+ pal = ((red >> (16 - var->red.length)) <<
+ var->red.offset) |
+ ((green >> (16 - var->green.length)) <<
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 284798a..89588e7 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -231,7 +231,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ return ret;
+
+ if (hwdev && hwdev->coherent_dma_mask)
+- dma_mask = hwdev->coherent_dma_mask;
++ dma_mask = dma_alloc_coherent_mask(hwdev, flags);
+
+ phys = virt_to_phys(ret);
+ dev_addr = xen_phys_to_bus(phys);
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 0bb785f..51574d4 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -882,7 +882,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
+ if (!buf) {
+ mutex_unlock(&cinode->lock_mutex);
+ FreeXid(xid);
+- return rc;
++ return -ENOMEM;
+ }
+
+ for (i = 0; i < 2; i++) {
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index af11098..7c7556b 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -640,6 +640,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct dentry *lower_old_dir_dentry;
+ struct dentry *lower_new_dir_dentry;
+ struct dentry *trap = NULL;
++ struct inode *target_inode;
+
+ lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
+ lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
+@@ -647,6 +648,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ dget(lower_new_dentry);
+ lower_old_dir_dentry = dget_parent(lower_old_dentry);
+ lower_new_dir_dentry = dget_parent(lower_new_dentry);
++ target_inode = new_dentry->d_inode;
+ trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ /* source should not be ancestor of target */
+ if (trap == lower_old_dentry) {
+@@ -662,6 +664,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ lower_new_dir_dentry->d_inode, lower_new_dentry);
+ if (rc)
+ goto out_lock;
++ if (target_inode)
++ fsstack_copy_attr_all(target_inode,
++ ecryptfs_inode_to_lower(target_inode));
+ fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode);
+ if (new_dir != old_dir)
+ fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
+diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
+index 5b3f907..71b263f 100644
+--- a/fs/ext3/inode.c
++++ b/fs/ext3/inode.c
+@@ -3072,6 +3072,8 @@ static int ext3_do_update_inode(handle_t *handle,
+ struct ext3_inode_info *ei = EXT3_I(inode);
+ struct buffer_head *bh = iloc->bh;
+ int err = 0, rc, block;
++ int need_datasync = 0;
++ __le32 disksize;
+
+ again:
+ /* we can't allow multiple procs in here at once, its a bit racey */
+@@ -3109,7 +3111,11 @@ again:
+ raw_inode->i_gid_high = 0;
+ }
+ raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
+- raw_inode->i_size = cpu_to_le32(ei->i_disksize);
++ disksize = cpu_to_le32(ei->i_disksize);
++ if (disksize != raw_inode->i_size) {
++ need_datasync = 1;
++ raw_inode->i_size = disksize;
++ }
+ raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
+ raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
+ raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
+@@ -3125,8 +3131,11 @@ again:
+ if (!S_ISREG(inode->i_mode)) {
+ raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
+ } else {
+- raw_inode->i_size_high =
+- cpu_to_le32(ei->i_disksize >> 32);
++ disksize = cpu_to_le32(ei->i_disksize >> 32);
++ if (disksize != raw_inode->i_size_high) {
++ raw_inode->i_size_high = disksize;
++ need_datasync = 1;
++ }
+ if (ei->i_disksize > 0x7fffffffULL) {
+ struct super_block *sb = inode->i_sb;
+ if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
+@@ -3179,6 +3188,8 @@ again:
+ ext3_clear_inode_state(inode, EXT3_STATE_NEW);
+
+ atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
++ if (need_datasync)
++ atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
+ out_brelse:
+ brelse (bh);
+ ext3_std_error(inode->i_sb, err);
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 2aaf3ea..5c029fb 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1524,6 +1524,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
+ req->pages[req->num_pages] = page;
+ req->num_pages++;
+
++ offset = 0;
+ num -= this_num;
+ total_len += this_num;
+ index++;
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 50a15fa..b78b5b6 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
+ nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
+ nfsi->attrtimeo_timestamp = jiffies;
+
+- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
++ memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf));
+ if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
+ else
+diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
+index 5195fd6..dba87e6 100644
+--- a/fs/nfs/nfs3proc.c
++++ b/fs/nfs/nfs3proc.c
+@@ -633,7 +633,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+ u64 cookie, struct page **pages, unsigned int count, int plus)
+ {
+ struct inode *dir = dentry->d_inode;
+- __be32 *verf = NFS_COOKIEVERF(dir);
++ __be32 *verf = NFS_I(dir)->cookieverf;
+ struct nfs3_readdirargs arg = {
+ .fh = NFS_FH(dir),
+ .cookie = cookie,
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index d20221d..61796a40 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3025,11 +3025,11 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+ dentry->d_parent->d_name.name,
+ dentry->d_name.name,
+ (unsigned long long)cookie);
+- nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
++ nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
+ res.pgbase = args.pgbase;
+ status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
+ if (status >= 0) {
+- memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
++ memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
+ status += args.pgbase;
+ }
+
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index bdd5bdc..00818c8 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -6113,7 +6113,8 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ status = decode_open(xdr, res);
+ if (status)
+ goto out;
+- if (decode_getfh(xdr, &res->fh) != 0)
++ status = decode_getfh(xdr, &res->fh);
++ if (status)
+ goto out;
+ if (decode_getfattr(xdr, res->f_attr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 6e85ec6..e42d6f6 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -1820,6 +1820,7 @@ static int nfs_validate_mount_data(void *options,
+
+ memcpy(sap, &data->addr, sizeof(data->addr));
+ args->nfs_server.addrlen = sizeof(data->addr);
++ args->nfs_server.port = ntohs(data->addr.sin_port);
+ if (!nfs_verify_server_address(sap))
+ goto out_no_address;
+
+@@ -2538,6 +2539,7 @@ static int nfs4_validate_mount_data(void *options,
+ return -EFAULT;
+ if (!nfs_verify_server_address(sap))
+ goto out_no_address;
++ args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port);
+
+ if (data->auth_flavourlen) {
+ if (data->auth_flavourlen > 1)
+diff --git a/fs/stat.c b/fs/stat.c
+index 8806b89..7b21801 100644
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -57,12 +57,13 @@ EXPORT_SYMBOL(vfs_getattr);
+
+ int vfs_fstat(unsigned int fd, struct kstat *stat)
+ {
+- struct file *f = fget(fd);
++ int fput_needed;
++ struct file *f = fget_raw_light(fd, &fput_needed);
+ int error = -EBADF;
+
+ if (f) {
+ error = vfs_getattr(f->f_path.mnt, f->f_path.dentry, stat);
+- fput(f);
++ fput_light(f, fput_needed);
+ }
+ return error;
+ }
+diff --git a/fs/udf/file.c b/fs/udf/file.c
+index d567b84..874c9e3 100644
+--- a/fs/udf/file.c
++++ b/fs/udf/file.c
+@@ -39,20 +39,24 @@
+ #include "udf_i.h"
+ #include "udf_sb.h"
+
+-static int udf_adinicb_readpage(struct file *file, struct page *page)
++static void __udf_adinicb_readpage(struct page *page)
+ {
+ struct inode *inode = page->mapping->host;
+ char *kaddr;
+ struct udf_inode_info *iinfo = UDF_I(inode);
+
+- BUG_ON(!PageLocked(page));
+-
+ kaddr = kmap(page);
+- memset(kaddr, 0, PAGE_CACHE_SIZE);
+ memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size);
++ memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size);
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ kunmap(page);
++}
++
++static int udf_adinicb_readpage(struct file *file, struct page *page)
++{
++ BUG_ON(!PageLocked(page));
++ __udf_adinicb_readpage(page);
+ unlock_page(page);
+
+ return 0;
+@@ -77,6 +81,25 @@ static int udf_adinicb_writepage(struct page *page,
+ return 0;
+ }
+
++static int udf_adinicb_write_begin(struct file *file,
++ struct address_space *mapping, loff_t pos,
++ unsigned len, unsigned flags, struct page **pagep,
++ void **fsdata)
++{
++ struct page *page;
++
++ if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE))
++ return -EIO;
++ page = grab_cache_page_write_begin(mapping, 0, flags);
++ if (!page)
++ return -ENOMEM;
++ *pagep = page;
++
++ if (!PageUptodate(page) && len != PAGE_CACHE_SIZE)
++ __udf_adinicb_readpage(page);
++ return 0;
++}
++
+ static int udf_adinicb_write_end(struct file *file,
+ struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+@@ -98,8 +121,8 @@ static int udf_adinicb_write_end(struct file *file,
+ const struct address_space_operations udf_adinicb_aops = {
+ .readpage = udf_adinicb_readpage,
+ .writepage = udf_adinicb_writepage,
+- .write_begin = simple_write_begin,
+- .write_end = udf_adinicb_write_end,
++ .write_begin = udf_adinicb_write_begin,
++ .write_end = udf_adinicb_write_end,
+ };
+
+ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
+index ddd46db..7639f18 100644
+--- a/include/drm/drm_mode.h
++++ b/include/drm/drm_mode.h
+@@ -277,8 +277,9 @@ struct drm_mode_mode_cmd {
+ struct drm_mode_modeinfo mode;
+ };
+
+-#define DRM_MODE_CURSOR_BO (1<<0)
+-#define DRM_MODE_CURSOR_MOVE (1<<1)
++#define DRM_MODE_CURSOR_BO 0x01
++#define DRM_MODE_CURSOR_MOVE 0x02
++#define DRM_MODE_CURSOR_FLAGS 0x03
+
+ /*
+ * depending on the value in flags different members are used.
+diff --git a/include/linux/kobject.h b/include/linux/kobject.h
+index ad81e1c..445f978 100644
+--- a/include/linux/kobject.h
++++ b/include/linux/kobject.h
+@@ -226,7 +226,7 @@ static inline int kobject_uevent_env(struct kobject *kobj,
+
+ static inline __printf(2, 3)
+ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
+-{ return 0; }
++{ return -ENOMEM; }
+
+ static inline int kobject_action_type(const char *buf, size_t count,
+ enum kobject_action *type)
+diff --git a/include/linux/ktime.h b/include/linux/ktime.h
+index 603bec2..06177ba10 100644
+--- a/include/linux/ktime.h
++++ b/include/linux/ktime.h
+@@ -58,13 +58,6 @@ union ktime {
+
+ typedef union ktime ktime_t; /* Kill this */
+
+-#define KTIME_MAX ((s64)~((u64)1 << 63))
+-#if (BITS_PER_LONG == 64)
+-# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
+-#else
+-# define KTIME_SEC_MAX LONG_MAX
+-#endif
+-
+ /*
+ * ktime_t definitions when using the 64-bit scalar representation:
+ */
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
+index c8ef9bc..87967ee 100644
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -219,6 +219,7 @@ struct mmc_card {
+ #define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */
+ #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */
+ #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
++#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */
+ /* byte mode */
+ unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */
+ #define MMC_NO_POWER_NOTIFICATION 0
+diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
+index 30b0c4e..43e038a 100644
+--- a/include/linux/mv643xx_eth.h
++++ b/include/linux/mv643xx_eth.h
+@@ -15,6 +15,8 @@
+ #define MV643XX_ETH_SIZE_REG_4 0x2224
+ #define MV643XX_ETH_BASE_ADDR_ENABLE_REG 0x2290
+
++#define MV643XX_TX_CSUM_DEFAULT_LIMIT 0
++
+ struct mv643xx_eth_shared_platform_data {
+ struct mbus_dram_target_info *dram;
+ struct platform_device *shared_smi;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index cb52340..00ca32b 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1299,6 +1299,8 @@ struct net_device {
+ /* for setting kernel sock attribute on TCP connection setup */
+ #define GSO_MAX_SIZE 65536
+ unsigned int gso_max_size;
++#define GSO_MAX_SEGS 65535
++ u16 gso_max_segs;
+
+ #ifdef CONFIG_DCB
+ /* Data Center Bridging netlink ops */
+@@ -1511,6 +1513,8 @@ struct packet_type {
+ struct sk_buff **(*gro_receive)(struct sk_buff **head,
+ struct sk_buff *skb);
+ int (*gro_complete)(struct sk_buff *skb);
++ bool (*id_match)(struct packet_type *ptype,
++ struct sock *sk);
+ void *af_packet_priv;
+ struct list_head list;
+ };
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index 92ecf55..33c52a2 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -261,11 +261,6 @@ static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode)
+ return NFS_SERVER(inode)->nfs_client->rpc_ops;
+ }
+
+-static inline __be32 *NFS_COOKIEVERF(const struct inode *inode)
+-{
+- return NFS_I(inode)->cookieverf;
+-}
+-
+ static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode)
+ {
+ struct nfs_server *nfss = NFS_SERVER(inode);
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 2aaee0c..67cc215 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2124,7 +2124,7 @@
+ #define PCI_DEVICE_ID_TIGON3_5704S 0x16a8
+ #define PCI_DEVICE_ID_NX2_57800_VF 0x16a9
+ #define PCI_DEVICE_ID_NX2_5706S 0x16aa
+-#define PCI_DEVICE_ID_NX2_57840_MF 0x16ab
++#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4
+ #define PCI_DEVICE_ID_NX2_5708S 0x16ac
+ #define PCI_DEVICE_ID_NX2_57840_VF 0x16ad
+ #define PCI_DEVICE_ID_NX2_57810_MF 0x16ae
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index b1f8912..b669be6 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -794,7 +794,7 @@ struct perf_event {
+ struct hw_perf_event hw;
+
+ struct perf_event_context *ctx;
+- struct file *filp;
++ atomic_long_t refcount;
+
+ /*
+ * These accumulate total time (in nanoseconds) that children
+diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
+index 15518a1..0a4cd10 100644
+--- a/include/linux/sunrpc/xprt.h
++++ b/include/linux/sunrpc/xprt.h
+@@ -114,6 +114,7 @@ struct rpc_xprt_ops {
+ void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
+ int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
+ void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
++ void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
+ void (*rpcbind)(struct rpc_task *task);
+ void (*set_port)(struct rpc_xprt *xprt, unsigned short port);
+ void (*connect)(struct rpc_task *task);
+@@ -274,6 +275,8 @@ void xprt_connect(struct rpc_task *task);
+ void xprt_reserve(struct rpc_task *task);
+ int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
+ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
++void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
++void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
+ int xprt_prepare_transmit(struct rpc_task *task);
+ void xprt_transmit(struct rpc_task *task);
+ void xprt_end_transmit(struct rpc_task *task);
+diff --git a/include/linux/time.h b/include/linux/time.h
+index b306178..8c0216e 100644
+--- a/include/linux/time.h
++++ b/include/linux/time.h
+@@ -107,11 +107,36 @@ static inline struct timespec timespec_sub(struct timespec lhs,
+ return ts_delta;
+ }
+
++#define KTIME_MAX ((s64)~((u64)1 << 63))
++#if (BITS_PER_LONG == 64)
++# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
++#else
++# define KTIME_SEC_MAX LONG_MAX
++#endif
++
+ /*
+ * Returns true if the timespec is norm, false if denorm:
+ */
+-#define timespec_valid(ts) \
+- (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC))
++static inline bool timespec_valid(const struct timespec *ts)
++{
++ /* Dates before 1970 are bogus */
++ if (ts->tv_sec < 0)
++ return false;
++ /* Can't have more nanoseconds then a second */
++ if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
++ return false;
++ return true;
++}
++
++static inline bool timespec_valid_strict(const struct timespec *ts)
++{
++ if (!timespec_valid(ts))
++ return false;
++ /* Disallow values that could overflow ktime_t */
++ if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
++ return false;
++ return true;
++}
+
+ extern void read_persistent_clock(struct timespec *ts);
+ extern void read_boot_clock(struct timespec *ts);
+diff --git a/include/net/scm.h b/include/net/scm.h
+index d456f4c..0c0017c 100644
+--- a/include/net/scm.h
++++ b/include/net/scm.h
+@@ -71,9 +71,11 @@ static __inline__ void scm_destroy(struct scm_cookie *scm)
+ }
+
+ static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
+- struct scm_cookie *scm)
++ struct scm_cookie *scm, bool forcecreds)
+ {
+ memset(scm, 0, sizeof(*scm));
++ if (forcecreds)
++ scm_set_cred(scm, task_tgid(current), current_cred());
+ unix_get_peersec_dgram(sock, scm);
+ if (msg->msg_controllen <= 0)
+ return 0;
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 32e3937..ddf523c 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -194,6 +194,7 @@ struct sock_common {
+ * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
+ * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
+ * @sk_gso_max_size: Maximum GSO segment size to build
++ * @sk_gso_max_segs: Maximum number of GSO segments
+ * @sk_lingertime: %SO_LINGER l_linger setting
+ * @sk_backlog: always used with the per-socket spinlock held
+ * @sk_callback_lock: used with the callbacks in the end of this struct
+@@ -310,6 +311,7 @@ struct sock {
+ int sk_route_nocaps;
+ int sk_gso_type;
+ unsigned int sk_gso_max_size;
++ u16 sk_gso_max_segs;
+ int sk_rcvlowat;
+ unsigned long sk_lingertime;
+ struct sk_buff_head sk_error_queue;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 58690af..7d1f05e 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -3011,12 +3011,12 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
+ /*
+ * Called when the last reference to the file is gone.
+ */
+-static int perf_release(struct inode *inode, struct file *file)
++static void put_event(struct perf_event *event)
+ {
+- struct perf_event *event = file->private_data;
+ struct task_struct *owner;
+
+- file->private_data = NULL;
++ if (!atomic_long_dec_and_test(&event->refcount))
++ return;
+
+ rcu_read_lock();
+ owner = ACCESS_ONCE(event->owner);
+@@ -3051,7 +3051,13 @@ static int perf_release(struct inode *inode, struct file *file)
+ put_task_struct(owner);
+ }
+
+- return perf_event_release_kernel(event);
++ perf_event_release_kernel(event);
++}
++
++static int perf_release(struct inode *inode, struct file *file)
++{
++ put_event(file->private_data);
++ return 0;
+ }
+
+ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
+@@ -3304,7 +3310,7 @@ unlock:
+
+ static const struct file_operations perf_fops;
+
+-static struct perf_event *perf_fget_light(int fd, int *fput_needed)
++static struct file *perf_fget_light(int fd, int *fput_needed)
+ {
+ struct file *file;
+
+@@ -3318,7 +3324,7 @@ static struct perf_event *perf_fget_light(int fd, int *fput_needed)
+ return ERR_PTR(-EBADF);
+ }
+
+- return file->private_data;
++ return file;
+ }
+
+ static int perf_event_set_output(struct perf_event *event,
+@@ -3350,19 +3356,21 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+
+ case PERF_EVENT_IOC_SET_OUTPUT:
+ {
++ struct file *output_file = NULL;
+ struct perf_event *output_event = NULL;
+ int fput_needed = 0;
+ int ret;
+
+ if (arg != -1) {
+- output_event = perf_fget_light(arg, &fput_needed);
+- if (IS_ERR(output_event))
+- return PTR_ERR(output_event);
++ output_file = perf_fget_light(arg, &fput_needed);
++ if (IS_ERR(output_file))
++ return PTR_ERR(output_file);
++ output_event = output_file->private_data;
+ }
+
+ ret = perf_event_set_output(event, output_event);
+ if (output_event)
+- fput_light(output_event->filp, fput_needed);
++ fput_light(output_file, fput_needed);
+
+ return ret;
+ }
+@@ -5912,6 +5920,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+
+ mutex_init(&event->mmap_mutex);
+
++ atomic_long_set(&event->refcount, 1);
+ event->cpu = cpu;
+ event->attr = *attr;
+ event->group_leader = group_leader;
+@@ -6182,12 +6191,12 @@ SYSCALL_DEFINE5(perf_event_open,
+ return event_fd;
+
+ if (group_fd != -1) {
+- group_leader = perf_fget_light(group_fd, &fput_needed);
+- if (IS_ERR(group_leader)) {
+- err = PTR_ERR(group_leader);
++ group_file = perf_fget_light(group_fd, &fput_needed);
++ if (IS_ERR(group_file)) {
++ err = PTR_ERR(group_file);
+ goto err_fd;
+ }
+- group_file = group_leader->filp;
++ group_leader = group_file->private_data;
+ if (flags & PERF_FLAG_FD_OUTPUT)
+ output_event = group_leader;
+ if (flags & PERF_FLAG_FD_NO_GROUP)
+@@ -6322,7 +6331,6 @@ SYSCALL_DEFINE5(perf_event_open,
+ put_ctx(gctx);
+ }
+
+- event->filp = event_file;
+ WARN_ON_ONCE(ctx->parent_ctx);
+ mutex_lock(&ctx->mutex);
+
+@@ -6412,7 +6420,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
+ goto err_free;
+ }
+
+- event->filp = NULL;
+ WARN_ON_ONCE(ctx->parent_ctx);
+ mutex_lock(&ctx->mutex);
+ perf_install_in_context(ctx, event, cpu);
+@@ -6461,7 +6468,7 @@ static void sync_child_event(struct perf_event *child_event,
+ * Release the parent event, if this was the last
+ * reference to it.
+ */
+- fput(parent_event->filp);
++ put_event(parent_event);
+ }
+
+ static void
+@@ -6537,9 +6544,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
+ *
+ * __perf_event_exit_task()
+ * sync_child_event()
+- * fput(parent_event->filp)
+- * perf_release()
+- * mutex_lock(&ctx->mutex)
++ * put_event()
++ * mutex_lock(&ctx->mutex)
+ *
+ * But since its the parent context it won't be the same instance.
+ */
+@@ -6607,7 +6613,7 @@ static void perf_free_event(struct perf_event *event,
+ list_del_init(&event->child_list);
+ mutex_unlock(&parent->child_mutex);
+
+- fput(parent->filp);
++ put_event(parent);
+
+ perf_group_detach(event);
+ list_del_event(event, ctx);
+@@ -6687,6 +6693,12 @@ inherit_event(struct perf_event *parent_event,
+ NULL, NULL);
+ if (IS_ERR(child_event))
+ return child_event;
++
++ if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
++ free_event(child_event);
++ return NULL;
++ }
++
+ get_ctx(child_ctx);
+
+ /*
+@@ -6728,14 +6740,6 @@ inherit_event(struct perf_event *parent_event,
+ raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
+
+ /*
+- * Get a reference to the parent filp - we will fput it
+- * when the child event exits. This is safe to do because
+- * we are in the parent and we know that the filp still
+- * exists and has a nonzero count:
+- */
+- atomic_long_inc(&parent_event->filp->f_count);
+-
+- /*
+ * Link this into the parent event's child list
+ */
+ WARN_ON_ONCE(parent_event->ctx->parent_ctx);
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 03e67d4..5ee1ac0 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -382,7 +382,7 @@ int do_settimeofday(const struct timespec *tv)
+ struct timespec ts_delta;
+ unsigned long flags;
+
+- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
++ if (!timespec_valid_strict(tv))
+ return -EINVAL;
+
+ write_seqlock_irqsave(&xtime_lock, flags);
+@@ -417,6 +417,8 @@ EXPORT_SYMBOL(do_settimeofday);
+ int timekeeping_inject_offset(struct timespec *ts)
+ {
+ unsigned long flags;
++ struct timespec tmp;
++ int ret = 0;
+
+ if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+@@ -425,9 +427,16 @@ int timekeeping_inject_offset(struct timespec *ts)
+
+ timekeeping_forward_now();
+
++ tmp = timespec_add(xtime, *ts);
++ if (!timespec_valid_strict(&tmp)) {
++ ret = -EINVAL;
++ goto error;
++ }
++
+ xtime = timespec_add(xtime, *ts);
+ wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);
+
++error: /* even if we error out, we forwarded the time, so call update */
+ timekeeping_update(true);
+
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+@@ -435,7 +444,7 @@ int timekeeping_inject_offset(struct timespec *ts)
+ /* signal hrtimers about time change */
+ clock_was_set();
+
+- return 0;
++ return ret;
+ }
+ EXPORT_SYMBOL(timekeeping_inject_offset);
+
+@@ -582,7 +591,20 @@ void __init timekeeping_init(void)
+ struct timespec now, boot;
+
+ read_persistent_clock(&now);
++ if (!timespec_valid_strict(&now)) {
++ pr_warn("WARNING: Persistent clock returned invalid value!\n"
++ " Check your CMOS/BIOS settings.\n");
++ now.tv_sec = 0;
++ now.tv_nsec = 0;
++ }
++
+ read_boot_clock(&boot);
++ if (!timespec_valid_strict(&boot)) {
++ pr_warn("WARNING: Boot clock returned invalid value!\n"
++ " Check your CMOS/BIOS settings.\n");
++ boot.tv_sec = 0;
++ boot.tv_nsec = 0;
++ }
+
+ write_seqlock_irqsave(&xtime_lock, flags);
+
+@@ -627,7 +649,7 @@ static void update_sleep_time(struct timespec t)
+ */
+ static void __timekeeping_inject_sleeptime(struct timespec *delta)
+ {
+- if (!timespec_valid(delta)) {
++ if (!timespec_valid_strict(delta)) {
+ printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
+ "sleep delta value!\n");
+ return;
+@@ -1011,6 +1033,10 @@ static void update_wall_time(void)
+ #else
+ offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
+ #endif
++ /* Check if there's really nothing to do */
++ if (offset < timekeeper.cycle_interval)
++ return;
++
+ timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
+
+ /*
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index a650bee..979d4de 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3437,14 +3437,17 @@ static int __cpuinit trustee_thread(void *__gcwq)
+
+ for_each_busy_worker(worker, i, pos, gcwq) {
+ struct work_struct *rebind_work = &worker->rebind_work;
++ unsigned long worker_flags = worker->flags;
+
+ /*
+ * Rebind_work may race with future cpu hotplug
+ * operations. Use a separate flag to mark that
+- * rebinding is scheduled.
++ * rebinding is scheduled. The morphing should
++ * be atomic.
+ */
+- worker->flags |= WORKER_REBIND;
+- worker->flags &= ~WORKER_ROGUE;
++ worker_flags |= WORKER_REBIND;
++ worker_flags &= ~WORKER_ROGUE;
++ ACCESS_ONCE(worker->flags) = worker_flags;
+
+ /* queue rebind_work, wq doesn't matter, use the default one */
+ if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index c0007f9..11b8d47 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -2533,7 +2533,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
+ break;
+
+ default:
+- BUG();
++ return -EINVAL;
+ }
+
+ l = strlen(policy_modes[mode]);
+diff --git a/net/atm/common.c b/net/atm/common.c
+index 14ff9fe..0ca06e8 100644
+--- a/net/atm/common.c
++++ b/net/atm/common.c
+@@ -784,6 +784,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
+
+ if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
+ return -ENOTCONN;
++ memset(&pvc, 0, sizeof(pvc));
+ pvc.sap_family = AF_ATMPVC;
+ pvc.sap_addr.itf = vcc->dev->number;
+ pvc.sap_addr.vpi = vcc->vpi;
+diff --git a/net/atm/pvc.c b/net/atm/pvc.c
+index 3a73491..ae03240 100644
+--- a/net/atm/pvc.c
++++ b/net/atm/pvc.c
+@@ -95,6 +95,7 @@ static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,
+ return -ENOTCONN;
+ *sockaddr_len = sizeof(struct sockaddr_atmpvc);
+ addr = (struct sockaddr_atmpvc *)sockaddr;
++ memset(addr, 0, sizeof(*addr));
+ addr->sap_family = AF_ATMPVC;
+ addr->sap_addr.itf = vcc->dev->number;
+ addr->sap_addr.vpi = vcc->vpi;
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index f6afe3d..8361ee4 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -388,6 +388,7 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add
+ *addr_len = sizeof(*haddr);
+ haddr->hci_family = AF_BLUETOOTH;
+ haddr->hci_dev = hdev->id;
++ haddr->hci_channel= 0;
+
+ release_sock(sk);
+ return 0;
+@@ -671,6 +672,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
+ {
+ struct hci_filter *f = &hci_pi(sk)->filter;
+
++ memset(&uf, 0, sizeof(uf));
+ uf.type_mask = f->type_mask;
+ uf.opcode = f->opcode;
+ uf.event_mask[0] = *((u32 *) f->event_mask + 0);
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 5c406d3..6dedd6f 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -293,6 +293,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
++ memset(la, 0, sizeof(struct sockaddr_l2));
+ addr->sa_family = AF_BLUETOOTH;
+ *len = sizeof(struct sockaddr_l2);
+
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 5417f61..7ee4ead 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -547,6 +547,7 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
++ memset(sa, 0, sizeof(*sa));
+ sa->rc_family = AF_BLUETOOTH;
+ sa->rc_channel = rfcomm_pi(sk)->channel;
+ if (peer)
+@@ -835,6 +836,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
+ }
+
+ sec.level = rfcomm_pi(sk)->sec_level;
++ sec.key_size = 0;
+
+ len = min_t(unsigned int, len, sizeof(sec));
+ if (copy_to_user(optval, (char *) &sec, len))
+diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
+index c258796..bc1eb56 100644
+--- a/net/bluetooth/rfcomm/tty.c
++++ b/net/bluetooth/rfcomm/tty.c
+@@ -471,7 +471,7 @@ static int rfcomm_get_dev_list(void __user *arg)
+
+ size = sizeof(*dl) + dev_num * sizeof(*di);
+
+- dl = kmalloc(size, GFP_KERNEL);
++ dl = kzalloc(size, GFP_KERNEL);
+ if (!dl)
+ return -ENOMEM;
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 4b18703..832ba6d 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1059,6 +1059,8 @@ rollback:
+ */
+ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
+ {
++ char *new_ifalias;
++
+ ASSERT_RTNL();
+
+ if (len >= IFALIASZ)
+@@ -1072,9 +1074,10 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
+ return 0;
+ }
+
+- dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
+- if (!dev->ifalias)
++ new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
++ if (!new_ifalias)
+ return -ENOMEM;
++ dev->ifalias = new_ifalias;
+
+ strlcpy(dev->ifalias, alias, len+1);
+ return len;
+@@ -1628,6 +1631,19 @@ static inline int deliver_skb(struct sk_buff *skb,
+ return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+ }
+
++static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
++{
++ if (ptype->af_packet_priv == NULL)
++ return false;
++
++ if (ptype->id_match)
++ return ptype->id_match(ptype, skb->sk);
++ else if ((struct sock *)ptype->af_packet_priv == skb->sk)
++ return true;
++
++ return false;
++}
++
+ /*
+ * Support routine. Sends outgoing frames to any network
+ * taps currently in use.
+@@ -1645,8 +1661,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
+ * they originated from - MvS (miquels@drinkel.ow.org)
+ */
+ if ((ptype->dev == dev || !ptype->dev) &&
+- (ptype->af_packet_priv == NULL ||
+- (struct sock *)ptype->af_packet_priv != skb->sk)) {
++ (!skb_loop_sk(ptype, skb))) {
+ if (pt_prev) {
+ deliver_skb(skb2, pt_prev, skb->dev);
+ pt_prev = ptype;
+@@ -2108,6 +2123,9 @@ u32 netif_skb_features(struct sk_buff *skb)
+ __be16 protocol = skb->protocol;
+ u32 features = skb->dev->features;
+
++ if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
++ features &= ~NETIF_F_GSO_MASK;
++
+ if (protocol == htons(ETH_P_8021Q)) {
+ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+ protocol = veh->h_vlan_encapsulated_proto;
+@@ -5990,6 +6008,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+ dev_net_set(dev, &init_net);
+
+ dev->gso_max_size = GSO_MAX_SIZE;
++ dev->gso_max_segs = GSO_MAX_SEGS;
+
+ INIT_LIST_HEAD(&dev->napi_list);
+ INIT_LIST_HEAD(&dev->unreg_list);
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 8d095b9..018fd41 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1308,6 +1308,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
+ } else {
+ sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ sk->sk_gso_max_size = dst->dev->gso_max_size;
++ sk->sk_gso_max_segs = dst->dev->gso_max_segs;
+ }
+ }
+ }
+diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
+index 3d604e1..4caf63f 100644
+--- a/net/dccp/ccids/ccid3.c
++++ b/net/dccp/ccids/ccid3.c
+@@ -532,6 +532,7 @@ static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
+ case DCCP_SOCKOPT_CCID_TX_INFO:
+ if (len < sizeof(tfrc))
+ return -EINVAL;
++ memset(&tfrc, 0, sizeof(tfrc));
+ tfrc.tfrctx_x = hc->tx_x;
+ tfrc.tfrctx_x_recv = hc->tx_x_recv;
+ tfrc.tfrctx_x_calc = hc->tx_x_calc;
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index d2aae27..0064394 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -125,6 +125,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
+ static struct kmem_cache *mrt_cachep __read_mostly;
+
+ static struct mr_table *ipmr_new_table(struct net *net, u32 id);
++static void ipmr_free_table(struct mr_table *mrt);
++
+ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, struct mfc_cache *cache,
+ int local);
+@@ -132,6 +134,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
+ struct sk_buff *pkt, vifi_t vifi, int assert);
+ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ struct mfc_cache *c, struct rtmsg *rtm);
++static void mroute_clean_tables(struct mr_table *mrt);
+ static void ipmr_expire_process(unsigned long arg);
+
+ #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+@@ -272,7 +275,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)
+
+ list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
+ list_del(&mrt->list);
+- kfree(mrt);
++ ipmr_free_table(mrt);
+ }
+ fib_rules_unregister(net->ipv4.mr_rules_ops);
+ }
+@@ -300,7 +303,7 @@ static int __net_init ipmr_rules_init(struct net *net)
+
+ static void __net_exit ipmr_rules_exit(struct net *net)
+ {
+- kfree(net->ipv4.mrt);
++ ipmr_free_table(net->ipv4.mrt);
+ }
+ #endif
+
+@@ -337,6 +340,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
+ return mrt;
+ }
+
++static void ipmr_free_table(struct mr_table *mrt)
++{
++ del_timer_sync(&mrt->ipmr_expire_timer);
++ mroute_clean_tables(mrt);
++ kfree(mrt);
++}
++
+ /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
+
+ static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index ad466a7..043d49b 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -740,7 +740,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
+ old_size_goal + mss_now > xmit_size_goal)) {
+ xmit_size_goal = old_size_goal;
+ } else {
+- tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
++ tp->xmit_size_goal_segs =
++ min_t(u16, xmit_size_goal / mss_now,
++ sk->sk_gso_max_segs);
+ xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
+ }
+ }
+diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
+index 850c737..6cebfd2 100644
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -290,7 +290,8 @@ int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
+ left = tp->snd_cwnd - in_flight;
+ if (sk_can_gso(sk) &&
+ left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
+- left * tp->mss_cache < sk->sk_gso_max_size)
++ left * tp->mss_cache < sk->sk_gso_max_size &&
++ left < sk->sk_gso_max_segs)
+ return 1;
+ return left <= tcp_max_burst(tp);
+ }
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index c51dd5b..921cbac 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1318,21 +1318,21 @@ static void tcp_cwnd_validate(struct sock *sk)
+ * when we would be allowed to send the split-due-to-Nagle skb fully.
+ */
+ static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
+- unsigned int mss_now, unsigned int cwnd)
++ unsigned int mss_now, unsigned int max_segs)
+ {
+ const struct tcp_sock *tp = tcp_sk(sk);
+- u32 needed, window, cwnd_len;
++ u32 needed, window, max_len;
+
+ window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
+- cwnd_len = mss_now * cwnd;
++ max_len = mss_now * max_segs;
+
+- if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
+- return cwnd_len;
++ if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
++ return max_len;
+
+ needed = min(skb->len, window);
+
+- if (cwnd_len <= needed)
+- return cwnd_len;
++ if (max_len <= needed)
++ return max_len;
+
+ return needed - needed % mss_now;
+ }
+@@ -1560,7 +1560,8 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
+ limit = min(send_win, cong_win);
+
+ /* If a full-sized TSO skb can be sent, do it. */
+- if (limit >= sk->sk_gso_max_size)
++ if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
++ sk->sk_gso_max_segs * tp->mss_cache))
+ goto send_now;
+
+ /* Middle in queue won't get any more data, full sendable already? */
+@@ -1786,7 +1787,9 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+ limit = mss_now;
+ if (tso_segs > 1 && !tcp_urg_mode(tp))
+ limit = tcp_mss_split_point(sk, skb, mss_now,
+- cwnd_quota);
++ min_t(unsigned int,
++ cwnd_quota,
++ sk->sk_gso_max_segs));
+
+ if (skb->len > limit &&
+ unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index a5521c5..aef80d7 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -493,8 +493,7 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
+ struct net_device *dev;
+ struct inet6_dev *idev;
+
+- rcu_read_lock();
+- for_each_netdev_rcu(net, dev) {
++ for_each_netdev(net, dev) {
+ idev = __in6_dev_get(dev);
+ if (idev) {
+ int changed = (!idev->cnf.forwarding) ^ (!newf);
+@@ -503,7 +502,6 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
+ dev_forward_change(idev);
+ }
+ }
+- rcu_read_unlock();
+ }
+
+ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 89ff8c6..7501b22 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1253,11 +1253,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
+ /* Remove from tunnel list */
+ spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+ list_del_rcu(&tunnel->list);
++ kfree_rcu(tunnel, rcu);
+ spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+- synchronize_rcu();
+
+ atomic_dec(&l2tp_tunnel_count);
+- kfree(tunnel);
+ }
+
+ /* Create a socket for the tunnel, if one isn't set up by
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index a16a48e..4393794 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -157,6 +157,7 @@ struct l2tp_tunnel_cfg {
+
+ struct l2tp_tunnel {
+ int magic; /* Should be L2TP_TUNNEL_MAGIC */
++ struct rcu_head rcu;
+ rwlock_t hlist_lock; /* protect session_hlist */
+ struct hlist_head session_hlist[L2TP_HASH_SIZE];
+ /* hashed list of sessions,
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index a18e6c3..99a60d5 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -966,14 +966,13 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
+ struct sockaddr_llc sllc;
+ struct sock *sk = sock->sk;
+ struct llc_sock *llc = llc_sk(sk);
+- int rc = 0;
++ int rc = -EBADF;
+
+ memset(&sllc, 0, sizeof(sllc));
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_ZAPPED))
+ goto out;
+ *uaddrlen = sizeof(sllc);
+- memset(uaddr, 0, *uaddrlen);
+ if (peer) {
+ rc = -ENOTCONN;
+ if (sk->sk_state != TCP_ESTABLISHED)
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index e1a66cf..72f4253 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -2713,6 +2713,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+ {
+ struct ip_vs_timeout_user t;
+
++ memset(&t, 0, sizeof(t));
+ __ip_vs_get_timeouts(net, &t);
+ if (copy_to_user(user, &t, sizeof(t)) != 0)
+ ret = -EFAULT;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index a99fb41..38b78b9 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1333,7 +1333,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ if (NULL == siocb->scm)
+ siocb->scm = &scm;
+
+- err = scm_send(sock, msg, siocb->scm);
++ err = scm_send(sock, msg, siocb->scm, true);
+ if (err < 0)
+ return err;
+
+@@ -1344,7 +1344,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ dst_pid = addr->nl_pid;
+ dst_group = ffs(addr->nl_groups);
+ err = -EPERM;
+- if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
++ if ((dst_group || dst_pid) &&
++ !netlink_capable(sock, NL_NONROOT_SEND))
+ goto out;
+ } else {
+ dst_pid = nlk->dst_pid;
+@@ -2103,6 +2104,7 @@ static void __init netlink_add_usersock_entry(void)
+ rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
+ nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
+ nl_table[NETLINK_USERSOCK].registered = 1;
++ nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
+
+ netlink_table_ungrab();
+ }
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index d9d4970..85afc13 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1281,6 +1281,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
+ spin_unlock(&f->lock);
+ }
+
++bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
++{
++ if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
++ return true;
++
++ return false;
++}
++
+ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
+ {
+ struct packet_sock *po = pkt_sk(sk);
+@@ -1333,6 +1341,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
+ match->prot_hook.dev = po->prot_hook.dev;
+ match->prot_hook.func = packet_rcv_fanout;
+ match->prot_hook.af_packet_priv = match;
++ match->prot_hook.id_match = match_fanout_group;
+ dev_add_pack(&match->prot_hook);
+ list_add(&match->list, &fanout_list);
+ }
+@@ -1931,7 +1940,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
+
+ if (likely(po->tx_ring.pg_vec)) {
+ ph = skb_shinfo(skb)->destructor_arg;
+- BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
+ BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
+ atomic_dec(&po->tx_ring.pending);
+ __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
+diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
+index b77f5a0..bdacd8d 100644
+--- a/net/sched/act_gact.c
++++ b/net/sched/act_gact.c
+@@ -67,6 +67,9 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
+ struct tcf_common *pc;
+ int ret = 0;
+ int err;
++#ifdef CONFIG_GACT_PROB
++ struct tc_gact_p *p_parm = NULL;
++#endif
+
+ if (nla == NULL)
+ return -EINVAL;
+@@ -82,6 +85,12 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
+ #ifndef CONFIG_GACT_PROB
+ if (tb[TCA_GACT_PROB] != NULL)
+ return -EOPNOTSUPP;
++#else
++ if (tb[TCA_GACT_PROB]) {
++ p_parm = nla_data(tb[TCA_GACT_PROB]);
++ if (p_parm->ptype >= MAX_RAND)
++ return -EINVAL;
++ }
+ #endif
+
+ pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info);
+@@ -103,8 +112,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
+ spin_lock_bh(&gact->tcf_lock);
+ gact->tcf_action = parm->action;
+ #ifdef CONFIG_GACT_PROB
+- if (tb[TCA_GACT_PROB] != NULL) {
+- struct tc_gact_p *p_parm = nla_data(tb[TCA_GACT_PROB]);
++ if (p_parm) {
+ gact->tcfg_paction = p_parm->paction;
+ gact->tcfg_pval = p_parm->pval;
+ gact->tcfg_ptype = p_parm->ptype;
+@@ -133,7 +141,7 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
+
+ spin_lock(&gact->tcf_lock);
+ #ifdef CONFIG_GACT_PROB
+- if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL)
++ if (gact->tcfg_ptype)
+ action = gact_rand[gact->tcfg_ptype](gact);
+ else
+ action = gact->tcf_action;
+diff --git a/net/socket.c b/net/socket.c
+index 273cbce..68879db 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -2645,6 +2645,7 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
+ if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf)))
+ return -EFAULT;
+
++ memset(&ifc, 0, sizeof(ifc));
+ if (ifc32.ifcbuf == 0) {
+ ifc32.ifc_len = 0;
+ ifc.ifc_len = 0;
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 3ac9789..ffba207 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -962,11 +962,11 @@ static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
+ return false;
+ }
+
+-static void xprt_alloc_slot(struct rpc_task *task)
++void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
+ {
+- struct rpc_xprt *xprt = task->tk_xprt;
+ struct rpc_rqst *req;
+
++ spin_lock(&xprt->reserve_lock);
+ if (!list_empty(&xprt->free)) {
+ req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
+ list_del(&req->rq_list);
+@@ -987,12 +987,29 @@ static void xprt_alloc_slot(struct rpc_task *task)
+ default:
+ task->tk_status = -EAGAIN;
+ }
++ spin_unlock(&xprt->reserve_lock);
+ return;
+ out_init_req:
+ task->tk_status = 0;
+ task->tk_rqstp = req;
+ xprt_request_init(task, xprt);
++ spin_unlock(&xprt->reserve_lock);
++}
++EXPORT_SYMBOL_GPL(xprt_alloc_slot);
++
++void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
++{
++ /* Note: grabbing the xprt_lock_write() ensures that we throttle
++ * new slot allocation if the transport is congested (i.e. when
++ * reconnecting a stream transport or when out of socket write
++ * buffer space).
++ */
++ if (xprt_lock_write(xprt, task)) {
++ xprt_alloc_slot(xprt, task);
++ xprt_release_write(xprt, task);
++ }
+ }
++EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);
+
+ static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
+ {
+@@ -1076,20 +1093,9 @@ void xprt_reserve(struct rpc_task *task)
+ if (task->tk_rqstp != NULL)
+ return;
+
+- /* Note: grabbing the xprt_lock_write() here is not strictly needed,
+- * but ensures that we throttle new slot allocation if the transport
+- * is congested (e.g. if reconnecting or if we're out of socket
+- * write buffer space).
+- */
+ task->tk_timeout = 0;
+ task->tk_status = -EAGAIN;
+- if (!xprt_lock_write(xprt, task))
+- return;
+-
+- spin_lock(&xprt->reserve_lock);
+- xprt_alloc_slot(task);
+- spin_unlock(&xprt->reserve_lock);
+- xprt_release_write(xprt, task);
++ xprt->ops->alloc_slot(xprt, task);
+ }
+
+ static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
+diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
+index 06cdbff..5d9202d 100644
+--- a/net/sunrpc/xprtrdma/transport.c
++++ b/net/sunrpc/xprtrdma/transport.c
+@@ -713,6 +713,7 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
+ static struct rpc_xprt_ops xprt_rdma_procs = {
+ .reserve_xprt = xprt_rdma_reserve_xprt,
+ .release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */
++ .alloc_slot = xprt_alloc_slot,
+ .release_request = xprt_release_rqst_cong, /* ditto */
+ .set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */
+ .rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 1a6edc7..c5391af 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2422,6 +2422,7 @@ static void bc_destroy(struct rpc_xprt *xprt)
+ static struct rpc_xprt_ops xs_local_ops = {
+ .reserve_xprt = xprt_reserve_xprt,
+ .release_xprt = xs_tcp_release_xprt,
++ .alloc_slot = xprt_alloc_slot,
+ .rpcbind = xs_local_rpcbind,
+ .set_port = xs_local_set_port,
+ .connect = xs_connect,
+@@ -2438,6 +2439,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
+ .set_buffer_size = xs_udp_set_buffer_size,
+ .reserve_xprt = xprt_reserve_xprt_cong,
+ .release_xprt = xprt_release_xprt_cong,
++ .alloc_slot = xprt_alloc_slot,
+ .rpcbind = rpcb_getport_async,
+ .set_port = xs_set_port,
+ .connect = xs_connect,
+@@ -2455,6 +2457,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
+ static struct rpc_xprt_ops xs_tcp_ops = {
+ .reserve_xprt = xprt_reserve_xprt,
+ .release_xprt = xs_tcp_release_xprt,
++ .alloc_slot = xprt_lock_and_alloc_slot,
+ .rpcbind = rpcb_getport_async,
+ .set_port = xs_set_port,
+ .connect = xs_connect,
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index d99678a..317bfe3 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1435,7 +1435,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ if (NULL == siocb->scm)
+ siocb->scm = &tmp_scm;
+ wait_for_unix_gc();
+- err = scm_send(sock, msg, siocb->scm);
++ err = scm_send(sock, msg, siocb->scm, false);
+ if (err < 0)
+ return err;
+
+@@ -1596,7 +1596,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ if (NULL == siocb->scm)
+ siocb->scm = &tmp_scm;
+ wait_for_unix_gc();
+- err = scm_send(sock, msg, siocb->scm);
++ err = scm_send(sock, msg, siocb->scm, false);
+ if (err < 0)
+ return err;
+
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index f3be54e..b0187e7 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2312,6 +2312,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
+ }
+ if (codec->patch_ops.free)
+ codec->patch_ops.free(codec);
++ memset(&codec->patch_ops, 0, sizeof(codec->patch_ops));
+ codec->proc_widget_hook = NULL;
+ codec->spec = NULL;
+ free_hda_cache(&codec->amp_cache);
+@@ -2324,7 +2325,6 @@ int snd_hda_codec_reset(struct hda_codec *codec)
+ codec->num_pcms = 0;
+ codec->pcm_info = NULL;
+ codec->preset = NULL;
+- memset(&codec->patch_ops, 0, sizeof(codec->patch_ops));
+ codec->slave_dig_outs = NULL;
+ codec->spdif_status_reset = 0;
+ module_put(codec->owner);
+diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
+index 764cc93..075d5aa 100644
+--- a/sound/pci/ice1712/prodigy_hifi.c
++++ b/sound/pci/ice1712/prodigy_hifi.c
+@@ -297,6 +297,7 @@ static int ak4396_dac_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem
+ }
+
+ static const DECLARE_TLV_DB_SCALE(db_scale_wm_dac, -12700, 100, 1);
++static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0);
+
+ static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = {
+ {
+@@ -307,7 +308,7 @@ static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = {
+ .info = ak4396_dac_vol_info,
+ .get = ak4396_dac_vol_get,
+ .put = ak4396_dac_vol_put,
+- .tlv = { .p = db_scale_wm_dac },
++ .tlv = { .p = ak4396_db_scale },
+ },
+ };
+
diff --git a/1030_linux-3.2.31.patch b/1030_linux-3.2.31.patch
new file mode 100644
index 00000000..c6accf5f
--- /dev/null
+++ b/1030_linux-3.2.31.patch
@@ -0,0 +1,3327 @@
+diff --git a/Makefile b/Makefile
+index 9fd7e60..fd9c414 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 30
++SUBLEVEL = 31
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
+index 9c18ebd..d63632f 100644
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -648,6 +648,7 @@ __armv7_mmu_cache_on:
+ mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
+ #endif
+ mrc p15, 0, r0, c1, c0, 0 @ read control reg
++ bic r0, r0, #1 << 28 @ clear SCTLR.TRE
+ orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
+ orr r0, r0, #0x003c @ write buffer
+ #ifdef CONFIG_MMU
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 73ef56c..bda833c 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -160,7 +160,7 @@ static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
+ #endif
+
+ #ifdef P6_NOP1
+-static const unsigned char __initconst_or_module p6nops[] =
++static const unsigned char p6nops[] =
+ {
+ P6_NOP1,
+ P6_NOP2,
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 44d4393..a1e21ae 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -1289,6 +1289,10 @@ asmlinkage void __init xen_start_kernel(void)
+
+ /* Make sure ACS will be enabled */
+ pci_request_acs();
++
++ /* Avoid searching for BIOS MP tables */
++ x86_init.mpparse.find_smp_config = x86_init_noop;
++ x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+ }
+ #ifdef CONFIG_PCI
+ /* PCI BIOS service won't work from a PV guest. */
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index bb104b4..6e5a7f1 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -16,6 +16,7 @@
+ #include <asm/e820.h>
+ #include <asm/setup.h>
+ #include <asm/acpi.h>
++#include <asm/numa.h>
+ #include <asm/xen/hypervisor.h>
+ #include <asm/xen/hypercall.h>
+
+@@ -431,4 +432,7 @@ void __init xen_arch_setup(void)
+ boot_option_idle_override = IDLE_HALT;
+ WARN_ON(set_pm_idle_to_default());
+ fiddle_vdso();
++#ifdef CONFIG_NUMA
++ numa_off = 1;
++#endif
+ }
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index c04ad68..321e23e 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4118,6 +4118,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+
+ /* Devices which aren't very happy with higher link speeds */
+ { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
++ { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
+
+ /*
+ * Devices which choke on SETXFER. Applies only if both the
+diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
+index de0435e..887f68f 100644
+--- a/drivers/block/aoe/aoecmd.c
++++ b/drivers/block/aoe/aoecmd.c
+@@ -35,6 +35,7 @@ new_skb(ulong len)
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb->protocol = __constant_htons(ETH_P_AOE);
++ skb_checksum_none_assert(skb);
+ }
+ return skb;
+ }
+diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
+index 38aa6dd..da33111 100644
+--- a/drivers/block/cciss_scsi.c
++++ b/drivers/block/cciss_scsi.c
+@@ -795,6 +795,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
+ }
+ break;
+ case CMD_PROTOCOL_ERR:
++ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev,
+ "%p has protocol error\n", c);
+ break;
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index c3f0ee1..86848c6 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -445,6 +445,14 @@ static void nbd_clear_que(struct nbd_device *lo)
+ req->errors++;
+ nbd_end_request(req);
+ }
++
++ while (!list_empty(&lo->waiting_queue)) {
++ req = list_entry(lo->waiting_queue.next, struct request,
++ queuelist);
++ list_del_init(&req->queuelist);
++ req->errors++;
++ nbd_end_request(req);
++ }
+ }
+
+
+@@ -594,6 +602,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ lo->file = NULL;
+ nbd_clear_que(lo);
+ BUG_ON(!list_empty(&lo->queue_head));
++ BUG_ON(!list_empty(&lo->waiting_queue));
+ if (file)
+ fput(file);
+ return 0;
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index f1bd44f..5c6709d 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -62,6 +62,7 @@ static struct usb_device_id ath3k_table[] = {
+
+ /* Atheros AR3011 with sflash firmware*/
+ { USB_DEVICE(0x0CF3, 0x3002) },
++ { USB_DEVICE(0x0CF3, 0xE019) },
+ { USB_DEVICE(0x13d3, 0x3304) },
+ { USB_DEVICE(0x0930, 0x0215) },
+ { USB_DEVICE(0x0489, 0xE03D) },
+@@ -76,12 +77,15 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x04CA, 0x3005) },
+ { USB_DEVICE(0x13d3, 0x3362) },
+ { USB_DEVICE(0x0CF3, 0xE004) },
++ { USB_DEVICE(0x0930, 0x0219) },
++ { USB_DEVICE(0x0489, 0xe057) },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE02C) },
+
+ /* Atheros AR5BBU22 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE03C) },
++ { USB_DEVICE(0x0489, 0xE036) },
+
+ { } /* Terminating entry */
+ };
+@@ -100,9 +104,12 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU22 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
+
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index fc4bcd6..6f95d98 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -60,6 +60,9 @@ static struct usb_device_id btusb_table[] = {
+ /* Generic Bluetooth USB device */
+ { USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
+
++ /* Apple-specific (Broadcom) devices */
++ { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
++
+ /* Broadcom SoftSailing reporting vendor specific */
+ { USB_DEVICE(0x0a5c, 0x21e1) },
+
+@@ -102,15 +105,14 @@ static struct usb_device_id btusb_table[] = {
+
+ /* Broadcom BCM20702A0 */
+ { USB_DEVICE(0x0489, 0xe042) },
+- { USB_DEVICE(0x0a5c, 0x21e3) },
+- { USB_DEVICE(0x0a5c, 0x21e6) },
+- { USB_DEVICE(0x0a5c, 0x21e8) },
+- { USB_DEVICE(0x0a5c, 0x21f3) },
+ { USB_DEVICE(0x413c, 0x8197) },
+
+ /* Foxconn - Hon Hai */
+ { USB_DEVICE(0x0489, 0xe033) },
+
++ /*Broadcom devices with vendor specific id */
++ { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
++
+ { } /* Terminating entry */
+ };
+
+@@ -125,6 +127,7 @@ static struct usb_device_id blacklist_table[] = {
+
+ /* Atheros 3011 with sflash firmware */
+ { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
++ { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
+@@ -139,12 +142,15 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 },
+
+ /* Broadcom BCM2035 */
+ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
+diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
+index ad683ec..b7fe343 100644
+--- a/drivers/cpufreq/powernow-k8.c
++++ b/drivers/cpufreq/powernow-k8.c
+@@ -32,7 +32,6 @@
+ #include <linux/slab.h>
+ #include <linux/string.h>
+ #include <linux/cpumask.h>
+-#include <linux/sched.h> /* for current / set_cpus_allowed() */
+ #include <linux/io.h>
+ #include <linux/delay.h>
+
+@@ -1132,16 +1131,23 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
+ return res;
+ }
+
+-/* Driver entry point to switch to the target frequency */
+-static int powernowk8_target(struct cpufreq_policy *pol,
+- unsigned targfreq, unsigned relation)
++struct powernowk8_target_arg {
++ struct cpufreq_policy *pol;
++ unsigned targfreq;
++ unsigned relation;
++};
++
++static long powernowk8_target_fn(void *arg)
+ {
+- cpumask_var_t oldmask;
++ struct powernowk8_target_arg *pta = arg;
++ struct cpufreq_policy *pol = pta->pol;
++ unsigned targfreq = pta->targfreq;
++ unsigned relation = pta->relation;
+ struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
+ u32 checkfid;
+ u32 checkvid;
+ unsigned int newstate;
+- int ret = -EIO;
++ int ret;
+
+ if (!data)
+ return -EINVAL;
+@@ -1149,29 +1155,16 @@ static int powernowk8_target(struct cpufreq_policy *pol,
+ checkfid = data->currfid;
+ checkvid = data->currvid;
+
+- /* only run on specific CPU from here on. */
+- /* This is poor form: use a workqueue or smp_call_function_single */
+- if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
+- return -ENOMEM;
+-
+- cpumask_copy(oldmask, tsk_cpus_allowed(current));
+- set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
+-
+- if (smp_processor_id() != pol->cpu) {
+- printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
+- goto err_out;
+- }
+-
+ if (pending_bit_stuck()) {
+ printk(KERN_ERR PFX "failing targ, change pending bit set\n");
+- goto err_out;
++ return -EIO;
+ }
+
+ pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
+ pol->cpu, targfreq, pol->min, pol->max, relation);
+
+ if (query_current_values_with_pending_wait(data))
+- goto err_out;
++ return -EIO;
+
+ if (cpu_family != CPU_HW_PSTATE) {
+ pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
+@@ -1189,7 +1182,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
+
+ if (cpufreq_frequency_table_target(pol, data->powernow_table,
+ targfreq, relation, &newstate))
+- goto err_out;
++ return -EIO;
+
+ mutex_lock(&fidvid_mutex);
+
+@@ -1202,9 +1195,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
+ ret = transition_frequency_fidvid(data, newstate);
+ if (ret) {
+ printk(KERN_ERR PFX "transition frequency failed\n");
+- ret = 1;
+ mutex_unlock(&fidvid_mutex);
+- goto err_out;
++ return 1;
+ }
+ mutex_unlock(&fidvid_mutex);
+
+@@ -1213,12 +1205,25 @@ static int powernowk8_target(struct cpufreq_policy *pol,
+ data->powernow_table[newstate].index);
+ else
+ pol->cur = find_khz_freq_from_fid(data->currfid);
+- ret = 0;
+
+-err_out:
+- set_cpus_allowed_ptr(current, oldmask);
+- free_cpumask_var(oldmask);
+- return ret;
++ return 0;
++}
++
++/* Driver entry point to switch to the target frequency */
++static int powernowk8_target(struct cpufreq_policy *pol,
++ unsigned targfreq, unsigned relation)
++{
++ struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
++ .relation = relation };
++
++ /*
++ * Must run on @pol->cpu. cpufreq core is responsible for ensuring
++ * that we're bound to the current CPU and pol->cpu stays online.
++ */
++ if (smp_processor_id() == pol->cpu)
++ return powernowk8_target_fn(&pta);
++ else
++ return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
+ }
+
+ /* Driver entry point to verify the policy and range of frequencies */
+diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
+index 79dcf6e..c60d9c1 100644
+--- a/drivers/dma/at_hdmac.c
++++ b/drivers/dma/at_hdmac.c
+@@ -678,7 +678,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ flags);
+
+ if (unlikely(!atslave || !sg_len)) {
+- dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
++ dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
+ return NULL;
+ }
+
+@@ -706,6 +706,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+
+ mem = sg_dma_address(sg);
+ len = sg_dma_len(sg);
++ if (unlikely(!len)) {
++ dev_dbg(chan2dev(chan),
++ "prep_slave_sg: sg(%d) data length is zero\n", i);
++ goto err;
++ }
+ mem_width = 2;
+ if (unlikely(mem & 3 || len & 3))
+ mem_width = 0;
+@@ -740,6 +745,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+
+ mem = sg_dma_address(sg);
+ len = sg_dma_len(sg);
++ if (unlikely(!len)) {
++ dev_dbg(chan2dev(chan),
++ "prep_slave_sg: sg(%d) data length is zero\n", i);
++ goto err;
++ }
+ mem_width = 2;
+ if (unlikely(mem & 3 || len & 3))
+ mem_width = 0;
+@@ -773,6 +783,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+
+ err_desc_get:
+ dev_err(chan2dev(chan), "not enough descriptors available\n");
++err:
+ atc_desc_put(atchan, first);
+ return NULL;
+ }
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index 57104147..e8eedb7 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -858,6 +858,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
+ /* Initialize channel parameters */
+ num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
+ pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
++ if (!pdmac->peripherals) {
++ ret = -ENOMEM;
++ dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
++ goto probe_err4;
++ }
+
+ for (i = 0; i < num_chan; i++) {
+ pch = &pdmac->peripherals[i];
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 0db57b5..da71881 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -554,7 +554,8 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
+ {
+ struct sbridge_pvt *pvt = mci->pvt_info;
+ struct csrow_info *csr;
+- int i, j, banks, ranks, rows, cols, size, npages;
++ unsigned i, j, banks, ranks, rows, cols, npages;
++ u64 size;
+ int csrow = 0;
+ unsigned long last_page = 0;
+ u32 reg;
+@@ -626,10 +627,10 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
+ cols = numcol(mtr);
+
+ /* DDR3 has 8 I/O banks */
+- size = (rows * cols * banks * ranks) >> (20 - 3);
++ size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
+ npages = MiB_TO_PAGES(size);
+
+- debugf0("mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
++ debugf0("mc#%d: channel %d, dimm %d, %Ld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+ pvt->sbridge_dev->mc, i, j,
+ size, npages,
+ banks, ranks, rows, cols);
+diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
+index 5b69480..2c40776 100644
+--- a/drivers/gpio/gpio-lpc32xx.c
++++ b/drivers/gpio/gpio-lpc32xx.c
+@@ -295,6 +295,7 @@ static int lpc32xx_gpio_dir_output_p012(struct gpio_chip *chip, unsigned pin,
+ {
+ struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
++ __set_gpio_level_p012(group, pin, value);
+ __set_gpio_dir_p012(group, pin, 0);
+
+ return 0;
+@@ -305,6 +306,7 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
+ {
+ struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
++ __set_gpio_level_p3(group, pin, value);
+ __set_gpio_dir_p3(group, pin, 0);
+
+ return 0;
+@@ -313,6 +315,9 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
+ static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin,
+ int value)
+ {
++ struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
++
++ __set_gpo_level_p3(group, pin, value);
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 548a400..e48e01e 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -3357,7 +3357,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+- BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
++ if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
++ return -EBUSY;
+ WARN_ON(i915_verify_lists(dev));
+
+ if (obj->gtt_space != NULL) {
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 9cd81ba..c2a64f4 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -271,7 +271,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
+ u32 temp;
+ u32 enable_bits = SDVO_ENABLE;
+
+- if (intel_hdmi->has_audio)
++ if (intel_hdmi->has_audio || mode != DRM_MODE_DPMS_ON)
+ enable_bits |= SDVO_AUDIO_ENABLE;
+
+ temp = I915_READ(intel_hdmi->sdvox_reg);
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index ceffd20..a4011b0 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -1446,98 +1446,14 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
+ }
+ }
+
+-/**
+- * radeon_get_pll_use_mask - look up a mask of which pplls are in use
+- *
+- * @crtc: drm crtc
+- *
+- * Returns the mask of which PPLLs (Pixel PLLs) are in use.
+- */
+-static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
+-{
+- struct drm_device *dev = crtc->dev;
+- struct drm_crtc *test_crtc;
+- struct radeon_crtc *radeon_test_crtc;
+- u32 pll_in_use = 0;
+-
+- list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+- if (crtc == test_crtc)
+- continue;
+-
+- radeon_test_crtc = to_radeon_crtc(test_crtc);
+- if (radeon_test_crtc->pll_id != ATOM_PPLL_INVALID)
+- pll_in_use |= (1 << radeon_test_crtc->pll_id);
+- }
+- return pll_in_use;
+-}
+-
+-/**
+- * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
+- *
+- * @crtc: drm crtc
+- *
+- * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
+- * also in DP mode. For DP, a single PPLL can be used for all DP
+- * crtcs/encoders.
+- */
+-static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+-{
+- struct drm_device *dev = crtc->dev;
+- struct drm_encoder *test_encoder;
+- struct radeon_crtc *radeon_test_crtc;
+-
+- list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+- if (test_encoder->crtc && (test_encoder->crtc != crtc)) {
+- if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
+- /* for DP use the same PLL for all */
+- radeon_test_crtc = to_radeon_crtc(test_encoder->crtc);
+- if (radeon_test_crtc->pll_id != ATOM_PPLL_INVALID)
+- return radeon_test_crtc->pll_id;
+- }
+- }
+- }
+- return ATOM_PPLL_INVALID;
+-}
+-
+-/**
+- * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
+- *
+- * @crtc: drm crtc
+- *
+- * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
+- * a single PPLL can be used for all DP crtcs/encoders. For non-DP
+- * monitors a dedicated PPLL must be used. If a particular board has
+- * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
+- * as there is no need to program the PLL itself. If we are not able to
+- * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
+- * avoid messing up an existing monitor.
+- *
+- * Asic specific PLL information
+- *
+- * DCE 6.1
+- * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
+- * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
+- *
+- * DCE 6.0
+- * - PPLL0 is available to all UNIPHY (DP only)
+- * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+- *
+- * DCE 5.0
+- * - DCPLL is available to all UNIPHY (DP only)
+- * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+- *
+- * DCE 3.0/4.0/4.1
+- * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
+- *
+- */
+ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+ {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *test_encoder;
+- u32 pll_in_use;
+- int pll;
++ struct drm_crtc *test_crtc;
++ uint32_t pll_in_use = 0;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+@@ -1545,7 +1461,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+ /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
+ * depending on the asic:
+ * DCE4: PPLL or ext clock
+- * DCE5: PPLL, DCPLL, or ext clock
++ * DCE5: DCPLL or ext clock
+ *
+ * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
+ * PPLL/DCPLL programming and only program the DP DTO for the
+@@ -1553,31 +1469,29 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+ */
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
+ if (rdev->clock.dp_extclk)
+- /* skip PPLL programming if using ext clock */
+ return ATOM_PPLL_INVALID;
+ else if (ASIC_IS_DCE5(rdev))
+- /* use DCPLL for all DP */
+ return ATOM_DCPLL;
+- else {
+- /* use the same PPLL for all DP monitors */
+- pll = radeon_get_shared_dp_ppll(crtc);
+- if (pll != ATOM_PPLL_INVALID)
+- return pll;
+- }
+ }
+- break;
+ }
+ }
+- /* all other cases */
+- pll_in_use = radeon_get_pll_use_mask(crtc);
+- if (!(pll_in_use & (1 << ATOM_PPLL2)))
+- return ATOM_PPLL2;
+- if (!(pll_in_use & (1 << ATOM_PPLL1)))
++
++ /* otherwise, pick one of the plls */
++ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
++ struct radeon_crtc *radeon_test_crtc;
++
++ if (crtc == test_crtc)
++ continue;
++
++ radeon_test_crtc = to_radeon_crtc(test_crtc);
++ if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
++ (radeon_test_crtc->pll_id <= ATOM_PPLL2))
++ pll_in_use |= (1 << radeon_test_crtc->pll_id);
++ }
++ if (!(pll_in_use & 1))
+ return ATOM_PPLL1;
+- DRM_ERROR("unable to allocate a PPLL\n");
+- return ATOM_PPLL_INVALID;
++ return ATOM_PPLL2;
+ } else
+- /* use PPLL1 or PPLL2 */
+ return radeon_crtc->crtc_id;
+
+ }
+@@ -1696,7 +1610,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
+ break;
+ }
+ done:
+- radeon_crtc->pll_id = ATOM_PPLL_INVALID;
++ radeon_crtc->pll_id = -1;
+ }
+
+ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
+@@ -1745,6 +1659,6 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
+ else
+ radeon_crtc->crtc_offset = 0;
+ }
+- radeon_crtc->pll_id = ATOM_PPLL_INVALID;
++ radeon_crtc->pll_id = -1;
+ drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
+ }
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 0c8bea9..a21e763 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1026,7 +1026,7 @@ static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
+ return report;
+ }
+
+-void hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
++int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+ int interrupt)
+ {
+ struct hid_report_enum *report_enum = hid->report_enum + type;
+@@ -1034,10 +1034,11 @@ void hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+ unsigned int a;
+ int rsize, csize = size;
+ u8 *cdata = data;
++ int ret = 0;
+
+ report = hid_get_report(report_enum, data);
+ if (!report)
+- return;
++ goto out;
+
+ if (report_enum->numbered) {
+ cdata++;
+@@ -1057,14 +1058,19 @@ void hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+
+ if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
+ hid->hiddev_report_event(hid, report);
+- if (hid->claimed & HID_CLAIMED_HIDRAW)
+- hidraw_report_event(hid, data, size);
++ if (hid->claimed & HID_CLAIMED_HIDRAW) {
++ ret = hidraw_report_event(hid, data, size);
++ if (ret)
++ goto out;
++ }
+
+ for (a = 0; a < report->maxfield; a++)
+ hid_input_field(hid, report->field[a], cdata, interrupt);
+
+ if (hid->claimed & HID_CLAIMED_INPUT)
+ hidinput_report_event(hid, report);
++out:
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(hid_report_raw_event);
+
+@@ -1141,7 +1147,7 @@ nomem:
+ }
+ }
+
+- hid_report_raw_event(hid, type, data, size, interrupt);
++ ret = hid_report_raw_event(hid, type, data, size, interrupt);
+
+ unlock:
+ up(&hid->driver_lock);
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 2eac8c5..8821ecc 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -185,6 +185,7 @@ static struct hid_ll_driver logi_dj_ll_driver;
+ static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
+ size_t count,
+ unsigned char report_type);
++static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev);
+
+ static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
+@@ -225,6 +226,7 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
+ if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
+ SPFUNCTION_DEVICE_LIST_EMPTY) {
+ dbg_hid("%s: device list is empty\n", __func__);
++ djrcv_dev->querying_devices = false;
+ return;
+ }
+
+@@ -235,6 +237,12 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
+ return;
+ }
+
++ if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
++ /* The device is already known. No need to reallocate it. */
++ dbg_hid("%s: device is already known\n", __func__);
++ return;
++ }
++
+ dj_hiddev = hid_allocate_device();
+ if (IS_ERR(dj_hiddev)) {
+ dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n",
+@@ -298,6 +306,7 @@ static void delayedwork_callback(struct work_struct *work)
+ struct dj_report dj_report;
+ unsigned long flags;
+ int count;
++ int retval;
+
+ dbg_hid("%s\n", __func__);
+
+@@ -330,6 +339,25 @@ static void delayedwork_callback(struct work_struct *work)
+ logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report);
+ break;
+ default:
++ /* A normal report (i. e. not belonging to a pair/unpair notification)
++ * arriving here, means that the report arrived but we did not have a
++ * paired dj_device associated to the report's device_index, this
++ * means that the original "device paired" notification corresponding
++ * to this dj_device never arrived to this driver. The reason is that
++ * hid-core discards all packets coming from a device while probe() is
++ * executing. */
++ if (!djrcv_dev->paired_dj_devices[dj_report.device_index]) {
++ /* ok, we don't know the device, just re-ask the
++ * receiver for the list of connected devices. */
++ retval = logi_dj_recv_query_paired_devices(djrcv_dev);
++ if (!retval) {
++ /* everything went fine, so just leave */
++ break;
++ }
++ dev_err(&djrcv_dev->hdev->dev,
++ "%s:logi_dj_recv_query_paired_devices "
++ "error:%d\n", __func__, retval);
++ }
+ dbg_hid("%s: unexpected report type\n", __func__);
+ }
+ }
+@@ -360,6 +388,12 @@ static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
+ if (!djdev) {
+ dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
+ " is NULL, index %d\n", dj_report->device_index);
++ kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
++
++ if (schedule_work(&djrcv_dev->work) == 0) {
++ dbg_hid("%s: did not schedule the work item, was already "
++ "queued\n", __func__);
++ }
+ return;
+ }
+
+@@ -390,6 +424,12 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
+ if (dj_device == NULL) {
+ dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
+ " is NULL, index %d\n", dj_report->device_index);
++ kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
++
++ if (schedule_work(&djrcv_dev->work) == 0) {
++ dbg_hid("%s: did not schedule the work item, was already "
++ "queued\n", __func__);
++ }
+ return;
+ }
+
+@@ -428,27 +468,42 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
+
+ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
+ {
+- struct dj_report dj_report;
++ struct dj_report *dj_report;
++ int retval;
++
++ /* no need to protect djrcv_dev->querying_devices */
++ if (djrcv_dev->querying_devices)
++ return 0;
+
+- memset(&dj_report, 0, sizeof(dj_report));
+- dj_report.report_id = REPORT_ID_DJ_SHORT;
+- dj_report.device_index = 0xFF;
+- dj_report.report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
+- return logi_dj_recv_send_report(djrcv_dev, &dj_report);
++ dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
++ if (!dj_report)
++ return -ENOMEM;
++ dj_report->report_id = REPORT_ID_DJ_SHORT;
++ dj_report->device_index = 0xFF;
++ dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
++ retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
++ kfree(dj_report);
++ return retval;
+ }
+
++
+ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
+ unsigned timeout)
+ {
+- struct dj_report dj_report;
++ struct dj_report *dj_report;
++ int retval;
+
+- memset(&dj_report, 0, sizeof(dj_report));
+- dj_report.report_id = REPORT_ID_DJ_SHORT;
+- dj_report.device_index = 0xFF;
+- dj_report.report_type = REPORT_TYPE_CMD_SWITCH;
+- dj_report.report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x1F;
+- dj_report.report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
+- return logi_dj_recv_send_report(djrcv_dev, &dj_report);
++ dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
++ if (!dj_report)
++ return -ENOMEM;
++ dj_report->report_id = REPORT_ID_DJ_SHORT;
++ dj_report->device_index = 0xFF;
++ dj_report->report_type = REPORT_TYPE_CMD_SWITCH;
++ dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
++ dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
++ retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
++ kfree(dj_report);
++ return retval;
+ }
+
+
+diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h
+index fd28a5e..4a40003 100644
+--- a/drivers/hid/hid-logitech-dj.h
++++ b/drivers/hid/hid-logitech-dj.h
+@@ -101,6 +101,7 @@ struct dj_receiver_dev {
+ struct work_struct work;
+ struct kfifo notif_fifo;
+ spinlock_t lock;
++ bool querying_devices;
+ };
+
+ struct dj_device {
+diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
+index cf7d6d5..17d15bb 100644
+--- a/drivers/hid/hidraw.c
++++ b/drivers/hid/hidraw.c
+@@ -42,6 +42,7 @@ static struct cdev hidraw_cdev;
+ static struct class *hidraw_class;
+ static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES];
+ static DEFINE_MUTEX(minors_lock);
++static void drop_ref(struct hidraw *hid, int exists_bit);
+
+ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
+ {
+@@ -87,13 +88,16 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count,
+ len = list->buffer[list->tail].len > count ?
+ count : list->buffer[list->tail].len;
+
+- if (copy_to_user(buffer, list->buffer[list->tail].value, len)) {
+- ret = -EFAULT;
+- goto out;
++ if (list->buffer[list->tail].value) {
++ if (copy_to_user(buffer, list->buffer[list->tail].value, len)) {
++ ret = -EFAULT;
++ goto out;
++ }
++ ret = len;
+ }
+- ret = len;
+
+ kfree(list->buffer[list->tail].value);
++ list->buffer[list->tail].value = NULL;
+ list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1);
+ }
+ out:
+@@ -110,7 +114,7 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
+ __u8 *buf;
+ int ret = 0;
+
+- if (!hidraw_table[minor]) {
++ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+ ret = -ENODEV;
+ goto out;
+ }
+@@ -258,7 +262,7 @@ static int hidraw_open(struct inode *inode, struct file *file)
+ }
+
+ mutex_lock(&minors_lock);
+- if (!hidraw_table[minor]) {
++ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+@@ -295,32 +299,12 @@ out:
+ static int hidraw_release(struct inode * inode, struct file * file)
+ {
+ unsigned int minor = iminor(inode);
+- struct hidraw *dev;
+ struct hidraw_list *list = file->private_data;
+- int ret;
+-
+- mutex_lock(&minors_lock);
+- if (!hidraw_table[minor]) {
+- ret = -ENODEV;
+- goto unlock;
+- }
+
++ drop_ref(hidraw_table[minor], 0);
+ list_del(&list->node);
+- dev = hidraw_table[minor];
+- if (!--dev->open) {
+- if (list->hidraw->exist) {
+- hid_hw_power(dev->hid, PM_HINT_NORMAL);
+- hid_hw_close(dev->hid);
+- } else {
+- kfree(list->hidraw);
+- }
+- }
+ kfree(list);
+- ret = 0;
+-unlock:
+- mutex_unlock(&minors_lock);
+-
+- return ret;
++ return 0;
+ }
+
+ static long hidraw_ioctl(struct file *file, unsigned int cmd,
+@@ -437,19 +421,29 @@ static const struct file_operations hidraw_ops = {
+ .llseek = noop_llseek,
+ };
+
+-void hidraw_report_event(struct hid_device *hid, u8 *data, int len)
++int hidraw_report_event(struct hid_device *hid, u8 *data, int len)
+ {
+ struct hidraw *dev = hid->hidraw;
+ struct hidraw_list *list;
++ int ret = 0;
+
+ list_for_each_entry(list, &dev->list, node) {
+- list->buffer[list->head].value = kmemdup(data, len, GFP_ATOMIC);
++ int new_head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1);
++
++ if (new_head == list->tail)
++ continue;
++
++ if (!(list->buffer[list->head].value = kmemdup(data, len, GFP_ATOMIC))) {
++ ret = -ENOMEM;
++ break;
++ }
+ list->buffer[list->head].len = len;
+- list->head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1);
++ list->head = new_head;
+ kill_fasync(&list->fasync, SIGIO, POLL_IN);
+ }
+
+ wake_up_interruptible(&dev->wait);
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(hidraw_report_event);
+
+@@ -512,21 +506,7 @@ EXPORT_SYMBOL_GPL(hidraw_connect);
+ void hidraw_disconnect(struct hid_device *hid)
+ {
+ struct hidraw *hidraw = hid->hidraw;
+-
+- mutex_lock(&minors_lock);
+- hidraw->exist = 0;
+-
+- device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
+-
+- hidraw_table[hidraw->minor] = NULL;
+-
+- if (hidraw->open) {
+- hid_hw_close(hid);
+- wake_up_interruptible(&hidraw->wait);
+- } else {
+- kfree(hidraw);
+- }
+- mutex_unlock(&minors_lock);
++ drop_ref(hidraw, 1);
+ }
+ EXPORT_SYMBOL_GPL(hidraw_disconnect);
+
+@@ -542,21 +522,28 @@ int __init hidraw_init(void)
+
+ if (result < 0) {
+ pr_warn("can't get major number\n");
+- result = 0;
+ goto out;
+ }
+
+ hidraw_class = class_create(THIS_MODULE, "hidraw");
+ if (IS_ERR(hidraw_class)) {
+ result = PTR_ERR(hidraw_class);
+- unregister_chrdev(hidraw_major, "hidraw");
+- goto out;
++ goto error_cdev;
+ }
+
+ cdev_init(&hidraw_cdev, &hidraw_ops);
+- cdev_add(&hidraw_cdev, dev_id, HIDRAW_MAX_DEVICES);
++ result = cdev_add(&hidraw_cdev, dev_id, HIDRAW_MAX_DEVICES);
++ if (result < 0)
++ goto error_class;
++
+ out:
+ return result;
++
++error_class:
++ class_destroy(hidraw_class);
++error_cdev:
++ unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES);
++ goto out;
+ }
+
+ void hidraw_exit(void)
+@@ -568,3 +555,23 @@ void hidraw_exit(void)
+ unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES);
+
+ }
++
++static void drop_ref(struct hidraw *hidraw, int exists_bit)
++{
++ mutex_lock(&minors_lock);
++ if (exists_bit) {
++ hid_hw_close(hidraw->hid);
++ hidraw->exist = 0;
++ if (hidraw->open)
++ wake_up_interruptible(&hidraw->wait);
++ } else {
++ --hidraw->open;
++ }
++
++ if (!hidraw->open && !hidraw->exist) {
++ device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
++ hidraw_table[hidraw->minor] = NULL;
++ kfree(hidraw);
++ }
++ mutex_unlock(&minors_lock);
++}
+diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
+index 5d760f3..08e2947 100644
+--- a/drivers/hwmon/ad7314.c
++++ b/drivers/hwmon/ad7314.c
+@@ -96,10 +96,18 @@ static ssize_t ad7314_show_temperature(struct device *dev,
+ }
+ }
+
++static ssize_t ad7314_show_name(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ return sprintf(buf, "%s\n", to_spi_device(dev)->modalias);
++}
++
++static DEVICE_ATTR(name, S_IRUGO, ad7314_show_name, NULL);
+ static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+ ad7314_show_temperature, NULL, 0);
+
+ static struct attribute *ad7314_attributes[] = {
++ &dev_attr_name.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+ };
+diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
+index 04450f8..685aae6 100644
+--- a/drivers/hwmon/ads7871.c
++++ b/drivers/hwmon/ads7871.c
+@@ -133,6 +133,12 @@ static ssize_t show_voltage(struct device *dev,
+ }
+ }
+
++static ssize_t ads7871_show_name(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ return sprintf(buf, "%s\n", to_spi_device(dev)->modalias);
++}
++
+ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0);
+ static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1);
+ static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2);
+@@ -142,6 +148,8 @@ static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5);
+ static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6);
+ static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7);
+
++static DEVICE_ATTR(name, S_IRUGO, ads7871_show_name, NULL);
++
+ static struct attribute *ads7871_attributes[] = {
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+@@ -151,6 +159,7 @@ static struct attribute *ads7871_attributes[] = {
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_in7_input.dev_attr.attr,
++ &dev_attr_name.attr,
+ NULL
+ };
+
+diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
+index e8e18ca..ac2d6cb 100644
+--- a/drivers/hwmon/fam15h_power.c
++++ b/drivers/hwmon/fam15h_power.c
+@@ -128,12 +128,12 @@ static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4)
+ * counter saturations resulting in bogus power readings.
+ * We correct this value ourselves to cope with older BIOSes.
+ */
+-static DEFINE_PCI_DEVICE_TABLE(affected_device) = {
++static const struct pci_device_id affected_device[] = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
+ { 0 }
+ };
+
+-static void __devinit tweak_runavg_range(struct pci_dev *pdev)
++static void tweak_runavg_range(struct pci_dev *pdev)
+ {
+ u32 val;
+
+@@ -157,6 +157,16 @@ static void __devinit tweak_runavg_range(struct pci_dev *pdev)
+ REG_TDP_RUNNING_AVERAGE, val);
+ }
+
++#ifdef CONFIG_PM
++static int fam15h_power_resume(struct pci_dev *pdev)
++{
++ tweak_runavg_range(pdev);
++ return 0;
++}
++#else
++#define fam15h_power_resume NULL
++#endif
++
+ static void __devinit fam15h_power_init_data(struct pci_dev *f4,
+ struct fam15h_power_data *data)
+ {
+@@ -255,6 +265,7 @@ static struct pci_driver fam15h_power_driver = {
+ .id_table = fam15h_power_id_table,
+ .probe = fam15h_power_probe,
+ .remove = __devexit_p(fam15h_power_remove),
++ .resume = fam15h_power_resume,
+ };
+
+ static int __init fam15h_power_init(void)
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index d4ec371..cd1a843 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -335,6 +335,12 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
+ },
+ {
+ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
++ },
++ },
++ {
++ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
+ },
+diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
+index 6777ca0..73ca321 100644
+--- a/drivers/iommu/intr_remapping.c
++++ b/drivers/iommu/intr_remapping.c
+@@ -752,6 +752,7 @@ int __init parse_ioapics_under_ir(void)
+ {
+ struct dmar_drhd_unit *drhd;
+ int ir_supported = 0;
++ int ioapic_idx;
+
+ for_each_drhd_unit(drhd) {
+ struct intel_iommu *iommu = drhd->iommu;
+@@ -764,13 +765,20 @@ int __init parse_ioapics_under_ir(void)
+ }
+ }
+
+- if (ir_supported && ir_ioapic_num != nr_ioapics) {
+- printk(KERN_WARNING
+- "Not all IO-APIC's listed under remapping hardware\n");
+- return -1;
++ if (!ir_supported)
++ return 0;
++
++ for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
++ int ioapic_id = mpc_ioapic_id(ioapic_idx);
++ if (!map_ioapic_to_ir(ioapic_id)) {
++ pr_err(FW_BUG "ioapic %d has no mapping iommu, "
++ "interrupt remapping will be disabled\n",
++ ioapic_id);
++ return -1;
++ }
+ }
+
+- return ir_supported;
++ return 1;
+ }
+
+ int __init ir_dev_scope_init(void)
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 8e91321..52848ab 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1350,17 +1350,25 @@ static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
+ return q && blk_queue_nonrot(q);
+ }
+
+-static bool dm_table_is_nonrot(struct dm_table *t)
++static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
++ sector_t start, sector_t len, void *data)
++{
++ struct request_queue *q = bdev_get_queue(dev->bdev);
++
++ return q && !blk_queue_add_random(q);
++}
++
++static bool dm_table_all_devices_attribute(struct dm_table *t,
++ iterate_devices_callout_fn func)
+ {
+ struct dm_target *ti;
+ unsigned i = 0;
+
+- /* Ensure that all underlying device are non-rotational. */
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (!ti->type->iterate_devices ||
+- !ti->type->iterate_devices(ti, device_is_nonrot, NULL))
++ !ti->type->iterate_devices(ti, func, NULL))
+ return 0;
+ }
+
+@@ -1392,7 +1400,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ if (!dm_table_discard_zeroes_data(t))
+ q->limits.discard_zeroes_data = 0;
+
+- if (dm_table_is_nonrot(t))
++ /* Ensure that all underlying devices are non-rotational. */
++ if (dm_table_all_devices_attribute(t, device_is_nonrot))
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ else
+ queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+@@ -1400,6 +1409,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ dm_table_set_integrity(t);
+
+ /*
++ * Determine whether or not this queue's I/O timings contribute
++ * to the entropy pool, Only request-based targets use this.
++ * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
++ * have it set.
++ */
++ if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
++ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
++
++ /*
+ * QUEUE_FLAG_STACKABLE must be set after all queue settings are
+ * visible to other CPUs because, once the flag is set, incoming bios
+ * are processed by request-based dm, which refers to the queue
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 4720f68..502dcf7 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -866,10 +866,14 @@ static void dm_done(struct request *clone, int error, bool mapped)
+ {
+ int r = error;
+ struct dm_rq_target_io *tio = clone->end_io_data;
+- dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
++ dm_request_endio_fn rq_end_io = NULL;
+
+- if (mapped && rq_end_io)
+- r = rq_end_io(tio->ti, clone, error, &tio->info);
++ if (tio->ti) {
++ rq_end_io = tio->ti->type->rq_end_io;
++
++ if (mapped && rq_end_io)
++ r = rq_end_io(tio->ti, clone, error, &tio->info);
++ }
+
+ if (r <= 0)
+ /* The target wants to complete the I/O */
+@@ -1566,15 +1570,6 @@ static int map_request(struct dm_target *ti, struct request *clone,
+ int r, requeued = 0;
+ struct dm_rq_target_io *tio = clone->end_io_data;
+
+- /*
+- * Hold the md reference here for the in-flight I/O.
+- * We can't rely on the reference count by device opener,
+- * because the device may be closed during the request completion
+- * when all bios are completed.
+- * See the comment in rq_completed() too.
+- */
+- dm_get(md);
+-
+ tio->ti = ti;
+ r = ti->type->map_rq(ti, clone, &tio->info);
+ switch (r) {
+@@ -1606,6 +1601,26 @@ static int map_request(struct dm_target *ti, struct request *clone,
+ return requeued;
+ }
+
++static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
++{
++ struct request *clone;
++
++ blk_start_request(orig);
++ clone = orig->special;
++ atomic_inc(&md->pending[rq_data_dir(clone)]);
++
++ /*
++ * Hold the md reference here for the in-flight I/O.
++ * We can't rely on the reference count by device opener,
++ * because the device may be closed during the request completion
++ * when all bios are completed.
++ * See the comment in rq_completed() too.
++ */
++ dm_get(md);
++
++ return clone;
++}
++
+ /*
+ * q->request_fn for request-based dm.
+ * Called with the queue lock held.
+@@ -1635,14 +1650,21 @@ static void dm_request_fn(struct request_queue *q)
+ pos = blk_rq_pos(rq);
+
+ ti = dm_table_find_target(map, pos);
+- BUG_ON(!dm_target_is_valid(ti));
++ if (!dm_target_is_valid(ti)) {
++ /*
++ * Must perform setup, that dm_done() requires,
++ * before calling dm_kill_unmapped_request
++ */
++ DMERR_LIMIT("request attempted access beyond the end of device");
++ clone = dm_start_request(md, rq);
++ dm_kill_unmapped_request(clone, -EIO);
++ continue;
++ }
+
+ if (ti->type->busy && ti->type->busy(ti))
+ goto delay_and_out;
+
+- blk_start_request(rq);
+- clone = rq->special;
+- atomic_inc(&md->pending[rq_data_dir(clone)]);
++ clone = dm_start_request(md, rq);
+
+ spin_unlock(q->queue_lock);
+ if (map_request(ti, clone, md))
+@@ -1662,8 +1684,6 @@ delay_and_out:
+ blk_delay_queue(q, HZ / 10);
+ out:
+ dm_table_put(map);
+-
+- return;
+ }
+
+ int dm_underlying_device_busy(struct request_queue *q)
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 7a9eef6..0634ee5 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1226,14 +1226,16 @@ static int enough(struct r10conf *conf, int ignore)
+ do {
+ int n = conf->copies;
+ int cnt = 0;
++ int this = first;
+ while (n--) {
+- if (conf->mirrors[first].rdev &&
+- first != ignore)
++ if (conf->mirrors[this].rdev &&
++ this != ignore)
+ cnt++;
+- first = (first+1) % conf->raid_disks;
++ this = (this+1) % conf->raid_disks;
+ }
+ if (cnt == 0)
+ return 0;
++ first = (first + conf->near_copies) % conf->raid_disks;
+ } while (first != 0);
+ return 1;
+ }
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 6ce32a7..aaeaff2 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2712,8 +2712,9 @@ int sdhci_add_host(struct sdhci_host *host)
+ mmc_card_is_removable(mmc))
+ mmc->caps |= MMC_CAP_NEEDS_POLL;
+
+- /* UHS-I mode(s) supported by the host controller. */
+- if (host->version >= SDHCI_SPEC_300)
++ /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
++ if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
++ SDHCI_SUPPORT_DDR50))
+ mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
+
+ /* SDR104 supports also implies SDR50 support */
+diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
+index 32778d5..46194bc 100644
+--- a/drivers/net/can/janz-ican3.c
++++ b/drivers/net/can/janz-ican3.c
+@@ -1250,7 +1250,6 @@ static irqreturn_t ican3_irq(int irq, void *dev_id)
+ */
+ static int ican3_reset_module(struct ican3_dev *mod)
+ {
+- u8 val = 1 << mod->num;
+ unsigned long start;
+ u8 runold, runnew;
+
+@@ -1264,8 +1263,7 @@ static int ican3_reset_module(struct ican3_dev *mod)
+ runold = ioread8(mod->dpm + TARGET_RUNNING);
+
+ /* reset the module */
+- iowrite8(val, &mod->ctrl->reset_assert);
+- iowrite8(val, &mod->ctrl->reset_deassert);
++ iowrite8(0x00, &mod->dpmctrl->hwreset);
+
+ /* wait until the module has finished resetting and is running */
+ start = jiffies;
+diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
+index 2adc294..79c70ae 100644
+--- a/drivers/net/can/ti_hecc.c
++++ b/drivers/net/can/ti_hecc.c
+@@ -971,12 +971,12 @@ static int __devexit ti_hecc_remove(struct platform_device *pdev)
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ti_hecc_priv *priv = netdev_priv(ndev);
+
++ unregister_candev(ndev);
+ clk_disable(priv->clk);
+ clk_put(priv->clk);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iounmap(priv->base);
+ release_mem_region(res->start, resource_size(res));
+- unregister_candev(ndev);
+ free_candev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 2c1a5c0..4c50ac0 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -554,14 +554,16 @@ static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
+ static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+ struct bnx2x_fastpath *fp)
+ {
+- /* Do nothing if no IP/L4 csum validation was done */
+-
++ /* Do nothing if no L4 csum validation was done.
++ * We do not check whether IP csum was validated. For IPv4 we assume
++ * that if the card got as far as validating the L4 csum, it also
++ * validated the IP csum. IPv6 has no IP csum.
++ */
+ if (cqe->fast_path_cqe.status_flags &
+- (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
+- ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
++ ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
+ return;
+
+- /* If both IP/L4 validation were done, check if an error was found. */
++ /* If L4 validation was done, check if an error was found. */
+
+ if (cqe->fast_path_cqe.type_error_flags &
+ (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 6b258d9..01bc102 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -14013,9 +14013,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3) ||
+- (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
++ tp->fw_needed) {
++ /* For firmware TSO, assume ASF is disabled.
++ * We'll disable TSO later if we discover ASF
++ * is enabled in tg3_get_eeprom_hw_cfg().
++ */
+ tg3_flag_set(tp, TSO_CAPABLE);
+- else {
++ } else {
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ tg3_flag_clear(tp, TSO_BUG);
+ tp->fw_needed = NULL;
+@@ -14290,6 +14294,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
+ */
+ tg3_get_eeprom_hw_cfg(tp);
+
++ if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
++ tg3_flag_clear(tp, TSO_CAPABLE);
++ tg3_flag_clear(tp, TSO_BUG);
++ tp->fw_needed = NULL;
++ }
++
+ if (tg3_flag(tp, ENABLE_APE)) {
+ /* Allow reads and writes to the
+ * APE register and memory space.
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+index 8cf3173..da5204d 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+@@ -1351,6 +1351,10 @@ static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
+ struct pci_dev *root = pdev->bus->self;
+ u32 aer_pos;
+
++ /* root bus? */
++ if (!root)
++ return;
++
+ if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM &&
+ adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP)
+ return;
+diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
+index c97d2f5..bfc3b0d 100644
+--- a/drivers/net/ethernet/ti/davinci_cpdma.c
++++ b/drivers/net/ethernet/ti/davinci_cpdma.c
+@@ -851,6 +851,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
+
+ next_dma = desc_read(desc, hw_next);
+ chan->head = desc_from_phys(pool, next_dma);
++ chan->count--;
+ chan->stats.teardown_dequeue++;
+
+ /* issue callback without locks held */
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index bc9a4bb..1161584 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -576,7 +576,7 @@ static int pppoe_release(struct socket *sock)
+
+ po = pppox_sk(sk);
+
+- if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
++ if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
+ dev_put(po->pppoe_dev);
+ po->pppoe_dev = NULL;
+ }
+diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
+index fc147a5..6729585 100644
+--- a/drivers/net/usb/asix.c
++++ b/drivers/net/usb/asix.c
+@@ -1648,6 +1648,10 @@ static const struct usb_device_id products [] = {
+ USB_DEVICE (0x2001, 0x3c05),
+ .driver_info = (unsigned long) &ax88772_info,
+ }, {
++ // DLink DUB-E100 H/W Ver C1
++ USB_DEVICE (0x2001, 0x1a02),
++ .driver_info = (unsigned long) &ax88772_info,
++}, {
+ // Linksys USB1000
+ USB_DEVICE (0x1737, 0x0039),
+ .driver_info = (unsigned long) &ax88178_info,
+diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
+index 864448b..e773250 100644
+--- a/drivers/net/usb/sierra_net.c
++++ b/drivers/net/usb/sierra_net.c
+@@ -678,7 +678,7 @@ static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
+ return -EIO;
+ }
+
+- *datap = *attrdata;
++ *datap = le16_to_cpu(*attrdata);
+
+ kfree(attrdata);
+ return result;
+diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
+index aaaca9a..3f575af 100644
+--- a/drivers/net/wan/ixp4xx_hss.c
++++ b/drivers/net/wan/ixp4xx_hss.c
+@@ -10,6 +10,7 @@
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
++#include <linux/module.h>
+ #include <linux/bitops.h>
+ #include <linux/cdev.h>
+ #include <linux/dma-mapping.h>
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+index 8918261..746202f 100644
+--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+@@ -775,8 +775,11 @@ static void brcmf_c_arp_offload_set(struct brcmf_pub *drvr, int arp_mode)
+ {
+ char iovbuf[32];
+ int retcode;
++ __le32 arp_mode_le;
+
+- brcmf_c_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
++ arp_mode_le = cpu_to_le32(arp_mode);
++ brcmf_c_mkiovar("arp_ol", (char *)&arp_mode_le, 4, iovbuf,
++ sizeof(iovbuf));
+ retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
+ iovbuf, sizeof(iovbuf));
+ retcode = retcode >= 0 ? 0 : retcode;
+@@ -792,8 +795,11 @@ static void brcmf_c_arp_offload_enable(struct brcmf_pub *drvr, int arp_enable)
+ {
+ char iovbuf[32];
+ int retcode;
++ __le32 arp_enable_le;
+
+- brcmf_c_mkiovar("arpoe", (char *)&arp_enable, 4,
++ arp_enable_le = cpu_to_le32(arp_enable);
++
++ brcmf_c_mkiovar("arpoe", (char *)&arp_enable_le, 4,
+ iovbuf, sizeof(iovbuf));
+ retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
+ iovbuf, sizeof(iovbuf));
+@@ -814,10 +820,10 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
+ char buf[128], *ptr;
+ u32 dongle_align = BRCMF_SDALIGN;
+ u32 glom = 0;
+- u32 roaming = 1;
+- uint bcn_timeout = 3;
+- int scan_assoc_time = 40;
+- int scan_unassoc_time = 40;
++ __le32 roaming_le = cpu_to_le32(1);
++ __le32 bcn_timeout_le = cpu_to_le32(3);
++ __le32 scan_assoc_time_le = cpu_to_le32(40);
++ __le32 scan_unassoc_time_le = cpu_to_le32(40);
+ int i;
+
+ brcmf_os_proto_block(drvr);
+@@ -852,14 +858,14 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
+
+ /* Setup timeout if Beacons are lost and roam is off to report
+ link down */
+- brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf,
++ brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout_le, 4, iovbuf,
+ sizeof(iovbuf));
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
+ sizeof(iovbuf));
+
+ /* Enable/Disable build-in roaming to allowed ext supplicant to take
+ of romaing */
+- brcmf_c_mkiovar("roam_off", (char *)&roaming, 4,
++ brcmf_c_mkiovar("roam_off", (char *)&roaming_le, 4,
+ iovbuf, sizeof(iovbuf));
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
+ sizeof(iovbuf));
+@@ -874,9 +880,9 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
+ sizeof(iovbuf));
+
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_CHANNEL_TIME,
+- (char *)&scan_assoc_time, sizeof(scan_assoc_time));
++ (char *)&scan_assoc_time_le, sizeof(scan_assoc_time_le));
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_UNASSOC_TIME,
+- (char *)&scan_unassoc_time, sizeof(scan_unassoc_time));
++ (char *)&scan_unassoc_time_le, sizeof(scan_unassoc_time_le));
+
+ /* Set and enable ARP offload feature */
+ brcmf_c_arp_offload_set(drvr, BRCMF_ARPOL_MODE);
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+index 5eddabe..e4e326a 100644
+--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+@@ -498,8 +498,10 @@ static void wl_iscan_prep(struct brcmf_scan_params_le *params_le,
+ params_le->active_time = cpu_to_le32(-1);
+ params_le->passive_time = cpu_to_le32(-1);
+ params_le->home_time = cpu_to_le32(-1);
+- if (ssid && ssid->SSID_len)
+- memcpy(&params_le->ssid_le, ssid, sizeof(struct brcmf_ssid));
++ if (ssid && ssid->SSID_len) {
++ params_le->ssid_le.SSID_len = cpu_to_le32(ssid->SSID_len);
++ memcpy(&params_le->ssid_le.SSID, ssid->SSID, ssid->SSID_len);
++ }
+ }
+
+ static s32
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+index 9fc804d..7305a47 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
++++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+@@ -117,6 +117,7 @@
+
+ #define CHIP_VER_B BIT(4)
+ #define CHIP_92C_BITMASK BIT(0)
++#define CHIP_UNKNOWN BIT(7)
+ #define CHIP_92C_1T2R 0x03
+ #define CHIP_92C 0x01
+ #define CHIP_88C 0x00
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+index a3deaef..cb480d8 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+@@ -1001,8 +1001,16 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
+ version = (value32 & TYPE_ID) ? VERSION_A_CHIP_92C :
+ VERSION_A_CHIP_88C;
+ } else {
+- version = (value32 & TYPE_ID) ? VERSION_B_CHIP_92C :
+- VERSION_B_CHIP_88C;
++ version = (enum version_8192c) (CHIP_VER_B |
++ ((value32 & TYPE_ID) ? CHIP_92C_BITMASK : 0) |
++ ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0));
++ if ((!IS_CHIP_VENDOR_UMC(version)) && (value32 &
++ CHIP_VER_RTL_MASK)) {
++ version = (enum version_8192c)(version |
++ ((((value32 & CHIP_VER_RTL_MASK) == BIT(12))
++ ? CHIP_VENDOR_UMC_B_CUT : CHIP_UNKNOWN) |
++ CHIP_VENDOR_UMC));
++ }
+ }
+
+ switch (version) {
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+index f2aa33d..df852e8 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+@@ -165,12 +165,14 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
+
+ /* request fw */
+ if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
+- !IS_92C_SERIAL(rtlhal->version))
++ !IS_92C_SERIAL(rtlhal->version)) {
+ fw_name = "rtlwifi/rtl8192cfwU.bin";
+- else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
++ } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
+ fw_name = "rtlwifi/rtl8192cfwU_B.bin";
+- else
++ pr_info("****** This B_CUT device may not work with kernels 3.6 and earlier\n");
++ } else {
+ fw_name = rtlpriv->cfg->fw_name;
++ }
+ err = request_firmware(&firmware, fw_name, rtlpriv->io.dev);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index 9ddf69e..74d38ca 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -132,6 +132,15 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
+ if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
+ return AE_OK;
+
++ status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
++ if (ACPI_FAILURE(status)) {
++ warn("can't evaluate _ADR (%#x)\n", status);
++ return AE_OK;
++ }
++
++ device = (adr >> 16) & 0xffff;
++ function = adr & 0xffff;
++
+ pdev = pbus->self;
+ if (pdev && pci_is_pcie(pdev)) {
+ tmp = acpi_find_root_bridge_handle(pdev);
+@@ -144,10 +153,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
+ }
+ }
+
+- acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
+- device = (adr >> 16) & 0xffff;
+- function = adr & 0xffff;
+-
+ newfunc = kzalloc(sizeof(struct acpiphp_func), GFP_KERNEL);
+ if (!newfunc)
+ return AE_NO_MEMORY;
+diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
+index edaccad..f75a4c8 100644
+--- a/drivers/platform/x86/asus-laptop.c
++++ b/drivers/platform/x86/asus-laptop.c
+@@ -823,9 +823,9 @@ static ssize_t show_infos(struct device *dev,
+ * The significance of others is yet to be found.
+ * If we don't find the method, we assume the device are present.
+ */
+- rv = acpi_evaluate_integer(asus->handle, "HRWS", NULL, &temp);
++ rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp);
+ if (!ACPI_FAILURE(rv))
+- len += sprintf(page + len, "HRWS value : %#x\n",
++ len += sprintf(page + len, "HWRS value : %#x\n",
+ (uint) temp);
+ /*
+ * Another value for userspace: the ASYM method returns 0x02 for
+@@ -1660,9 +1660,9 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
+ * The significance of others is yet to be found.
+ */
+ status =
+- acpi_evaluate_integer(asus->handle, "HRWS", NULL, &hwrs_result);
++ acpi_evaluate_integer(asus->handle, "HWRS", NULL, &hwrs_result);
+ if (!ACPI_FAILURE(status))
+- pr_notice(" HRWS returned %x", (int)hwrs_result);
++ pr_notice(" HWRS returned %x", (int)hwrs_result);
+
+ if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
+ asus->have_rsts = true;
+diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
+index 20687d5..a3e98f1 100644
+--- a/drivers/rtc/rtc-twl.c
++++ b/drivers/rtc/rtc-twl.c
+@@ -462,6 +462,11 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
+ goto out1;
+ }
+
++ /* ensure interrupts are disabled, bootloaders can be strange */
++ ret = twl_rtc_write_u8(0, REG_RTC_INTERRUPTS_REG);
++ if (ret < 0)
++ dev_warn(&pdev->dev, "unable to disable interrupt\n");
++
+ /* init cached IRQ enable bits */
+ ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
+ if (ret < 0)
+diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
+index 1ad0b82..1069974 100644
+--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
++++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
+@@ -1264,6 +1264,9 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
+ int rc = 0;
+ u64 mask64;
+
++ memset(&iscsi_init, 0x00, sizeof(struct iscsi_kwqe_init1));
++ memset(&iscsi_init2, 0x00, sizeof(struct iscsi_kwqe_init2));
++
+ bnx2i_adjust_qp_size(hba);
+
+ iscsi_init.flags =
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index b4d2c86..be9aad8 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -1213,8 +1213,9 @@ static void complete_scsi_command(struct CommandList *cp)
+ }
+ break;
+ case CMD_PROTOCOL_ERR:
++ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "cp %p has "
+- "protocol error \n", cp);
++ "protocol error\n", cp);
+ break;
+ case CMD_HARDWARE_ERR:
+ cmd->result = DID_ERROR << 16;
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
+index 98cb5e6..17de348 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -1156,6 +1156,13 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
+ u16 message_control;
+
+
++ /* Check whether controller SAS2008 B0 controller,
++ if it is SAS2008 B0 controller use IO-APIC instead of MSIX */
++ if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
++ ioc->pdev->revision == 0x01) {
++ return -EINVAL;
++ }
++
+ base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
+ if (!base) {
+ dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 597fb9b..34d114a 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -3039,15 +3039,20 @@ static int transport_generic_cmd_sequencer(
+ /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
+ goto out_invalid_cdb_field;
+ }
+-
++ /*
++ * For the overflow case keep the existing fabric provided
++ * ->data_length. Otherwise for the underflow case, reset
++ * ->data_length to the smaller SCSI expected data transfer
++ * length.
++ */
+ if (size > cmd->data_length) {
+ cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
+ cmd->residual_count = (size - cmd->data_length);
+ } else {
+ cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ cmd->residual_count = (cmd->data_length - size);
++ cmd->data_length = size;
+ }
+- cmd->data_length = size;
+ }
+
+ /* reject any command that we don't have a handler for */
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index 08b92a6..8d70fbc 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -236,6 +236,9 @@ struct eg20t_port {
+ int tx_dma_use;
+ void *rx_buf_virt;
+ dma_addr_t rx_buf_dma;
++
++ /* protect the eg20t_port private structure and io access to membase */
++ spinlock_t lock;
+ };
+
+ /**
+@@ -964,7 +967,7 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
+ unsigned int iid;
+ unsigned long flags;
+
+- spin_lock_irqsave(&priv->port.lock, flags);
++ spin_lock_irqsave(&priv->lock, flags);
+ handled = 0;
+ while ((iid = pch_uart_hal_get_iid(priv)) > 1) {
+ switch (iid) {
+@@ -1017,7 +1020,7 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
+ priv->int_dis_flag = 0;
+ }
+
+- spin_unlock_irqrestore(&priv->port.lock, flags);
++ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_RETVAL(handled);
+ }
+
+@@ -1131,9 +1134,9 @@ static void pch_uart_break_ctl(struct uart_port *port, int ctl)
+ unsigned long flags;
+
+ priv = container_of(port, struct eg20t_port, port);
+- spin_lock_irqsave(&port->lock, flags);
++ spin_lock_irqsave(&priv->lock, flags);
+ pch_uart_hal_set_break(priv, ctl);
+- spin_unlock_irqrestore(&port->lock, flags);
++ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ /* Grab any interrupt resources and initialise any low level driver state. */
+@@ -1284,7 +1287,8 @@ static void pch_uart_set_termios(struct uart_port *port,
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
+
+- spin_lock_irqsave(&port->lock, flags);
++ spin_lock_irqsave(&priv->lock, flags);
++ spin_lock(&port->lock);
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+ rtn = pch_uart_hal_set_line(priv, baud, parity, bits, stb);
+@@ -1297,7 +1301,8 @@ static void pch_uart_set_termios(struct uart_port *port,
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+ out:
+- spin_unlock_irqrestore(&port->lock, flags);
++ spin_unlock(&port->lock);
++ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ static const char *pch_uart_type(struct uart_port *port)
+@@ -1449,6 +1454,8 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
+ pci_enable_msi(pdev);
+ pci_set_master(pdev);
+
++ spin_lock_init(&priv->lock);
++
+ iobase = pci_resource_start(pdev, 0);
+ mapbase = pci_resource_start(pdev, 1);
+ priv->mapbase = mapbase;
+diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
+index d956965..3440812 100644
+--- a/drivers/usb/core/devices.c
++++ b/drivers/usb/core/devices.c
+@@ -624,7 +624,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
+ /* print devices for all busses */
+ list_for_each_entry(bus, &usb_bus_list, bus_list) {
+ /* recurse through all children of the root hub */
+- if (!bus->root_hub)
++ if (!bus_to_hcd(bus)->rh_registered)
+ continue;
+ usb_lock_device(bus->root_hub);
+ ret = usb_device_dump(&buf, &nbytes, &skip_bytes, ppos,
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 8cb9304..032e5a6 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1002,10 +1002,7 @@ static int register_root_hub(struct usb_hcd *hcd)
+ if (retval) {
+ dev_err (parent_dev, "can't register root hub for %s, %d\n",
+ dev_name(&usb_dev->dev), retval);
+- }
+- mutex_unlock(&usb_bus_list_lock);
+-
+- if (retval == 0) {
++ } else {
+ spin_lock_irq (&hcd_root_hub_lock);
+ hcd->rh_registered = 1;
+ spin_unlock_irq (&hcd_root_hub_lock);
+@@ -1014,6 +1011,7 @@ static int register_root_hub(struct usb_hcd *hcd)
+ if (HCD_DEAD(hcd))
+ usb_hc_died (hcd); /* This time clean up */
+ }
++ mutex_unlock(&usb_bus_list_lock);
+
+ return retval;
+ }
+diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
+index 527736e..d584eaf 100644
+--- a/drivers/usb/gadget/dummy_hcd.c
++++ b/drivers/usb/gadget/dummy_hcd.c
+@@ -2292,10 +2292,8 @@ static int dummy_hcd_probe(struct platform_device *pdev)
+ hs_hcd->has_tt = 1;
+
+ retval = usb_add_hcd(hs_hcd, 0, 0);
+- if (retval != 0) {
+- usb_put_hcd(hs_hcd);
+- return retval;
+- }
++ if (retval)
++ goto put_usb2_hcd;
+
+ if (mod_data.is_super_speed) {
+ ss_hcd = usb_create_shared_hcd(&dummy_hcd, &pdev->dev,
+@@ -2314,6 +2312,8 @@ static int dummy_hcd_probe(struct platform_device *pdev)
+ put_usb3_hcd:
+ usb_put_hcd(ss_hcd);
+ dealloc_usb2_hcd:
++ usb_remove_hcd(hs_hcd);
++put_usb2_hcd:
+ usb_put_hcd(hs_hcd);
+ the_controller.hs_hcd = the_controller.ss_hcd = NULL;
+ return retval;
+diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
+index 3c166d3..f62be89 100644
+--- a/drivers/watchdog/hpwdt.c
++++ b/drivers/watchdog/hpwdt.c
+@@ -813,6 +813,9 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev,
+ hpwdt_timer_reg = pci_mem_addr + 0x70;
+ hpwdt_timer_con = pci_mem_addr + 0x72;
+
++ /* Make sure that timer is disabled until /dev/watchdog is opened */
++ hpwdt_stop();
++
+ /* Make sure that we have a valid soft_margin */
+ if (hpwdt_change_timer(soft_margin))
+ hpwdt_change_timer(DEFAULT_MARGIN);
+diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
+index 1b2e180..667776e 100644
+--- a/fs/cifs/cifs_unicode.c
++++ b/fs/cifs/cifs_unicode.c
+@@ -327,6 +327,6 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen,
+ }
+
+ ctoUCS_out:
+- return i;
++ return j;
+ }
+
+diff --git a/fs/dcache.c b/fs/dcache.c
+index eb723d3..63c0c6b 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -311,7 +311,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
+ * Inform try_to_ascend() that we are no longer attached to the
+ * dentry tree
+ */
+- dentry->d_flags |= DCACHE_DISCONNECTED;
++ dentry->d_flags |= DCACHE_DENTRY_KILLED;
+ if (parent)
+ spin_unlock(&parent->d_lock);
+ dentry_iput(dentry);
+@@ -968,7 +968,7 @@ static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq
+ * or deletion
+ */
+ if (new != old->d_parent ||
+- (old->d_flags & DCACHE_DISCONNECTED) ||
++ (old->d_flags & DCACHE_DENTRY_KILLED) ||
+ (!locked && read_seqretry(&rename_lock, seq))) {
+ spin_unlock(&new->d_lock);
+ new = NULL;
+@@ -1054,6 +1054,8 @@ positive:
+ return 1;
+
+ rename_retry:
++ if (locked)
++ goto again;
+ locked = 1;
+ write_seqlock(&rename_lock);
+ goto again;
+@@ -1156,6 +1158,8 @@ out:
+ rename_retry:
+ if (found)
+ return found;
++ if (locked)
++ goto again;
+ locked = 1;
+ write_seqlock(&rename_lock);
+ goto again;
+@@ -2922,6 +2926,8 @@ resume:
+ return;
+
+ rename_retry:
++ if (locked)
++ goto again;
+ locked = 1;
+ write_seqlock(&rename_lock);
+ goto again;
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 53c3bce..0be1aa4 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -123,9 +123,6 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
+
+ err = ERR_PTR(-ENOMEM);
+ inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
+- if (h)
+- sysctl_head_finish(h);
+-
+ if (!inode)
+ goto out;
+
+@@ -134,6 +131,8 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
+ d_add(dentry, inode);
+
+ out:
++ if (h)
++ sysctl_head_finish(h);
+ sysctl_head_finish(head);
+ return err;
+ }
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index 4eb8c80..1dfe974 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -219,6 +219,8 @@ struct dentry_operations {
+ #define DCACHE_MANAGED_DENTRY \
+ (DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
+
++#define DCACHE_DENTRY_KILLED 0x100000
++
+ extern seqlock_t rename_lock;
+
+ static inline int dname_external(struct dentry *dentry)
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index c235e4e..331e2ef 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -875,7 +875,7 @@ static inline int hid_hw_power(struct hid_device *hdev, int level)
+ return hdev->ll_driver->power ? hdev->ll_driver->power(hdev, level) : 0;
+ }
+
+-void hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
++int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+ int interrupt);
+
+ extern int hid_generic_init(void);
+diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h
+index 4b88e69..45e9fcb 100644
+--- a/include/linux/hidraw.h
++++ b/include/linux/hidraw.h
+@@ -76,13 +76,13 @@ struct hidraw_list {
+ #ifdef CONFIG_HIDRAW
+ int hidraw_init(void);
+ void hidraw_exit(void);
+-void hidraw_report_event(struct hid_device *, u8 *, int);
++int hidraw_report_event(struct hid_device *, u8 *, int);
+ int hidraw_connect(struct hid_device *);
+ void hidraw_disconnect(struct hid_device *);
+ #else
+ static inline int hidraw_init(void) { return 0; }
+ static inline void hidraw_exit(void) { }
+-static inline void hidraw_report_event(struct hid_device *hid, u8 *data, int len) { }
++static inline int hidraw_report_event(struct hid_device *hid, u8 *data, int len) { return 0; }
+ static inline int hidraw_connect(struct hid_device *hid) { return -1; }
+ static inline void hidraw_disconnect(struct hid_device *hid) { }
+ #endif
+diff --git a/include/linux/memory.h b/include/linux/memory.h
+index 935699b..6bea2c2 100644
+--- a/include/linux/memory.h
++++ b/include/linux/memory.h
+@@ -20,7 +20,7 @@
+ #include <linux/compiler.h>
+ #include <linux/mutex.h>
+
+-#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
++#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS)
+
+ struct memory_block {
+ unsigned long start_section_nr;
+diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
+index 22e61fd..28e493b 100644
+--- a/include/linux/xfrm.h
++++ b/include/linux/xfrm.h
+@@ -84,6 +84,8 @@ struct xfrm_replay_state {
+ __u32 bitmap;
+ };
+
++#define XFRMA_REPLAY_ESN_MAX 4096
++
+ struct xfrm_replay_state_esn {
+ unsigned int bmp_len;
+ __u32 oseq;
+diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h
+index 15b97d5..fe810d4 100644
+--- a/include/net/bluetooth/smp.h
++++ b/include/net/bluetooth/smp.h
+@@ -131,7 +131,7 @@ struct smp_chan {
+ };
+
+ /* SMP Commands */
+-int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level);
++int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
+ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb);
+ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force);
+
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index b203e14..921f627 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -269,6 +269,9 @@ struct xfrm_replay {
+ int (*check)(struct xfrm_state *x,
+ struct sk_buff *skb,
+ __be32 net_seq);
++ int (*recheck)(struct xfrm_state *x,
++ struct sk_buff *skb,
++ __be32 net_seq);
+ void (*notify)(struct xfrm_state *x, int event);
+ int (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
+ };
+diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
+index a9c87ad..a9536da 100644
+--- a/include/trace/events/kmem.h
++++ b/include/trace/events/kmem.h
+@@ -214,7 +214,7 @@ TRACE_EVENT(mm_page_alloc,
+
+ TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
+ __entry->page,
+- page_to_pfn(__entry->page),
++ __entry->page ? page_to_pfn(__entry->page) : 0,
+ __entry->order,
+ __entry->migratetype,
+ show_gfp_flags(__entry->gfp_flags))
+@@ -240,7 +240,7 @@ DECLARE_EVENT_CLASS(mm_page,
+
+ TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
+ __entry->page,
+- page_to_pfn(__entry->page),
++ __entry->page ? page_to_pfn(__entry->page) : 0,
+ __entry->order,
+ __entry->migratetype,
+ __entry->order == 0)
+diff --git a/kernel/async.c b/kernel/async.c
+index 80b74b8..009f516 100644
+--- a/kernel/async.c
++++ b/kernel/async.c
+@@ -88,6 +88,13 @@ static async_cookie_t __lowest_in_progress(struct list_head *running)
+ {
+ struct async_entry *entry;
+
++ if (!running) { /* just check the entry count */
++ if (atomic_read(&entry_count))
++ return 0; /* smaller than any cookie */
++ else
++ return next_cookie;
++ }
++
+ if (!list_empty(running)) {
+ entry = list_first_entry(running,
+ struct async_entry, list);
+@@ -238,9 +245,7 @@ EXPORT_SYMBOL_GPL(async_schedule_domain);
+ */
+ void async_synchronize_full(void)
+ {
+- do {
+- async_synchronize_cookie(next_cookie);
+- } while (!list_empty(&async_running) || !list_empty(&async_pending));
++ async_synchronize_cookie_domain(next_cookie, NULL);
+ }
+ EXPORT_SYMBOL_GPL(async_synchronize_full);
+
+@@ -260,7 +265,7 @@ EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
+ /**
+ * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
+ * @cookie: async_cookie_t to use as checkpoint
+- * @running: running list to synchronize on
++ * @running: running list to synchronize on, NULL indicates all lists
+ *
+ * This function waits until all asynchronous function calls for the
+ * synchronization domain specified by the running list @list submitted
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 46a1d3c..84a524b 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -2080,6 +2080,9 @@ static void scan_for_empty_cpusets(struct cpuset *root)
+ * (of no affect) on systems that are actively using CPU hotplug
+ * but making no active use of cpusets.
+ *
++ * The only exception to this is suspend/resume, where we don't
++ * modify cpusets at all.
++ *
+ * This routine ensures that top_cpuset.cpus_allowed tracks
+ * cpu_active_mask on each CPU hotplug (cpuhp) event.
+ *
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 5a8a66e..234e152 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -1019,6 +1019,22 @@ NORET_TYPE void do_exit(long code)
+
+ preempt_disable();
+ exit_rcu();
++
++ /*
++ * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
++ * when the following two conditions become true.
++ * - There is race condition of mmap_sem (It is acquired by
++ * exit_mm()), and
++ * - SMI occurs before setting TASK_RUNINNG.
++ * (or hypervisor of virtual machine switches to other guest)
++ * As a result, we may become TASK_RUNNING after becoming TASK_DEAD
++ *
++ * To avoid it, we have to wait for releasing tsk->pi_lock which
++ * is held by try_to_wake_up()
++ */
++ smp_mb();
++ raw_spin_unlock_wait(&tsk->pi_lock);
++
+ /* causes final put_task_struct in finish_task_switch(). */
+ tsk->state = TASK_DEAD;
+ schedule();
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 910db7d..fcc893f 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -8192,34 +8192,66 @@ int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
+ }
+ #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
+
++static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
++
+ /*
+ * Update cpusets according to cpu_active mask. If cpusets are
+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
+ * around partition_sched_domains().
++ *
++ * If we come here as part of a suspend/resume, don't touch cpusets because we
++ * want to restore it back to its original state upon resume anyway.
+ */
+ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
+ {
+- switch (action & ~CPU_TASKS_FROZEN) {
++ switch (action) {
++ case CPU_ONLINE_FROZEN:
++ case CPU_DOWN_FAILED_FROZEN:
++
++ /*
++ * num_cpus_frozen tracks how many CPUs are involved in suspend
++ * resume sequence. As long as this is not the last online
++ * operation in the resume sequence, just build a single sched
++ * domain, ignoring cpusets.
++ */
++ num_cpus_frozen--;
++ if (likely(num_cpus_frozen)) {
++ partition_sched_domains(1, NULL, NULL);
++ break;
++ }
++
++ /*
++ * This is the last CPU online operation. So fall through and
++ * restore the original sched domains by considering the
++ * cpuset configurations.
++ */
++
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ cpuset_update_active_cpus();
+- return NOTIFY_OK;
++ break;
+ default:
+ return NOTIFY_DONE;
+ }
++ return NOTIFY_OK;
+ }
+
+ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
+ {
+- switch (action & ~CPU_TASKS_FROZEN) {
++ switch (action) {
+ case CPU_DOWN_PREPARE:
+ cpuset_update_active_cpus();
+- return NOTIFY_OK;
++ break;
++ case CPU_DOWN_PREPARE_FROZEN:
++ num_cpus_frozen++;
++ partition_sched_domains(1, NULL, NULL);
++ break;
+ default:
+ return NOTIFY_DONE;
+ }
++ return NOTIFY_OK;
+ }
+
+ static int update_runtime(struct notifier_block *nfb,
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 979d4de..b413138 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3627,18 +3627,17 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
+ #ifdef CONFIG_SMP
+
+ struct work_for_cpu {
+- struct completion completion;
++ struct work_struct work;
+ long (*fn)(void *);
+ void *arg;
+ long ret;
+ };
+
+-static int do_work_for_cpu(void *_wfc)
++static void work_for_cpu_fn(struct work_struct *work)
+ {
+- struct work_for_cpu *wfc = _wfc;
++ struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
++
+ wfc->ret = wfc->fn(wfc->arg);
+- complete(&wfc->completion);
+- return 0;
+ }
+
+ /**
+@@ -3653,19 +3652,11 @@ static int do_work_for_cpu(void *_wfc)
+ */
+ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+ {
+- struct task_struct *sub_thread;
+- struct work_for_cpu wfc = {
+- .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
+- .fn = fn,
+- .arg = arg,
+- };
++ struct work_for_cpu wfc = { .fn = fn, .arg = arg };
+
+- sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
+- if (IS_ERR(sub_thread))
+- return PTR_ERR(sub_thread);
+- kthread_bind(sub_thread, cpu);
+- wake_up_process(sub_thread);
+- wait_for_completion(&wfc.completion);
++ INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
++ schedule_work_on(cpu, &wfc.work);
++ flush_work(&wfc.work);
+ return wfc.ret;
+ }
+ EXPORT_SYMBOL_GPL(work_on_cpu);
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 6629faf..9ad7d1e 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -127,9 +127,6 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
+ struct mem_section *ms;
+ struct page *page, *memmap;
+
+- if (!pfn_valid(start_pfn))
+- return;
+-
+ section_nr = pfn_to_section_nr(start_pfn);
+ ms = __nr_to_section(section_nr);
+
+@@ -188,9 +185,16 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
+ end_pfn = pfn + pgdat->node_spanned_pages;
+
+ /* register_section info */
+- for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
+- register_page_bootmem_info_section(pfn);
+-
++ for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
++ /*
++ * Some platforms can assign the same pfn to multiple nodes - on
++ * node0 as well as nodeN. To avoid registering a pfn against
++ * multiple nodes we check that this pfn does not already
++ * reside in some other node.
++ */
++ if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
++ register_page_bootmem_info_section(pfn);
++ }
+ }
+ #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 6e51bf0..a88dded 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -541,7 +541,7 @@ static inline void __free_one_page(struct page *page,
+ combined_idx = buddy_idx & page_idx;
+ higher_page = page + (combined_idx - page_idx);
+ buddy_idx = __find_buddy_index(combined_idx, order + 1);
+- higher_buddy = page + (buddy_idx - combined_idx);
++ higher_buddy = higher_page + (buddy_idx - combined_idx);
+ if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
+ list_add_tail(&page->lru,
+ &zone->free_area[order].free_list[migratetype]);
+diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
+index f5ffc02..9ddbd4e 100644
+--- a/net/8021q/vlan_core.c
++++ b/net/8021q/vlan_core.c
+@@ -106,7 +106,6 @@ static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
+ return NULL;
+ memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
+ skb->mac_header += VLAN_HLEN;
+- skb_reset_mac_len(skb);
+ return skb;
+ }
+
+@@ -173,6 +172,8 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
+
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
++ skb_reset_mac_len(skb);
++
+ return skb;
+
+ err_free:
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 98bfbd5..1fb1aec 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -44,6 +44,7 @@
+
+ #include <net/bluetooth/bluetooth.h>
+ #include <net/bluetooth/hci_core.h>
++#include <net/bluetooth/smp.h>
+
+ static void hci_le_connect(struct hci_conn *conn)
+ {
+@@ -641,6 +642,9 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
+ {
+ BT_DBG("conn %p", conn);
+
++ if (conn->type == LE_LINK)
++ return smp_conn_security(conn, sec_level);
++
+ /* For sdp we don't need the link key. */
+ if (sec_level == BT_SECURITY_SDP)
+ return 1;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index dd76177..04175d9 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -902,14 +902,15 @@ static void l2cap_chan_ready(struct sock *sk)
+ static void l2cap_conn_ready(struct l2cap_conn *conn)
+ {
+ struct l2cap_chan *chan;
++ struct hci_conn *hcon = conn->hcon;
+
+ BT_DBG("conn %p", conn);
+
+- if (!conn->hcon->out && conn->hcon->type == LE_LINK)
++ if (!hcon->out && hcon->type == LE_LINK)
+ l2cap_le_conn_ready(conn);
+
+- if (conn->hcon->out && conn->hcon->type == LE_LINK)
+- smp_conn_security(conn, conn->hcon->pending_sec_level);
++ if (hcon->out && hcon->type == LE_LINK)
++ smp_conn_security(hcon, hcon->pending_sec_level);
+
+ read_lock(&conn->chan_lock);
+
+@@ -918,8 +919,8 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
+
+ bh_lock_sock(sk);
+
+- if (conn->hcon->type == LE_LINK) {
+- if (smp_conn_security(conn, chan->sec_level))
++ if (hcon->type == LE_LINK) {
++ if (smp_conn_security(hcon, chan->sec_level))
+ l2cap_chan_ready(sk);
+
+ } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index 6dedd6f..158887a 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -616,7 +616,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
+ break;
+ }
+
+- if (smp_conn_security(conn, sec.level))
++ if (smp_conn_security(conn->hcon, sec.level))
+ break;
+
+ err = 0;
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 759b635..c27b4e3 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -554,9 +554,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
+ return 0;
+ }
+
+-int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
++int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
+ {
+- struct hci_conn *hcon = conn->hcon;
++ struct l2cap_conn *conn = hcon->l2cap_data;
+ struct smp_chan *smp = conn->smp_chan;
+
+ BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 832ba6d..abe1147 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2108,7 +2108,8 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+
+ static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
+ {
+- if (!can_checksum_protocol(features, protocol)) {
++ if (skb->ip_summed != CHECKSUM_NONE &&
++ !can_checksum_protocol(features, protocol)) {
+ features &= ~NETIF_F_ALL_CSUM;
+ features &= ~NETIF_F_SG;
+ } else if (illegal_highdma(skb->dev, skb)) {
+@@ -2686,16 +2687,17 @@ ipv6:
+ nhoff += poff;
+ if (pskb_may_pull(skb, nhoff + 4)) {
+ ports.v32 = * (__force u32 *) (skb->data + nhoff);
+- if (ports.v16[1] < ports.v16[0])
+- swap(ports.v16[0], ports.v16[1]);
+ skb->l4_rxhash = 1;
+ }
+ }
+
+ /* get a consistent hash (same value on both flow directions) */
+- if (addr2 < addr1)
++ if (addr2 < addr1 ||
++ (addr2 == addr1 &&
++ ports.v16[1] < ports.v16[0])) {
+ swap(addr1, addr2);
+-
++ swap(ports.v16[0], ports.v16[1]);
++ }
+ hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
+ if (!hash)
+ hash = 1;
+@@ -6387,7 +6389,8 @@ static struct hlist_head *netdev_create_hash(void)
+ /* Initialize per network namespace state */
+ static int __net_init netdev_init(struct net *net)
+ {
+- INIT_LIST_HEAD(&net->dev_base_head);
++ if (net != &init_net)
++ INIT_LIST_HEAD(&net->dev_base_head);
+
+ net->dev_name_head = netdev_create_hash();
+ if (net->dev_name_head == NULL)
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 31a5ae5..dd00b71 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -25,7 +25,9 @@ static DEFINE_MUTEX(net_mutex);
+ LIST_HEAD(net_namespace_list);
+ EXPORT_SYMBOL_GPL(net_namespace_list);
+
+-struct net init_net;
++struct net init_net = {
++ .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
++};
+ EXPORT_SYMBOL(init_net);
+
+ #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 018fd41..1e8a882 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -593,7 +593,8 @@ set_rcvbuf:
+
+ case SO_KEEPALIVE:
+ #ifdef CONFIG_INET
+- if (sk->sk_protocol == IPPROTO_TCP)
++ if (sk->sk_protocol == IPPROTO_TCP &&
++ sk->sk_type == SOCK_STREAM)
+ tcp_set_keepalive(sk, valbool);
+ #endif
+ sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 007e2eb..e1d4f30 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -131,18 +131,20 @@ found:
+ * 0 - deliver
+ * 1 - block
+ */
+-static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
++static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
+ {
+- int type;
++ struct icmphdr _hdr;
++ const struct icmphdr *hdr;
+
+- if (!pskb_may_pull(skb, sizeof(struct icmphdr)))
++ hdr = skb_header_pointer(skb, skb_transport_offset(skb),
++ sizeof(_hdr), &_hdr);
++ if (!hdr)
+ return 1;
+
+- type = icmp_hdr(skb)->type;
+- if (type < 32) {
++ if (hdr->type < 32) {
+ __u32 data = raw_sk(sk)->filter.data;
+
+- return ((1 << type) & data) != 0;
++ return ((1U << hdr->type) & data) != 0;
+ }
+
+ /* Do not block unknown ICMP types */
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 043d49b..7397ad8 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1589,8 +1589,14 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ }
+
+ #ifdef CONFIG_NET_DMA
+- if (tp->ucopy.dma_chan)
+- dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
++ if (tp->ucopy.dma_chan) {
++ if (tp->rcv_wnd == 0 &&
++ !skb_queue_empty(&sk->sk_async_wait_queue)) {
++ tcp_service_net_dma(sk, true);
++ tcp_cleanup_rbuf(sk, copied);
++ } else
++ dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
++ }
+ #endif
+ if (copied >= target) {
+ /* Do not sleep, just process backlog. */
+diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
+index 43242e6..42853c4 100644
+--- a/net/ipv6/mip6.c
++++ b/net/ipv6/mip6.c
+@@ -84,28 +84,30 @@ static int mip6_mh_len(int type)
+
+ static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
+ {
+- struct ip6_mh *mh;
++ struct ip6_mh _hdr;
++ const struct ip6_mh *mh;
+
+- if (!pskb_may_pull(skb, (skb_transport_offset(skb)) + 8) ||
+- !pskb_may_pull(skb, (skb_transport_offset(skb) +
+- ((skb_transport_header(skb)[1] + 1) << 3))))
++ mh = skb_header_pointer(skb, skb_transport_offset(skb),
++ sizeof(_hdr), &_hdr);
++ if (!mh)
+ return -1;
+
+- mh = (struct ip6_mh *)skb_transport_header(skb);
++ if (((mh->ip6mh_hdrlen + 1) << 3) > skb->len)
++ return -1;
+
+ if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) {
+ LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n",
+ mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type));
+- mip6_param_prob(skb, 0, ((&mh->ip6mh_hdrlen) -
+- skb_network_header(skb)));
++ mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) +
++ skb_network_header_len(skb));
+ return -1;
+ }
+
+ if (mh->ip6mh_proto != IPPROTO_NONE) {
+ LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n",
+ mh->ip6mh_proto);
+- mip6_param_prob(skb, 0, ((&mh->ip6mh_proto) -
+- skb_network_header(skb)));
++ mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) +
++ skb_network_header_len(skb));
+ return -1;
+ }
+
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 361ebf3..6e6c2c4 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -107,21 +107,20 @@ found:
+ * 0 - deliver
+ * 1 - block
+ */
+-static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
++static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
+ {
+- struct icmp6hdr *icmph;
+- struct raw6_sock *rp = raw6_sk(sk);
+-
+- if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
+- __u32 *data = &rp->filter.data[0];
+- int bit_nr;
++ struct icmp6hdr *_hdr;
++ const struct icmp6hdr *hdr;
+
+- icmph = (struct icmp6hdr *) skb->data;
+- bit_nr = icmph->icmp6_type;
++ hdr = skb_header_pointer(skb, skb_transport_offset(skb),
++ sizeof(_hdr), &_hdr);
++ if (hdr) {
++ const __u32 *data = &raw6_sk(sk)->filter.data[0];
++ unsigned int type = hdr->icmp6_type;
+
+- return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
++ return (data[type >> 5] & (1U << (type & 31))) != 0;
+ }
+- return 0;
++ return 1;
+ }
+
+ #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 2e21751..488a1b7 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1435,17 +1435,18 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
+ struct fib6_table *table;
+ struct net *net = dev_net(rt->rt6i_dev);
+
+- if (rt == net->ipv6.ip6_null_entry)
+- return -ENOENT;
++ if (rt == net->ipv6.ip6_null_entry) {
++ err = -ENOENT;
++ goto out;
++ }
+
+ table = rt->rt6i_table;
+ write_lock_bh(&table->tb6_lock);
+-
+ err = fib6_del(rt, info);
+- dst_release(&rt->dst);
+-
+ write_unlock_bh(&table->tb6_lock);
+
++out:
++ dst_release(&rt->dst);
+ return err;
+ }
+
+diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
+index 3c55f63..2cef50b 100644
+--- a/net/l2tp/l2tp_eth.c
++++ b/net/l2tp/l2tp_eth.c
+@@ -132,7 +132,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
+ printk("\n");
+ }
+
+- if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
++ if (!pskb_may_pull(skb, ETH_HLEN))
+ goto error;
+
+ secpath_reset(skb);
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index 732152f..f156382 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -1170,7 +1170,12 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
+ msg->msg_flags |= MSG_TRUNC;
+ }
+
+- skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
++ er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
++ if (er < 0) {
++ skb_free_datagram(sk, skb);
++ release_sock(sk);
++ return er;
++ }
+
+ if (sax != NULL) {
+ sax->sax25_family = AF_NETROM;
+diff --git a/net/rds/recv.c b/net/rds/recv.c
+index bc3f8cd..fc57d31 100644
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -410,6 +410,8 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+
+ rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
+
++ msg->msg_namelen = 0;
++
+ if (msg_flags & MSG_OOB)
+ goto out;
+
+@@ -485,6 +487,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+ sin->sin_port = inc->i_hdr.h_sport;
+ sin->sin_addr.s_addr = inc->i_saddr;
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
++ msg->msg_namelen = sizeof(*sin);
+ }
+ break;
+ }
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+index 24d94c0..599f67a 100644
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -250,10 +250,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
+ else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
+ cl = defmap[TC_PRIO_BESTEFFORT];
+
+- if (cl == NULL || cl->level >= head->level)
++ if (cl == NULL)
+ goto fallback;
+ }
+-
++ if (cl->level >= head->level)
++ goto fallback;
+ #ifdef CONFIG_NET_CLS_ACT
+ switch (result) {
+ case TC_ACT_QUEUED:
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index 7b03254..ca0fb48 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -829,7 +829,10 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
+ if (mask) {
+ struct qfq_group *next = qfq_ffs(q, mask);
+ if (qfq_gt(roundedF, next->F)) {
+- cl->S = next->F;
++ if (qfq_gt(limit, next->F))
++ cl->S = next->F;
++ else /* preserve timestamp correctness */
++ cl->S = limit;
+ return;
+ }
+ }
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index 8fc4dcd..32ba8d0 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -334,6 +334,25 @@ finish:
+ return retval;
+ }
+
++static void sctp_packet_release_owner(struct sk_buff *skb)
++{
++ sk_free(skb->sk);
++}
++
++static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
++{
++ skb_orphan(skb);
++ skb->sk = sk;
++ skb->destructor = sctp_packet_release_owner;
++
++ /*
++ * The data chunks have already been accounted for in sctp_sendmsg(),
++ * therefore only reserve a single byte to keep socket around until
++ * the packet has been transmitted.
++ */
++ atomic_inc(&sk->sk_wmem_alloc);
++}
++
+ /* All packets are sent to the network through this function from
+ * sctp_outq_tail().
+ *
+@@ -375,7 +394,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
+ /* Set the owning socket so that we know where to get the
+ * destination IP address.
+ */
+- skb_set_owner_w(nskb, sk);
++ sctp_packet_set_owner_w(nskb, sk);
+
+ if (!sctp_transport_dst_check(tp)) {
+ sctp_transport_route(tp, NULL, sctp_sk(sk));
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index d57d05b..fa39731 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -331,6 +331,9 @@ static void reg_regdb_search(struct work_struct *work)
+ struct reg_regdb_search_request *request;
+ const struct ieee80211_regdomain *curdom, *regdom;
+ int i, r;
++ bool set_reg = false;
++
++ mutex_lock(&cfg80211_mutex);
+
+ mutex_lock(&reg_regdb_search_mutex);
+ while (!list_empty(&reg_regdb_search_list)) {
+@@ -346,9 +349,7 @@ static void reg_regdb_search(struct work_struct *work)
+ r = reg_copy_regd(&regdom, curdom);
+ if (r)
+ break;
+- mutex_lock(&cfg80211_mutex);
+- set_regdom(regdom);
+- mutex_unlock(&cfg80211_mutex);
++ set_reg = true;
+ break;
+ }
+ }
+@@ -356,6 +357,11 @@ static void reg_regdb_search(struct work_struct *work)
+ kfree(request);
+ }
+ mutex_unlock(&reg_regdb_search_mutex);
++
++ if (set_reg)
++ set_regdom(regdom);
++
++ mutex_unlock(&cfg80211_mutex);
+ }
+
+ static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index 54a0dc2..ab2bb42 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -212,7 +212,7 @@ resume:
+ /* only the first xfrm gets the encap type */
+ encap_type = 0;
+
+- if (async && x->repl->check(x, skb, seq)) {
++ if (async && x->repl->recheck(x, skb, seq)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
+ goto drop_unlock;
+ }
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 0174034..113d20e 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -1761,7 +1761,7 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family,
+
+ if (!afinfo) {
+ dst_release(dst_orig);
+- ret = ERR_PTR(-EINVAL);
++ return ERR_PTR(-EINVAL);
+ } else {
+ ret = afinfo->blackhole_route(net, dst_orig);
+ }
+diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
+index 2f6d11d..3efb07d 100644
+--- a/net/xfrm/xfrm_replay.c
++++ b/net/xfrm/xfrm_replay.c
+@@ -420,6 +420,18 @@ err:
+ return -EINVAL;
+ }
+
++static int xfrm_replay_recheck_esn(struct xfrm_state *x,
++ struct sk_buff *skb, __be32 net_seq)
++{
++ if (unlikely(XFRM_SKB_CB(skb)->seq.input.hi !=
++ htonl(xfrm_replay_seqhi(x, net_seq)))) {
++ x->stats.replay_window++;
++ return -EINVAL;
++ }
++
++ return xfrm_replay_check_esn(x, skb, net_seq);
++}
++
+ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
+ {
+ unsigned int bitnr, nr, i;
+@@ -479,6 +491,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
+ static struct xfrm_replay xfrm_replay_legacy = {
+ .advance = xfrm_replay_advance,
+ .check = xfrm_replay_check,
++ .recheck = xfrm_replay_check,
+ .notify = xfrm_replay_notify,
+ .overflow = xfrm_replay_overflow,
+ };
+@@ -486,6 +499,7 @@ static struct xfrm_replay xfrm_replay_legacy = {
+ static struct xfrm_replay xfrm_replay_bmp = {
+ .advance = xfrm_replay_advance_bmp,
+ .check = xfrm_replay_check_bmp,
++ .recheck = xfrm_replay_check_bmp,
+ .notify = xfrm_replay_notify_bmp,
+ .overflow = xfrm_replay_overflow_bmp,
+ };
+@@ -493,6 +507,7 @@ static struct xfrm_replay xfrm_replay_bmp = {
+ static struct xfrm_replay xfrm_replay_esn = {
+ .advance = xfrm_replay_advance_esn,
+ .check = xfrm_replay_check_esn,
++ .recheck = xfrm_replay_recheck_esn,
+ .notify = xfrm_replay_notify_bmp,
+ .overflow = xfrm_replay_overflow_esn,
+ };
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 7cae73e..ede01a8 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -123,9 +123,21 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
+ struct nlattr **attrs)
+ {
+ struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
++ struct xfrm_replay_state_esn *rs;
+
+- if ((p->flags & XFRM_STATE_ESN) && !rt)
+- return -EINVAL;
++ if (p->flags & XFRM_STATE_ESN) {
++ if (!rt)
++ return -EINVAL;
++
++ rs = nla_data(rt);
++
++ if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
++ return -EINVAL;
++
++ if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
++ nla_len(rt) != sizeof(*rs))
++ return -EINVAL;
++ }
+
+ if (!rt)
+ return 0;
+@@ -370,14 +382,15 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
+ struct nlattr *rp)
+ {
+ struct xfrm_replay_state_esn *up;
++ int ulen;
+
+ if (!replay_esn || !rp)
+ return 0;
+
+ up = nla_data(rp);
++ ulen = xfrm_replay_state_esn_len(up);
+
+- if (xfrm_replay_state_esn_len(replay_esn) !=
+- xfrm_replay_state_esn_len(up))
++ if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
+ return -EINVAL;
+
+ return 0;
+@@ -388,22 +401,28 @@ static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn
+ struct nlattr *rta)
+ {
+ struct xfrm_replay_state_esn *p, *pp, *up;
++ int klen, ulen;
+
+ if (!rta)
+ return 0;
+
+ up = nla_data(rta);
++ klen = xfrm_replay_state_esn_len(up);
++ ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
+
+- p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
++ p = kzalloc(klen, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+- pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
++ pp = kzalloc(klen, GFP_KERNEL);
+ if (!pp) {
+ kfree(p);
+ return -ENOMEM;
+ }
+
++ memcpy(p, up, ulen);
++ memcpy(pp, up, ulen);
++
+ *replay_esn = p;
+ *preplay_esn = pp;
+
+@@ -442,10 +461,11 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
+ * somehow made shareable and move it to xfrm_state.c - JHS
+ *
+ */
+-static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
++static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
++ int update_esn)
+ {
+ struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
+- struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
++ struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
+ struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
+ struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
+ struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
+@@ -555,7 +575,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
+ goto error;
+
+ /* override default values from above */
+- xfrm_update_ae_params(x, attrs);
++ xfrm_update_ae_params(x, attrs, 0);
+
+ return x;
+
+@@ -689,6 +709,7 @@ out:
+
+ static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
+ {
++ memset(p, 0, sizeof(*p));
+ memcpy(&p->id, &x->id, sizeof(p->id));
+ memcpy(&p->sel, &x->sel, sizeof(p->sel));
+ memcpy(&p->lft, &x->lft, sizeof(p->lft));
+@@ -742,7 +763,7 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
+ return -EMSGSIZE;
+
+ algo = nla_data(nla);
+- strcpy(algo->alg_name, auth->alg_name);
++ strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
+ memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
+ algo->alg_key_len = auth->alg_key_len;
+
+@@ -862,6 +883,7 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
+ {
+ struct xfrm_dump_info info;
+ struct sk_buff *skb;
++ int err;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb)
+@@ -872,9 +894,10 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
+ info.nlmsg_seq = seq;
+ info.nlmsg_flags = 0;
+
+- if (dump_one_state(x, 0, &info)) {
++ err = dump_one_state(x, 0, &info);
++ if (err) {
+ kfree_skb(skb);
+- return NULL;
++ return ERR_PTR(err);
+ }
+
+ return skb;
+@@ -1297,6 +1320,7 @@ static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy
+
+ static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
+ {
++ memset(p, 0, sizeof(*p));
+ memcpy(&p->sel, &xp->selector, sizeof(p->sel));
+ memcpy(&p->lft, &xp->lft, sizeof(p->lft));
+ memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
+@@ -1401,6 +1425,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
+ struct xfrm_user_tmpl *up = &vec[i];
+ struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
+
++ memset(up, 0, sizeof(*up));
+ memcpy(&up->id, &kp->id, sizeof(up->id));
+ up->family = kp->encap_family;
+ memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
+@@ -1529,6 +1554,7 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
+ {
+ struct xfrm_dump_info info;
+ struct sk_buff *skb;
++ int err;
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+@@ -1539,9 +1565,10 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
+ info.nlmsg_seq = seq;
+ info.nlmsg_flags = 0;
+
+- if (dump_one_policy(xp, dir, 0, &info) < 0) {
++ err = dump_one_policy(xp, dir, 0, &info);
++ if (err) {
+ kfree_skb(skb);
+- return NULL;
++ return ERR_PTR(err);
+ }
+
+ return skb;
+@@ -1794,7 +1821,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
+ goto out;
+
+ spin_lock_bh(&x->lock);
+- xfrm_update_ae_params(x, attrs);
++ xfrm_update_ae_params(x, attrs, 1);
+ spin_unlock_bh(&x->lock);
+
+ c.event = nlh->nlmsg_type;
+diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c
+index a68b264..a9a593a 100644
+--- a/sound/soc/samsung/dma.c
++++ b/sound/soc/samsung/dma.c
+@@ -34,9 +34,7 @@ static const struct snd_pcm_hardware dma_hardware = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP |
+- SNDRV_PCM_INFO_MMAP_VALID |
+- SNDRV_PCM_INFO_PAUSE |
+- SNDRV_PCM_INFO_RESUME,
++ SNDRV_PCM_INFO_MMAP_VALID,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_U16_LE |
+ SNDRV_PCM_FMTBIT_U8 |
+@@ -246,15 +244,11 @@ static int dma_trigger(struct snd_pcm_substream *substream, int cmd)
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+- case SNDRV_PCM_TRIGGER_RESUME:
+- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ prtd->state |= ST_RUNNING;
+ prtd->params->ops->trigger(prtd->params->ch);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+- case SNDRV_PCM_TRIGGER_SUSPEND:
+- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ prtd->state &= ~ST_RUNNING;
+ prtd->params->ops->stop(prtd->params->ch);
+ break;
diff --git a/1031_linux-3.2.32.patch b/1031_linux-3.2.32.patch
new file mode 100644
index 00000000..247fc0b7
--- /dev/null
+++ b/1031_linux-3.2.32.patch
@@ -0,0 +1,6206 @@
+diff --git a/Documentation/virtual/lguest/lguest.c b/Documentation/virtual/lguest/lguest.c
+index c095d79..288dba6 100644
+--- a/Documentation/virtual/lguest/lguest.c
++++ b/Documentation/virtual/lguest/lguest.c
+@@ -1299,6 +1299,7 @@ static struct device *new_device(const char *name, u16 type)
+ dev->feature_len = 0;
+ dev->num_vq = 0;
+ dev->running = false;
++ dev->next = NULL;
+
+ /*
+ * Append to device list. Prepending to a single-linked list is
+diff --git a/Makefile b/Makefile
+index fd9c414..b6d8282 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 31
++SUBLEVEL = 32
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
+index a6cbb71..04e703a 100644
+--- a/arch/arm/plat-omap/counter_32k.c
++++ b/arch/arm/plat-omap/counter_32k.c
+@@ -82,22 +82,29 @@ static void notrace omap_update_sched_clock(void)
+ * nsecs and adds to a monotonically increasing timespec.
+ */
+ static struct timespec persistent_ts;
+-static cycles_t cycles, last_cycles;
++static cycles_t cycles;
+ static unsigned int persistent_mult, persistent_shift;
++static DEFINE_SPINLOCK(read_persistent_clock_lock);
++
+ void read_persistent_clock(struct timespec *ts)
+ {
+ unsigned long long nsecs;
+- cycles_t delta;
+- struct timespec *tsp = &persistent_ts;
++ cycles_t last_cycles;
++ unsigned long flags;
++
++ spin_lock_irqsave(&read_persistent_clock_lock, flags);
+
+ last_cycles = cycles;
+ cycles = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
+- delta = cycles - last_cycles;
+
+- nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift);
++ nsecs = clocksource_cyc2ns(cycles - last_cycles,
++ persistent_mult, persistent_shift);
++
++ timespec_add_ns(&persistent_ts, nsecs);
++
++ *ts = persistent_ts;
+
+- timespec_add_ns(tsp, nsecs);
+- *ts = *tsp;
++ spin_unlock_irqrestore(&read_persistent_clock_lock, flags);
+ }
+
+ int __init omap_init_clocksource_32k(void)
+diff --git a/arch/mips/Makefile b/arch/mips/Makefile
+index 0be3186..aaf7444 100644
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -224,7 +224,7 @@ KBUILD_CPPFLAGS += -D"DATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)"
+ LDFLAGS += -m $(ld-emul)
+
+ ifdef CONFIG_MIPS
+-CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -xc /dev/null | \
++CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
+ egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
+ sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/")
+ ifdef CONFIG_64BIT
+diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
+index 1a96618..ce7dd99 100644
+--- a/arch/mips/kernel/Makefile
++++ b/arch/mips/kernel/Makefile
+@@ -102,7 +102,7 @@ obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
+
+ obj-$(CONFIG_OF) += prom.o
+
+-CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
++CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
+
+ obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
+
+diff --git a/arch/mn10300/Makefile b/arch/mn10300/Makefile
+index 7120282..3eb4a52 100644
+--- a/arch/mn10300/Makefile
++++ b/arch/mn10300/Makefile
+@@ -26,7 +26,7 @@ CHECKFLAGS +=
+ PROCESSOR := unset
+ UNIT := unset
+
+-KBUILD_CFLAGS += -mam33 -mmem-funcs -DCPU=AM33
++KBUILD_CFLAGS += -mam33 -DCPU=AM33 $(call cc-option,-mmem-funcs,)
+ KBUILD_AFLAGS += -mam33 -DCPU=AM33
+
+ ifeq ($(CONFIG_MN10300_CURRENT_IN_E2),y)
+diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
+index 1b6cb10..a0a4e8a 100644
+--- a/arch/powerpc/platforms/pseries/eeh_driver.c
++++ b/arch/powerpc/platforms/pseries/eeh_driver.c
+@@ -25,6 +25,7 @@
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+ #include <linux/irq.h>
++#include <linux/module.h>
+ #include <linux/pci.h>
+ #include <asm/eeh.h>
+ #include <asm/eeh_event.h>
+@@ -41,6 +42,41 @@ static inline const char * pcid_name (struct pci_dev *pdev)
+ return "";
+ }
+
++/**
++ * eeh_pcid_get - Get the PCI device driver
++ * @pdev: PCI device
++ *
++ * The function is used to retrieve the PCI device driver for
++ * the indicated PCI device. Besides, we will increase the reference
++ * of the PCI device driver to prevent that being unloaded on
++ * the fly. Otherwise, kernel crash would be seen.
++ */
++static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
++{
++ if (!pdev || !pdev->driver)
++ return NULL;
++
++ if (!try_module_get(pdev->driver->driver.owner))
++ return NULL;
++
++ return pdev->driver;
++}
++
++/**
++ * eeh_pcid_put - Dereference on the PCI device driver
++ * @pdev: PCI device
++ *
++ * The function is called to do dereference on the PCI device
++ * driver of the indicated PCI device.
++ */
++static inline void eeh_pcid_put(struct pci_dev *pdev)
++{
++ if (!pdev || !pdev->driver)
++ return;
++
++ module_put(pdev->driver->driver.owner);
++}
++
+ #if 0
+ static void print_device_node_tree(struct pci_dn *pdn, int dent)
+ {
+@@ -109,18 +145,20 @@ static void eeh_enable_irq(struct pci_dev *dev)
+ static int eeh_report_error(struct pci_dev *dev, void *userdata)
+ {
+ enum pci_ers_result rc, *res = userdata;
+- struct pci_driver *driver = dev->driver;
++ struct pci_driver *driver;
+
+ dev->error_state = pci_channel_io_frozen;
+
+- if (!driver)
+- return 0;
++ driver = eeh_pcid_get(dev);
++ if (!driver) return 0;
+
+ eeh_disable_irq(dev);
+
+ if (!driver->err_handler ||
+- !driver->err_handler->error_detected)
++ !driver->err_handler->error_detected) {
++ eeh_pcid_put(dev);
+ return 0;
++ }
+
+ rc = driver->err_handler->error_detected (dev, pci_channel_io_frozen);
+
+@@ -128,6 +166,7 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata)
+ if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+ if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+
++ eeh_pcid_put(dev);
+ return 0;
+ }
+
+@@ -142,12 +181,15 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata)
+ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
+ {
+ enum pci_ers_result rc, *res = userdata;
+- struct pci_driver *driver = dev->driver;
++ struct pci_driver *driver;
+
+- if (!driver ||
+- !driver->err_handler ||
+- !driver->err_handler->mmio_enabled)
++ driver = eeh_pcid_get(dev);
++ if (!driver) return 0;
++ if (!driver->err_handler ||
++ !driver->err_handler->mmio_enabled) {
++ eeh_pcid_put(dev);
+ return 0;
++ }
+
+ rc = driver->err_handler->mmio_enabled (dev);
+
+@@ -155,6 +197,7 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
+ if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+ if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+
++ eeh_pcid_put(dev);
+ return 0;
+ }
+
+@@ -165,18 +208,20 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
+ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
+ {
+ enum pci_ers_result rc, *res = userdata;
+- struct pci_driver *driver = dev->driver;
+-
+- if (!driver)
+- return 0;
++ struct pci_driver *driver;
+
+ dev->error_state = pci_channel_io_normal;
+
++ driver = eeh_pcid_get(dev);
++ if (!driver) return 0;
++
+ eeh_enable_irq(dev);
+
+ if (!driver->err_handler ||
+- !driver->err_handler->slot_reset)
++ !driver->err_handler->slot_reset) {
++ eeh_pcid_put(dev);
+ return 0;
++ }
+
+ rc = driver->err_handler->slot_reset(dev);
+ if ((*res == PCI_ERS_RESULT_NONE) ||
+@@ -184,6 +229,7 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
+ if (*res == PCI_ERS_RESULT_DISCONNECT &&
+ rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+
++ eeh_pcid_put(dev);
+ return 0;
+ }
+
+@@ -193,21 +239,24 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
+
+ static int eeh_report_resume(struct pci_dev *dev, void *userdata)
+ {
+- struct pci_driver *driver = dev->driver;
++ struct pci_driver *driver;
+
+ dev->error_state = pci_channel_io_normal;
+
+- if (!driver)
+- return 0;
++ driver = eeh_pcid_get(dev);
++ if (!driver) return 0;
+
+ eeh_enable_irq(dev);
+
+ if (!driver->err_handler ||
+- !driver->err_handler->resume)
++ !driver->err_handler->resume) {
++ eeh_pcid_put(dev);
+ return 0;
++ }
+
+ driver->err_handler->resume(dev);
+
++ eeh_pcid_put(dev);
+ return 0;
+ }
+
+@@ -220,21 +269,24 @@ static int eeh_report_resume(struct pci_dev *dev, void *userdata)
+
+ static int eeh_report_failure(struct pci_dev *dev, void *userdata)
+ {
+- struct pci_driver *driver = dev->driver;
++ struct pci_driver *driver;
+
+ dev->error_state = pci_channel_io_perm_failure;
+
+- if (!driver)
+- return 0;
++ driver = eeh_pcid_get(dev);
++ if (!driver) return 0;
+
+ eeh_disable_irq(dev);
+
+ if (!driver->err_handler ||
+- !driver->err_handler->error_detected)
++ !driver->err_handler->error_detected) {
++ eeh_pcid_put(dev);
+ return 0;
++ }
+
+ driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
+
++ eeh_pcid_put(dev);
+ return 0;
+ }
+
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index 18601c8..884507e 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -146,8 +146,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
+
+ static inline int pmd_large(pmd_t pte)
+ {
+- return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+- (_PAGE_PSE | _PAGE_PRESENT);
++ return pmd_flags(pte) & _PAGE_PSE;
+ }
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+@@ -415,7 +414,13 @@ static inline int pte_hidden(pte_t pte)
+
+ static inline int pmd_present(pmd_t pmd)
+ {
+- return pmd_flags(pmd) & _PAGE_PRESENT;
++ /*
++ * Checking for _PAGE_PSE is needed too because
++ * split_huge_page will temporarily clear the present bit (but
++ * the _PAGE_PSE flag will remain set at all times while the
++ * _PAGE_PRESENT bit is clear).
++ */
++ return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
+ }
+
+ static inline int pmd_none(pmd_t pmd)
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 37718f0..4d320b2 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -731,6 +731,7 @@ void __init efi_enter_virtual_mode(void)
+ *
+ * Call EFI services through wrapper functions.
+ */
++ efi.runtime_version = efi_systab.fw_revision;
+ efi.get_time = virt_efi_get_time;
+ efi.set_time = virt_efi_set_time;
+ efi.get_wakeup_time = virt_efi_get_wakeup_time;
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 9ecec98..5016de5 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -950,8 +950,6 @@ static int __init acpi_bus_init(void)
+ status = acpi_ec_ecdt_probe();
+ /* Ignore result. Not having an ECDT is not fatal. */
+
+- acpi_bus_osc_support();
+-
+ status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
+@@ -959,6 +957,12 @@ static int __init acpi_bus_init(void)
+ }
+
+ /*
++ * _OSC method may exist in module level code,
++ * so it must be run after ACPI_FULL_INITIALIZATION
++ */
++ acpi_bus_osc_support();
++
++ /*
+ * _PDC control method may load dynamic SSDT tables,
+ * and we need to install the table handler before that.
+ */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 6f95d98..1f90dab 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -108,7 +108,7 @@ static struct usb_device_id btusb_table[] = {
+ { USB_DEVICE(0x413c, 0x8197) },
+
+ /* Foxconn - Hon Hai */
+- { USB_DEVICE(0x0489, 0xe033) },
++ { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
+
+ /*Broadcom devices with vendor specific id */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
+diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
+index eedd547..5936691 100644
+--- a/drivers/char/ttyprintk.c
++++ b/drivers/char/ttyprintk.c
+@@ -67,7 +67,7 @@ static int tpk_printk(const unsigned char *buf, int count)
+ tmp[tpk_curr + 1] = '\0';
+ printk(KERN_INFO "%s%s", tpk_tag, tmp);
+ tpk_curr = 0;
+- if (buf[i + 1] == '\n')
++ if ((i + 1) < count && buf[i + 1] == '\n')
+ i++;
+ break;
+ case '\n':
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index b48967b..5991114 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -564,8 +564,8 @@ void dmaengine_get(void)
+ list_del_rcu(&device->global_node);
+ break;
+ } else if (err)
+- pr_err("dmaengine: failed to get %s: (%d)\n",
+- dma_chan_name(chan), err);
++ pr_debug("%s: failed to get %s: (%d)\n",
++ __func__, dma_chan_name(chan), err);
+ }
+ }
+
+diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
+index 4799393..b97d4f0 100644
+--- a/drivers/firewire/core-cdev.c
++++ b/drivers/firewire/core-cdev.c
+@@ -471,8 +471,8 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
+ client->bus_reset_closure = a->bus_reset_closure;
+ if (a->bus_reset != 0) {
+ fill_bus_reset_event(&bus_reset, client);
+- ret = copy_to_user(u64_to_uptr(a->bus_reset),
+- &bus_reset, sizeof(bus_reset));
++ /* unaligned size of bus_reset is 36 bytes */
++ ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
+ }
+ if (ret == 0 && list_empty(&client->link))
+ list_add_tail(&client->link, &client->device->client_list);
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index 0535c21..3e60e8d 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -435,12 +435,23 @@ efivar_attr_read(struct efivar_entry *entry, char *buf)
+ if (status != EFI_SUCCESS)
+ return -EIO;
+
+- if (var->Attributes & 0x1)
++ if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
+ str += sprintf(str, "EFI_VARIABLE_NON_VOLATILE\n");
+- if (var->Attributes & 0x2)
++ if (var->Attributes & EFI_VARIABLE_BOOTSERVICE_ACCESS)
+ str += sprintf(str, "EFI_VARIABLE_BOOTSERVICE_ACCESS\n");
+- if (var->Attributes & 0x4)
++ if (var->Attributes & EFI_VARIABLE_RUNTIME_ACCESS)
+ str += sprintf(str, "EFI_VARIABLE_RUNTIME_ACCESS\n");
++ if (var->Attributes & EFI_VARIABLE_HARDWARE_ERROR_RECORD)
++ str += sprintf(str, "EFI_VARIABLE_HARDWARE_ERROR_RECORD\n");
++ if (var->Attributes & EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS)
++ str += sprintf(str,
++ "EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS\n");
++ if (var->Attributes &
++ EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS)
++ str += sprintf(str,
++ "EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS\n");
++ if (var->Attributes & EFI_VARIABLE_APPEND_WRITE)
++ str += sprintf(str, "EFI_VARIABLE_APPEND_WRITE\n");
+ return str - buf;
+ }
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index e48e01e..33e1555 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1543,16 +1543,19 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ list_move_tail(&obj->ring_list, &ring->active_list);
+
+ obj->last_rendering_seqno = seqno;
+- if (obj->fenced_gpu_access) {
+- struct drm_i915_fence_reg *reg;
+-
+- BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
+
++ if (obj->fenced_gpu_access) {
+ obj->last_fenced_seqno = seqno;
+ obj->last_fenced_ring = ring;
+
+- reg = &dev_priv->fence_regs[obj->fence_reg];
+- list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
++ /* Bump MRU to take account of the delayed flush */
++ if (obj->fence_reg != I915_FENCE_REG_NONE) {
++ struct drm_i915_fence_reg *reg;
++
++ reg = &dev_priv->fence_regs[obj->fence_reg];
++ list_move_tail(&reg->lru_list,
++ &dev_priv->mm.fence_list);
++ }
+ }
+ }
+
+@@ -1561,6 +1564,7 @@ i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
+ {
+ list_del_init(&obj->ring_list);
+ obj->last_rendering_seqno = 0;
++ obj->last_fenced_seqno = 0;
+ }
+
+ static void
+@@ -1589,6 +1593,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+ BUG_ON(!list_empty(&obj->gpu_write_list));
+ BUG_ON(!obj->active);
+ obj->ring = NULL;
++ obj->last_fenced_ring = NULL;
+
+ i915_gem_object_move_off_active(obj);
+ obj->fenced_gpu_access = false;
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index a6c2f7a..1202198 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -574,7 +574,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+ if (ret)
+ break;
+ }
+- obj->pending_fenced_gpu_access = need_fence;
++ obj->pending_fenced_gpu_access =
++ !!(entry->flags & EXEC_OBJECT_NEEDS_FENCE);
+ }
+
+ entry->offset = obj->gtt_offset;
+diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
+index 31d334d..861223b 100644
+--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
+@@ -107,10 +107,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+- } else if (IS_MOBILE(dev)) {
++ } else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
+ uint32_t dcc;
+
+- /* On mobile 9xx chipsets, channel interleave by the CPU is
++ /* On 9xx chipsets, channel interleave by the CPU is
+ * determined by DCC. For single-channel, neither the CPU
+ * nor the GPU do swizzling. For dual channel interleaved,
+ * the GPU's interleave is bit 9 and 10 for X tiled, and bit
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index c8b5bc1..2812d7b 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -530,6 +530,12 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+ if (de_iir & DE_GSE_IVB)
+ intel_opregion_gse_intr(dev);
+
++ if (de_iir & DE_PIPEA_VBLANK_IVB)
++ drm_handle_vblank(dev, 0);
++
++ if (de_iir & DE_PIPEB_VBLANK_IVB)
++ drm_handle_vblank(dev, 1);
++
+ if (de_iir & DE_PLANEA_FLIP_DONE_IVB) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip_plane(dev, 0);
+@@ -540,12 +546,6 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+ intel_finish_page_flip_plane(dev, 1);
+ }
+
+- if (de_iir & DE_PIPEA_VBLANK_IVB)
+- drm_handle_vblank(dev, 0);
+-
+- if (de_iir & DE_PIPEB_VBLANK_IVB)
+- drm_handle_vblank(dev, 1);
+-
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT_IVB) {
+ if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+@@ -622,6 +622,12 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+ if (de_iir & DE_GSE)
+ intel_opregion_gse_intr(dev);
+
++ if (de_iir & DE_PIPEA_VBLANK)
++ drm_handle_vblank(dev, 0);
++
++ if (de_iir & DE_PIPEB_VBLANK)
++ drm_handle_vblank(dev, 1);
++
+ if (de_iir & DE_PLANEA_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip_plane(dev, 0);
+@@ -632,12 +638,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+ intel_finish_page_flip_plane(dev, 1);
+ }
+
+- if (de_iir & DE_PIPEA_VBLANK)
+- drm_handle_vblank(dev, 0);
+-
+- if (de_iir & DE_PIPEB_VBLANK)
+- drm_handle_vblank(dev, 1);
+-
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT) {
+ if (pch_iir & hotplug_mask)
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 4a5e662..a294a32 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -401,6 +401,9 @@
+ # define VS_TIMER_DISPATCH (1 << 6)
+ # define MI_FLUSH_ENABLE (1 << 11)
+
++#define GEN6_GT_MODE 0x20d0
++#define GEN6_GT_MODE_HI (1 << 9)
++
+ #define GFX_MODE 0x02520
+ #define GFX_MODE_GEN7 0x0229c
+ #define GFX_RUN_LIST_ENABLE (1<<15)
+@@ -1557,6 +1560,10 @@
+
+ /* Video Data Island Packet control */
+ #define VIDEO_DIP_DATA 0x61178
++/* Read the description of VIDEO_DIP_DATA (before Haswel) or VIDEO_DIP_ECC
++ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
++ * of the infoframe structure specified by CEA-861. */
++#define VIDEO_DIP_DATA_SIZE 32
+ #define VIDEO_DIP_CTL 0x61170
+ #define VIDEO_DIP_ENABLE (1 << 31)
+ #define VIDEO_DIP_PORT_B (1 << 29)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 6c3fb44..adac0dd 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -2850,13 +2850,34 @@ static void intel_clear_scanline_wait(struct drm_device *dev)
+ I915_WRITE_CTL(ring, tmp);
+ }
+
++static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
++{
++ struct drm_device *dev = crtc->dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ unsigned long flags;
++ bool pending;
++
++ if (atomic_read(&dev_priv->mm.wedged))
++ return false;
++
++ spin_lock_irqsave(&dev->event_lock, flags);
++ pending = to_intel_crtc(crtc)->unpin_work != NULL;
++ spin_unlock_irqrestore(&dev->event_lock, flags);
++
++ return pending;
++}
++
+ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+ {
+ struct drm_device *dev = crtc->dev;
++ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (crtc->fb == NULL)
+ return;
+
++ wait_event(dev_priv->pending_flip_queue,
++ !intel_crtc_has_pending_flip(crtc));
++
+ mutex_lock(&dev->struct_mutex);
+ intel_finish_fb(crtc->fb);
+ mutex_unlock(&dev->struct_mutex);
+@@ -5027,7 +5048,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ /* default to 8bpc */
+ pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ if (is_dp) {
+- if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
++ if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+@@ -5495,7 +5516,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ /* determine panel color depth */
+ temp = I915_READ(PIPECONF(pipe));
+ temp &= ~PIPE_BPC_MASK;
+- dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
++ dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, adjusted_mode);
+ switch (pipe_bpp) {
+ case 18:
+ temp |= PIPE_6BPC;
+@@ -6952,9 +6973,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
+
+ atomic_clear_mask(1 << intel_crtc->plane,
+ &obj->pending_flip.counter);
+- if (atomic_read(&obj->pending_flip) == 0)
+- wake_up(&dev_priv->pending_flip_queue);
+
++ wake_up(&dev_priv->pending_flip_queue);
+ schedule_work(&work->work);
+
+ trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
+@@ -7193,7 +7213,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
+ default:
+ WARN_ONCE(1, "unknown plane in flip command\n");
+ ret = -ENODEV;
+- goto err;
++ goto err_unpin;
+ }
+
+ ret = intel_ring_begin(ring, 4);
+@@ -8278,6 +8298,11 @@ static void gen6_init_clock_gating(struct drm_device *dev)
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
++
++ /* The default value should be 0x200 according to docs, but the two
++ * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
++ I915_WRITE(GEN6_GT_MODE, 0xffff << 16);
++ I915_WRITE(GEN6_GT_MODE, GEN6_GT_MODE_HI << 16 | GEN6_GT_MODE_HI);
+ }
+
+ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index c2a64f4..497da2a 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -138,14 +138,20 @@ static void i9xx_write_infoframe(struct drm_encoder *encoder,
+
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+
++ mmiowb();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(VIDEO_DIP_DATA, *data);
+ data++;
+ }
++ /* Write every possible data byte to force correct ECC calculation. */
++ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
++ I915_WRITE(VIDEO_DIP_DATA, 0);
++ mmiowb();
+
+ flags |= intel_infoframe_flags(frame);
+
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
++ POSTING_READ(VIDEO_DIP_CTL);
+ }
+
+ static void ironlake_write_infoframe(struct drm_encoder *encoder,
+@@ -168,14 +174,20 @@ static void ironlake_write_infoframe(struct drm_encoder *encoder,
+
+ I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+
++ mmiowb();
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
++ /* Write every possible data byte to force correct ECC calculation. */
++ for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
++ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
++ mmiowb();
+
+ flags |= intel_infoframe_flags(frame);
+
+ I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
++ POSTING_READ(reg);
+ }
+ static void intel_set_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+@@ -546,10 +558,13 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+ if (!HAS_PCH_SPLIT(dev)) {
+ intel_hdmi->write_infoframe = i9xx_write_infoframe;
+ I915_WRITE(VIDEO_DIP_CTL, 0);
++ POSTING_READ(VIDEO_DIP_CTL);
+ } else {
+ intel_hdmi->write_infoframe = ironlake_write_infoframe;
+- for_each_pipe(i)
++ for_each_pipe(i) {
+ I915_WRITE(TVIDEO_DIP_CTL(i), 0);
++ POSTING_READ(TVIDEO_DIP_CTL(i));
++ }
+ }
+
+ drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index fc0633c..b61f490 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -37,6 +37,16 @@
+ #define EVERGREEN_PFP_UCODE_SIZE 1120
+ #define EVERGREEN_PM4_UCODE_SIZE 1376
+
++static const u32 crtc_offsets[6] =
++{
++ EVERGREEN_CRTC0_REGISTER_OFFSET,
++ EVERGREEN_CRTC1_REGISTER_OFFSET,
++ EVERGREEN_CRTC2_REGISTER_OFFSET,
++ EVERGREEN_CRTC3_REGISTER_OFFSET,
++ EVERGREEN_CRTC4_REGISTER_OFFSET,
++ EVERGREEN_CRTC5_REGISTER_OFFSET
++};
++
+ static void evergreen_gpu_init(struct radeon_device *rdev);
+ void evergreen_fini(struct radeon_device *rdev);
+ void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+@@ -66,6 +76,27 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
+ }
+ }
+
++void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
++{
++ int i;
++
++ if (crtc >= rdev->num_crtc)
++ return;
++
++ if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
++ for (i = 0; i < rdev->usec_timeout; i++) {
++ if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
++ break;
++ udelay(1);
++ }
++ for (i = 0; i < rdev->usec_timeout; i++) {
++ if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
++ break;
++ udelay(1);
++ }
++ }
++}
++
+ void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
+ {
+ /* enable the pflip int */
+@@ -1065,116 +1096,88 @@ void evergreen_agp_enable(struct radeon_device *rdev)
+
+ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+ {
++ u32 crtc_enabled, tmp, frame_count, blackout;
++ int i, j;
++
+ save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+
+- /* Stop all video */
++ /* disable VGA render */
+ WREG32(VGA_RENDER_CONTROL, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+- }
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+- }
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
++ /* blank the display controllers */
++ for (i = 0; i < rdev->num_crtc; i++) {
++ crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
++ if (crtc_enabled) {
++ save->crtc_enabled[i] = true;
++ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
++ if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
++ dce4_wait_for_vblank(rdev, i);
++ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
++ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
++ }
++ /* wait for the next frame */
++ frame_count = radeon_get_vblank_counter(rdev, i);
++ for (j = 0; j < rdev->usec_timeout; j++) {
++ if (radeon_get_vblank_counter(rdev, i) != frame_count)
++ break;
++ udelay(1);
++ }
++ }
+ }
+
+- WREG32(D1VGA_CONTROL, 0);
+- WREG32(D2VGA_CONTROL, 0);
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+- WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+- WREG32(EVERGREEN_D6VGA_CONTROL, 0);
++ evergreen_mc_wait_for_idle(rdev);
++
++ blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
++ if ((blackout & BLACKOUT_MODE_MASK) != 1) {
++ /* Block CPU access */
++ WREG32(BIF_FB_EN, 0);
++ /* blackout the MC */
++ blackout &= ~BLACKOUT_MODE_MASK;
++ WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+ }
+ }
+
+ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
+ {
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+-
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+-
+- if (rdev->num_crtc >= 4) {
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+-
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- }
+- if (rdev->num_crtc >= 6) {
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+- upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+- (u32)rdev->mc.vram_start);
++ u32 tmp, frame_count;
++ int i, j;
+
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
++ /* update crtc base addresses */
++ for (i = 0; i < rdev->num_crtc; i++) {
++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+ upper_32_bits(rdev->mc.vram_start));
+- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
++ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+- WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
++ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+ (u32)rdev->mc.vram_start);
+ }
+-
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+- /* Unlock host access */
++
++ /* unblackout the MC */
++ tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
++ tmp &= ~BLACKOUT_MODE_MASK;
++ WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
++ /* allow CPU access */
++ WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
++
++ for (i = 0; i < rdev->num_crtc; i++) {
++ if (save->crtc_enabled) {
++ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
++ tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
++ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
++ /* wait for the next frame */
++ frame_count = radeon_get_vblank_counter(rdev, i);
++ for (j = 0; j < rdev->usec_timeout; j++) {
++ if (radeon_get_vblank_counter(rdev, i) != frame_count)
++ break;
++ udelay(1);
++ }
++ }
++ }
++ /* Unlock vga access */
+ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+ WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
+index 7d7f215..e022776 100644
+--- a/drivers/gpu/drm/radeon/evergreen_reg.h
++++ b/drivers/gpu/drm/radeon/evergreen_reg.h
+@@ -210,7 +210,10 @@
+ #define EVERGREEN_CRTC_CONTROL 0x6e70
+ # define EVERGREEN_CRTC_MASTER_EN (1 << 0)
+ # define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
++#define EVERGREEN_CRTC_BLANK_CONTROL 0x6e74
++# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
+ #define EVERGREEN_CRTC_STATUS 0x6e8c
++# define EVERGREEN_CRTC_V_BLANK (1 << 0)
+ #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
+ #define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
+ #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
+diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
+index 6ecd23f..fe44a95 100644
+--- a/drivers/gpu/drm/radeon/evergreend.h
++++ b/drivers/gpu/drm/radeon/evergreend.h
+@@ -77,6 +77,10 @@
+
+ #define CONFIG_MEMSIZE 0x5428
+
++#define BIF_FB_EN 0x5490
++#define FB_READ_EN (1 << 0)
++#define FB_WRITE_EN (1 << 1)
++
+ #define CP_ME_CNTL 0x86D8
+ #define CP_ME_HALT (1 << 28)
+ #define CP_PFP_HALT (1 << 26)
+@@ -194,6 +198,9 @@
+ #define NOOFCHAN_MASK 0x00003000
+ #define MC_SHARED_CHREMAP 0x2008
+
++#define MC_SHARED_BLACKOUT_CNTL 0x20ac
++#define BLACKOUT_MODE_MASK 0x00000007
++
+ #define MC_ARB_RAMCFG 0x2760
+ #define NOOFBANK_SHIFT 0
+ #define NOOFBANK_MASK 0x00000003
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
+index 5ce9402..5aa6670 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.h
++++ b/drivers/gpu/drm/radeon/radeon_asic.h
+@@ -386,6 +386,7 @@ void r700_cp_fini(struct radeon_device *rdev);
+ struct evergreen_mc_save {
+ u32 vga_render_control;
+ u32 vga_hdp_control;
++ bool crtc_enabled[RADEON_MAX_CRTCS];
+ };
+
+ void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index baa019e..4f9496e 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -143,6 +143,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
+ (rdev->pdev->subsystem_device == 0x01fd))
+ return true;
+
++ /* Gateway RS690 only seems to work with MSIs. */
++ if ((rdev->pdev->device == 0x791f) &&
++ (rdev->pdev->subsystem_vendor == 0x107b) &&
++ (rdev->pdev->subsystem_device == 0x0185))
++ return true;
++
++ /* try and enable MSIs by default on all RS690s */
++ if (rdev->family == CHIP_RS690)
++ return true;
++
+ /* RV515 seems to have MSI issues where it loses
+ * MSI rearms occasionally. This leads to lockups and freezes.
+ * disable it by default.
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 78a665b..ebd6c51 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -553,7 +553,9 @@ void radeon_pm_suspend(struct radeon_device *rdev)
+ void radeon_pm_resume(struct radeon_device *rdev)
+ {
+ /* set up the default clocks if the MC ucode is loaded */
+- if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
++ if ((rdev->family >= CHIP_BARTS) &&
++ (rdev->family <= CHIP_CAYMAN) &&
++ rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
+@@ -608,7 +610,9 @@ int radeon_pm_init(struct radeon_device *rdev)
+ radeon_pm_print_states(rdev);
+ radeon_pm_init_profile(rdev);
+ /* set up the default clocks if the MC ucode is loaded */
+- if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
++ if ((rdev->family >= CHIP_BARTS) &&
++ (rdev->family <= CHIP_CAYMAN) &&
++ rdev->mc_fw) {
+ if (rdev->pm.default_vddc)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index fe2fdbb..1740b82 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -148,7 +148,7 @@ static int ipoib_stop(struct net_device *dev)
+
+ netif_stop_queue(dev);
+
+- ipoib_ib_dev_down(dev, 0);
++ ipoib_ib_dev_down(dev, 1);
+ ipoib_ib_dev_stop(dev, 0);
+
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+index e5069b4..80799c0 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -190,7 +190,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
+
+ mcast->mcmember = *mcmember;
+
+- /* Set the cached Q_Key before we attach if it's the broadcast group */
++ /* Set the multicast MTU and cached Q_Key before we attach if it's
++ * the broadcast group.
++ */
+ if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
+ sizeof (union ib_gid))) {
+ spin_lock_irq(&priv->lock);
+@@ -198,10 +200,17 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
+ spin_unlock_irq(&priv->lock);
+ return -EAGAIN;
+ }
++ priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
+ priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
+ spin_unlock_irq(&priv->lock);
+ priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
+ set_qkey = 1;
++
++ if (!ipoib_cm_admin_enabled(dev)) {
++ rtnl_lock();
++ dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
++ rtnl_unlock();
++ }
+ }
+
+ if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+@@ -590,14 +599,6 @@ void ipoib_mcast_join_task(struct work_struct *work)
+ return;
+ }
+
+- priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
+-
+- if (!ipoib_cm_admin_enabled(dev)) {
+- rtnl_lock();
+- dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
+- rtnl_unlock();
+- }
+-
+ ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");
+
+ clear_bit(IPOIB_MCAST_RUN, &priv->flags);
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index c76b051..4ec049d 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -620,9 +620,9 @@ static void srp_reset_req(struct srp_target_port *target, struct srp_request *re
+ struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
+
+ if (scmnd) {
++ srp_free_req(target, req, scmnd, 0);
+ scmnd->result = DID_RESET << 16;
+ scmnd->scsi_done(scmnd);
+- srp_free_req(target, req, scmnd, 0);
+ }
+ }
+
+@@ -1669,6 +1669,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+ SRP_TSK_ABORT_TASK);
+ srp_free_req(target, req, scmnd, 0);
+ scmnd->result = DID_ABORT << 16;
++ scmnd->scsi_done(scmnd);
+
+ return SUCCESS;
+ }
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 96532bc..7be5fd9 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -53,14 +53,19 @@
+ #define ABS_POS_BITS 13
+
+ /*
+- * Any position values from the hardware above the following limits are
+- * treated as "wrapped around negative" values that have been truncated to
+- * the 13-bit reporting range of the hardware. These are just reasonable
+- * guesses and can be adjusted if hardware is found that operates outside
+- * of these parameters.
++ * These values should represent the absolute maximum value that will
++ * be reported for a positive position value. Some Synaptics firmware
++ * uses this value to indicate a finger near the edge of the touchpad
++ * whose precise position cannot be determined.
++ *
++ * At least one touchpad is known to report positions in excess of this
++ * value which are actually negative values truncated to the 13-bit
++ * reporting range. These values have never been observed to be lower
++ * than 8184 (i.e. -8), so we treat all values greater than 8176 as
++ * negative and any other value as positive.
+ */
+-#define X_MAX_POSITIVE (((1 << ABS_POS_BITS) + XMAX) / 2)
+-#define Y_MAX_POSITIVE (((1 << ABS_POS_BITS) + YMAX) / 2)
++#define X_MAX_POSITIVE 8176
++#define Y_MAX_POSITIVE 8176
+
+ /*
+ * Synaptics touchpads report the y coordinate from bottom to top, which is
+@@ -561,11 +566,21 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
+ hw->right = (buf[0] & 0x02) ? 1 : 0;
+ }
+
+- /* Convert wrap-around values to negative */
++ /*
++ * Convert wrap-around values to negative. (X|Y)_MAX_POSITIVE
++ * is used by some firmware to indicate a finger at the edge of
++ * the touchpad whose precise position cannot be determined, so
++ * convert these values to the maximum axis value.
++ */
+ if (hw->x > X_MAX_POSITIVE)
+ hw->x -= 1 << ABS_POS_BITS;
++ else if (hw->x == X_MAX_POSITIVE)
++ hw->x = XMAX;
++
+ if (hw->y > Y_MAX_POSITIVE)
+ hw->y -= 1 << ABS_POS_BITS;
++ else if (hw->y == Y_MAX_POSITIVE)
++ hw->y = YMAX;
+
+ return 0;
+ }
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index ccf347f..b9062c0 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -563,7 +563,9 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
+ {
+ int i;
+
+- domain->iommu_coherency = 1;
++ i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
++
++ domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;
+
+ for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
+ if (!ecap_coherent(g_iommus[i]->ecap)) {
+diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
+index 0e49c99..c06992e 100644
+--- a/drivers/media/rc/ite-cir.c
++++ b/drivers/media/rc/ite-cir.c
+@@ -1473,6 +1473,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
+ rdev = rc_allocate_device();
+ if (!rdev)
+ goto failure;
++ itdev->rdev = rdev;
+
+ ret = -ENODEV;
+
+@@ -1604,7 +1605,6 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
+ if (ret)
+ goto failure;
+
+- itdev->rdev = rdev;
+ ite_pr(KERN_NOTICE, "driver has been successfully loaded\n");
+
+ return 0;
+diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
+index 1c44f78..6ddc769 100644
+--- a/drivers/media/video/gspca/pac7302.c
++++ b/drivers/media/video/gspca/pac7302.c
+@@ -1197,6 +1197,8 @@ static const struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x093a, 0x2629), .driver_info = FL_VFLIP},
+ {USB_DEVICE(0x093a, 0x262a)},
+ {USB_DEVICE(0x093a, 0x262c)},
++ {USB_DEVICE(0x145f, 0x013c)},
++ {USB_DEVICE(0x1ae7, 0x2001)}, /* SpeedLink Snappy Mic SL-6825-SBK */
+ {}
+ };
+ MODULE_DEVICE_TABLE(usb, device_table);
+diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
+index d5fe43d..bc27065 100644
+--- a/drivers/mmc/host/omap_hsmmc.c
++++ b/drivers/mmc/host/omap_hsmmc.c
+@@ -2188,9 +2188,7 @@ static int omap_hsmmc_suspend(struct device *dev)
+ } else {
+ host->suspended = 0;
+ if (host->pdata->resume) {
+- ret = host->pdata->resume(&pdev->dev,
+- host->slot_id);
+- if (ret)
++ if (host->pdata->resume(&pdev->dev, host->slot_id))
+ dev_dbg(mmc_dev(host->mmc),
+ "Unmask interrupt failed\n");
+ }
+diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
+index 0d33ff0..06af9e4 100644
+--- a/drivers/mmc/host/sdhci-s3c.c
++++ b/drivers/mmc/host/sdhci-s3c.c
+@@ -601,7 +601,7 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
+
+ sdhci_remove_host(host, 1);
+
+- for (ptr = 0; ptr < 3; ptr++) {
++ for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
+ if (sc->clk_bus[ptr]) {
+ clk_disable(sc->clk_bus[ptr]);
+ clk_put(sc->clk_bus[ptr]);
+diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
+index d5505f3..559d30d 100644
+--- a/drivers/mmc/host/sh_mmcif.c
++++ b/drivers/mmc/host/sh_mmcif.c
+@@ -1003,6 +1003,10 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
+ host->sd_error = true;
+ dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
+ }
++ if (host->state == STATE_IDLE) {
++ dev_info(&host->pd->dev, "Spurious IRQ status 0x%x", state);
++ return IRQ_HANDLED;
++ }
+ if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
+ complete(&host->intr_wait);
+ else
+diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
+index e5bfd0e..0598d52 100644
+--- a/drivers/mtd/maps/autcpu12-nvram.c
++++ b/drivers/mtd/maps/autcpu12-nvram.c
+@@ -43,7 +43,8 @@ struct map_info autcpu12_sram_map = {
+
+ static int __init init_autcpu12_sram (void)
+ {
+- int err, save0, save1;
++ map_word tmp, save0, save1;
++ int err;
+
+ autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K);
+ if (!autcpu12_sram_map.virt) {
+@@ -51,7 +52,7 @@ static int __init init_autcpu12_sram (void)
+ err = -EIO;
+ goto out;
+ }
+- simple_map_init(&autcpu_sram_map);
++ simple_map_init(&autcpu12_sram_map);
+
+ /*
+ * Check for 32K/128K
+@@ -61,20 +62,22 @@ static int __init init_autcpu12_sram (void)
+ * Read and check result on ofs 0x0
+ * Restore contents
+ */
+- save0 = map_read32(&autcpu12_sram_map,0);
+- save1 = map_read32(&autcpu12_sram_map,0x10000);
+- map_write32(&autcpu12_sram_map,~save0,0x10000);
++ save0 = map_read(&autcpu12_sram_map, 0);
++ save1 = map_read(&autcpu12_sram_map, 0x10000);
++ tmp.x[0] = ~save0.x[0];
++ map_write(&autcpu12_sram_map, tmp, 0x10000);
+ /* if we find this pattern on 0x0, we have 32K size
+ * restore contents and exit
+ */
+- if ( map_read32(&autcpu12_sram_map,0) != save0) {
+- map_write32(&autcpu12_sram_map,save0,0x0);
++ tmp = map_read(&autcpu12_sram_map, 0);
++ if (!map_word_equal(&autcpu12_sram_map, tmp, save0)) {
++ map_write(&autcpu12_sram_map, save0, 0x0);
+ goto map;
+ }
+ /* We have a 128K found, restore 0x10000 and set size
+ * to 128K
+ */
+- map_write32(&autcpu12_sram_map,save1,0x10000);
++ map_write(&autcpu12_sram_map, save1, 0x10000);
+ autcpu12_sram_map.size = SZ_128K;
+
+ map:
+diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
+index a0bd2de..198da0a 100644
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -748,6 +748,8 @@ static const char *default_mtd_part_types[] = {
+ * partition parsers, specified in @types. However, if @types is %NULL, then
+ * the default list of parsers is used. The default list contains only the
+ * "cmdlinepart" and "ofpart" parsers ATM.
++ * Note: If there are more then one parser in @types, the kernel only takes the
++ * partitions parsed out by the first parser.
+ *
+ * This function may return:
+ * o a negative error code in case of failure
+@@ -772,11 +774,12 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
+ if (!parser)
+ continue;
+ ret = (*parser->parse_fn)(master, pparts, data);
++ put_partition_parser(parser);
+ if (ret > 0) {
+ printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
+ ret, parser->name, master->name);
++ break;
+ }
+- put_partition_parser(parser);
+ }
+ return ret;
+ }
+diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
+index f024375..532da04 100644
+--- a/drivers/mtd/nand/nand_bbt.c
++++ b/drivers/mtd/nand/nand_bbt.c
+@@ -390,7 +390,7 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
+ /* Read the mirror version, if available */
+ if (md && (md->options & NAND_BBT_VERSION)) {
+ scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
+- mtd->writesize, td);
++ mtd->writesize, md);
+ md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ md->pages[0], md->version[0]);
+diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
+index 83e8e1b..ade0da0 100644
+--- a/drivers/mtd/nand/nandsim.c
++++ b/drivers/mtd/nand/nandsim.c
+@@ -2355,6 +2355,7 @@ static int __init ns_init_module(void)
+ uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
+ if (new_size >> overridesize != nsmtd->erasesize) {
+ NS_ERR("overridesize is too big\n");
++ retval = -EINVAL;
+ goto err_exit;
+ }
+ /* N.B. This relies on nand_scan not doing anything with the size before we change it */
+diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
+index f745f00..297c965 100644
+--- a/drivers/mtd/nand/omap2.c
++++ b/drivers/mtd/nand/omap2.c
+@@ -1132,7 +1132,8 @@ static int omap_nand_remove(struct platform_device *pdev)
+ /* Release NAND device, its internal structures and partitions */
+ nand_release(&info->mtd);
+ iounmap(info->nand.IO_ADDR_R);
+- kfree(&info->mtd);
++ release_mem_region(info->phys_base, NAND_IO_SIZE);
++ kfree(info);
+ return 0;
+ }
+
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index 6c3fb5a..1f9c363 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -816,6 +816,11 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
+ struct ubi_volume *vol = ubi->volumes[vol_id];
+ int err, old_reserved_pebs = vol->reserved_pebs;
+
++ if (ubi->ro_mode) {
++ ubi_warn("skip auto-resize because of R/O mode");
++ return 0;
++ }
++
+ /*
+ * Clear the auto-resize flag in the volume in-memory copy of the
+ * volume table, and 'ubi_resize_volume()' will propagate this change
+diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
+index b99318e..b2b62de 100644
+--- a/drivers/mtd/ubi/scan.c
++++ b/drivers/mtd/ubi/scan.c
+@@ -997,7 +997,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
+ return err;
+ goto adjust_mean_ec;
+ case UBI_IO_FF:
+- if (ec_err)
++ if (ec_err || bitflips)
+ err = add_to_list(si, pnum, ec, 1, &si->erase);
+ else
+ err = add_to_list(si, pnum, ec, 0, &si->free);
+diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
+index 5fedc33..d8f2b5b 100644
+--- a/drivers/net/can/mscan/mpc5xxx_can.c
++++ b/drivers/net/can/mscan/mpc5xxx_can.c
+@@ -181,7 +181,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
+
+ if (!clock_name || !strcmp(clock_name, "sys")) {
+ sys_clk = clk_get(&ofdev->dev, "sys_clk");
+- if (!sys_clk) {
++ if (IS_ERR(sys_clk)) {
+ dev_err(&ofdev->dev, "couldn't get sys_clk\n");
+ goto exit_unmap;
+ }
+@@ -204,7 +204,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
+
+ if (clocksrc < 0) {
+ ref_clk = clk_get(&ofdev->dev, "ref_clk");
+- if (!ref_clk) {
++ if (IS_ERR(ref_clk)) {
+ dev_err(&ofdev->dev, "couldn't get ref_clk\n");
+ goto exit_unmap;
+ }
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index 0549261..c5f6b0e 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -4720,8 +4720,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+
+ netif_device_detach(netdev);
+
+- mutex_lock(&adapter->mutex);
+-
+ if (netif_running(netdev)) {
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+ e1000_down(adapter);
+@@ -4729,10 +4727,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+
+ #ifdef CONFIG_PM
+ retval = pci_save_state(pdev);
+- if (retval) {
+- mutex_unlock(&adapter->mutex);
++ if (retval)
+ return retval;
+- }
+ #endif
+
+ status = er32(STATUS);
+@@ -4789,8 +4785,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+ if (netif_running(netdev))
+ e1000_free_irq(adapter);
+
+- mutex_unlock(&adapter->mutex);
+-
+ pci_disable_device(pdev);
+
+ return 0;
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index ed1be8a..4b43bc5 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -327,6 +327,8 @@ enum rtl_registers {
+ Config0 = 0x51,
+ Config1 = 0x52,
+ Config2 = 0x53,
++#define PME_SIGNAL (1 << 5) /* 8168c and later */
++
+ Config3 = 0x54,
+ Config4 = 0x55,
+ Config5 = 0x56,
+@@ -1360,7 +1362,6 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+ u16 reg;
+ u8 mask;
+ } cfg[] = {
+- { WAKE_ANY, Config1, PMEnable },
+ { WAKE_PHY, Config3, LinkUp },
+ { WAKE_MAGIC, Config3, MagicPacket },
+ { WAKE_UCAST, Config5, UWF },
+@@ -1368,16 +1369,32 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
+ { WAKE_MCAST, Config5, MWF },
+ { WAKE_ANY, Config5, LanWake }
+ };
++ u8 options;
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
+ for (i = 0; i < ARRAY_SIZE(cfg); i++) {
+- u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
++ options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
+ if (wolopts & cfg[i].opt)
+ options |= cfg[i].mask;
+ RTL_W8(cfg[i].reg, options);
+ }
+
++ switch (tp->mac_version) {
++ case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
++ options = RTL_R8(Config1) & ~PMEnable;
++ if (wolopts)
++ options |= PMEnable;
++ RTL_W8(Config1, options);
++ break;
++ default:
++ options = RTL_R8(Config2) & ~PME_SIGNAL;
++ if (wolopts)
++ options |= PME_SIGNAL;
++ RTL_W8(Config2, options);
++ break;
++ }
++
+ RTL_W8(Cfg9346, Cfg9346_Lock);
+ }
+
+diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
+index 7145714..c0f097b 100644
+--- a/drivers/net/rionet.c
++++ b/drivers/net/rionet.c
+@@ -79,6 +79,7 @@ static int rionet_capable = 1;
+ * on system trade-offs.
+ */
+ static struct rio_dev **rionet_active;
++static int nact; /* total number of active rionet peers */
+
+ #define is_rionet_capable(src_ops, dst_ops) \
+ ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
+@@ -175,6 +176,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ u16 destid;
+ unsigned long flags;
++ int add_num = 1;
+
+ local_irq_save(flags);
+ if (!spin_trylock(&rnet->tx_lock)) {
+@@ -182,7 +184,10 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ return NETDEV_TX_LOCKED;
+ }
+
+- if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
++ if (is_multicast_ether_addr(eth->h_dest))
++ add_num = nact;
++
++ if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
+ netif_stop_queue(ndev);
+ spin_unlock_irqrestore(&rnet->tx_lock, flags);
+ printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
+@@ -191,11 +196,16 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ }
+
+ if (is_multicast_ether_addr(eth->h_dest)) {
++ int count = 0;
+ for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
+ i++)
+- if (rionet_active[i])
++ if (rionet_active[i]) {
+ rionet_queue_tx_msg(skb, ndev,
+ rionet_active[i]);
++ if (count)
++ atomic_inc(&skb->users);
++ count++;
++ }
+ } else if (RIONET_MAC_MATCH(eth->h_dest)) {
+ destid = RIONET_GET_DESTID(eth->h_dest);
+ if (rionet_active[destid])
+@@ -220,14 +230,17 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
+ if (info == RIONET_DOORBELL_JOIN) {
+ if (!rionet_active[sid]) {
+ list_for_each_entry(peer, &rionet_peers, node) {
+- if (peer->rdev->destid == sid)
++ if (peer->rdev->destid == sid) {
+ rionet_active[sid] = peer->rdev;
++ nact++;
++ }
+ }
+ rio_mport_send_doorbell(mport, sid,
+ RIONET_DOORBELL_JOIN);
+ }
+ } else if (info == RIONET_DOORBELL_LEAVE) {
+ rionet_active[sid] = NULL;
++ nact--;
+ } else {
+ if (netif_msg_intr(rnet))
+ printk(KERN_WARNING "%s: unhandled doorbell\n",
+@@ -524,6 +537,7 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
+
+ rc = rionet_setup_netdev(rdev->net->hport, ndev);
+ rionet_check = 1;
++ nact = 0;
+ }
+
+ /*
+diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
+index 1883d39..f7e17a0 100644
+--- a/drivers/net/wireless/ath/ath9k/pci.c
++++ b/drivers/net/wireless/ath/ath9k/pci.c
+@@ -122,8 +122,9 @@ static void ath_pci_aspm_init(struct ath_common *common)
+ if (!parent)
+ return;
+
+- if (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) {
+- /* Bluetooth coexistance requires disabling ASPM. */
++ if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) &&
++ (AR_SREV_9285(ah))) {
++ /* Bluetooth coexistance requires disabling ASPM for AR9285. */
+ pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &aspm);
+ aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+ pci_write_config_byte(pdev, pos + PCI_EXP_LNKCTL, aspm);
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index dfee1b3..9005380 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -658,8 +658,10 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
+
+ /* Check if setup is sensible at all */
+ if (!pass &&
+- (primary != bus->number || secondary <= bus->number)) {
+- dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
++ (primary != bus->number || secondary <= bus->number ||
++ secondary > subordinate)) {
++ dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
++ secondary, subordinate);
+ broken = 1;
+ }
+
+diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
+index 0860181..4f1b10b 100644
+--- a/drivers/s390/scsi/zfcp_aux.c
++++ b/drivers/s390/scsi/zfcp_aux.c
+@@ -519,6 +519,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
+
+ rwlock_init(&port->unit_list_lock);
+ INIT_LIST_HEAD(&port->unit_list);
++ atomic_set(&port->units, 0);
+
+ INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
+ INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
+diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
+index 96f13ad8..79a6afe 100644
+--- a/drivers/s390/scsi/zfcp_ccw.c
++++ b/drivers/s390/scsi/zfcp_ccw.c
+@@ -39,17 +39,23 @@ void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
+ spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
+ }
+
+-static int zfcp_ccw_activate(struct ccw_device *cdev)
+-
++/**
++ * zfcp_ccw_activate - activate adapter and wait for it to finish
++ * @cdev: pointer to belonging ccw device
++ * @clear: Status flags to clear.
++ * @tag: s390dbf trace record tag
++ */
++static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
+ {
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+ if (!adapter)
+ return 0;
+
++ zfcp_erp_clear_adapter_status(adapter, clear);
+ zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
+ zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
+- "ccresu2");
++ tag);
+ zfcp_erp_wait(adapter);
+ flush_work(&adapter->scan_work);
+
+@@ -164,26 +170,29 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
+ BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
+ adapter->req_no = 0;
+
+- zfcp_ccw_activate(cdev);
++ zfcp_ccw_activate(cdev, 0, "ccsonl1");
+ zfcp_ccw_adapter_put(adapter);
+ return 0;
+ }
+
+ /**
+- * zfcp_ccw_set_offline - set_offline function of zfcp driver
++ * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish
+ * @cdev: pointer to belonging ccw device
++ * @set: Status flags to set.
++ * @tag: s390dbf trace record tag
+ *
+ * This function gets called by the common i/o layer and sets an adapter
+ * into state offline.
+ */
+-static int zfcp_ccw_set_offline(struct ccw_device *cdev)
++static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag)
+ {
+ struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+ if (!adapter)
+ return 0;
+
+- zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
++ zfcp_erp_set_adapter_status(adapter, set);
++ zfcp_erp_adapter_shutdown(adapter, 0, tag);
+ zfcp_erp_wait(adapter);
+
+ zfcp_ccw_adapter_put(adapter);
+@@ -191,6 +200,18 @@ static int zfcp_ccw_set_offline(struct ccw_device *cdev)
+ }
+
+ /**
++ * zfcp_ccw_set_offline - set_offline function of zfcp driver
++ * @cdev: pointer to belonging ccw device
++ *
++ * This function gets called by the common i/o layer and sets an adapter
++ * into state offline.
++ */
++static int zfcp_ccw_set_offline(struct ccw_device *cdev)
++{
++ return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1");
++}
++
++/**
+ * zfcp_ccw_notify - ccw notify function
+ * @cdev: pointer to belonging ccw device
+ * @event: indicates if adapter was detached or attached
+@@ -207,6 +228,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
+
+ switch (event) {
+ case CIO_GONE:
++ if (atomic_read(&adapter->status) &
++ ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
++ zfcp_dbf_hba_basic("ccnigo1", adapter);
++ break;
++ }
+ dev_warn(&cdev->dev, "The FCP device has been detached\n");
+ zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
+ break;
+@@ -216,6 +242,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
+ zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
+ break;
+ case CIO_OPER:
++ if (atomic_read(&adapter->status) &
++ ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
++ zfcp_dbf_hba_basic("ccniop1", adapter);
++ break;
++ }
+ dev_info(&cdev->dev, "The FCP device is operational again\n");
+ zfcp_erp_set_adapter_status(adapter,
+ ZFCP_STATUS_COMMON_RUNNING);
+@@ -251,6 +282,28 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
+ zfcp_ccw_adapter_put(adapter);
+ }
+
++static int zfcp_ccw_suspend(struct ccw_device *cdev)
++{
++ zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1");
++ return 0;
++}
++
++static int zfcp_ccw_thaw(struct ccw_device *cdev)
++{
++ /* trace records for thaw and final shutdown during suspend
++ can only be found in system dump until the end of suspend
++ but not after resume because it's based on the memory image
++ right after the very first suspend (freeze) callback */
++ zfcp_ccw_activate(cdev, 0, "ccthaw1");
++ return 0;
++}
++
++static int zfcp_ccw_resume(struct ccw_device *cdev)
++{
++ zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1");
++ return 0;
++}
++
+ struct ccw_driver zfcp_ccw_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+@@ -263,7 +316,7 @@ struct ccw_driver zfcp_ccw_driver = {
+ .set_offline = zfcp_ccw_set_offline,
+ .notify = zfcp_ccw_notify,
+ .shutdown = zfcp_ccw_shutdown,
+- .freeze = zfcp_ccw_set_offline,
+- .thaw = zfcp_ccw_activate,
+- .restore = zfcp_ccw_activate,
++ .freeze = zfcp_ccw_suspend,
++ .thaw = zfcp_ccw_thaw,
++ .restore = zfcp_ccw_resume,
+ };
+diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
+index fab2c25..8ed63aa 100644
+--- a/drivers/s390/scsi/zfcp_cfdc.c
++++ b/drivers/s390/scsi/zfcp_cfdc.c
+@@ -293,7 +293,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+- shost_for_each_device(sdev, port->adapter->scsi_host) {
++ shost_for_each_device(sdev, adapter->scsi_host) {
+ zfcp_sdev = sdev_to_zfcp(sdev);
+ status = atomic_read(&zfcp_sdev->status);
+ if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
+diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
+index a9a816e..79b9848 100644
+--- a/drivers/s390/scsi/zfcp_dbf.c
++++ b/drivers/s390/scsi/zfcp_dbf.c
+@@ -191,7 +191,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
+ length = min((u16)sizeof(struct qdio_buffer),
+ (u16)ZFCP_DBF_PAY_MAX_REC);
+
+- while ((char *)pl[payload->counter] && payload->counter < scount) {
++ while (payload->counter < scount && (char *)pl[payload->counter]) {
+ memcpy(payload->data, (char *)pl[payload->counter], length);
+ debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
+ payload->counter++;
+@@ -200,6 +200,26 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
+ spin_unlock_irqrestore(&dbf->pay_lock, flags);
+ }
+
++/**
++ * zfcp_dbf_hba_basic - trace event for basic adapter events
++ * @adapter: pointer to struct zfcp_adapter
++ */
++void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
++{
++ struct zfcp_dbf *dbf = adapter->dbf;
++ struct zfcp_dbf_hba *rec = &dbf->hba_buf;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dbf->hba_lock, flags);
++ memset(rec, 0, sizeof(*rec));
++
++ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
++ rec->id = ZFCP_DBF_HBA_BASIC;
++
++ debug_event(dbf->hba, 1, rec, sizeof(*rec));
++ spin_unlock_irqrestore(&dbf->hba_lock, flags);
++}
++
+ static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
+ struct zfcp_adapter *adapter,
+ struct zfcp_port *port,
+diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
+index 714f087..3ac7a4b 100644
+--- a/drivers/s390/scsi/zfcp_dbf.h
++++ b/drivers/s390/scsi/zfcp_dbf.h
+@@ -154,6 +154,7 @@ enum zfcp_dbf_hba_id {
+ ZFCP_DBF_HBA_RES = 1,
+ ZFCP_DBF_HBA_USS = 2,
+ ZFCP_DBF_HBA_BIT = 3,
++ ZFCP_DBF_HBA_BASIC = 4,
+ };
+
+ /**
+diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
+index ed5d921..f172b84 100644
+--- a/drivers/s390/scsi/zfcp_def.h
++++ b/drivers/s390/scsi/zfcp_def.h
+@@ -77,6 +77,7 @@ struct zfcp_reqlist;
+ #define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004
+ #define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
+ #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
++#define ZFCP_STATUS_ADAPTER_SUSPENDED 0x00000040
+ #define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
+ #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
+ #define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400
+@@ -204,6 +205,7 @@ struct zfcp_port {
+ struct zfcp_adapter *adapter; /* adapter used to access port */
+ struct list_head unit_list; /* head of logical unit list */
+ rwlock_t unit_list_lock; /* unit list lock */
++ atomic_t units; /* zfcp_unit count */
+ atomic_t status; /* status of this remote port */
+ u64 wwnn; /* WWNN if known */
+ u64 wwpn; /* WWPN */
+diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
+index 2302e1c..ef9e502 100644
+--- a/drivers/s390/scsi/zfcp_ext.h
++++ b/drivers/s390/scsi/zfcp_ext.h
+@@ -54,6 +54,7 @@ extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
++extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
+ extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
+ extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
+@@ -158,6 +159,7 @@ extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
+ extern struct attribute_group zfcp_sysfs_unit_attrs;
+ extern struct attribute_group zfcp_sysfs_adapter_attrs;
+ extern struct attribute_group zfcp_sysfs_port_attrs;
++extern struct mutex zfcp_sysfs_port_units_mutex;
+ extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
+ extern struct device_attribute *zfcp_sysfs_shost_attrs[];
+
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index e9a787e..8c849f0 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -219,7 +219,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
+ return;
+ }
+
+- zfcp_dbf_hba_fsf_uss("fssrh_2", req);
++ zfcp_dbf_hba_fsf_uss("fssrh_4", req);
+
+ switch (sr_buf->status_type) {
+ case FSF_STATUS_READ_PORT_CLOSED:
+@@ -771,12 +771,14 @@ out:
+ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
+ {
+ struct scsi_device *sdev = req->data;
+- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
++ struct zfcp_scsi_dev *zfcp_sdev;
+ union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
+
+ if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+ return;
+
++ zfcp_sdev = sdev_to_zfcp(sdev);
++
+ switch (req->qtcb->header.fsf_status) {
+ case FSF_PORT_HANDLE_NOT_VALID:
+ if (fsq->word[0] == fsq->word[1]) {
+@@ -885,7 +887,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
+
+ switch (header->fsf_status) {
+ case FSF_GOOD:
+- zfcp_dbf_san_res("fsscth1", req);
++ zfcp_dbf_san_res("fsscth2", req);
+ ct->status = 0;
+ break;
+ case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+@@ -1739,13 +1741,15 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
+ {
+ struct zfcp_adapter *adapter = req->adapter;
+ struct scsi_device *sdev = req->data;
+- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
++ struct zfcp_scsi_dev *zfcp_sdev;
+ struct fsf_qtcb_header *header = &req->qtcb->header;
+ struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
+
+ if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+ return;
+
++ zfcp_sdev = sdev_to_zfcp(sdev);
++
+ atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
+ ZFCP_STATUS_COMMON_ACCESS_BOXED |
+ ZFCP_STATUS_LUN_SHARED |
+@@ -1856,11 +1860,13 @@ out:
+ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
+ {
+ struct scsi_device *sdev = req->data;
+- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
++ struct zfcp_scsi_dev *zfcp_sdev;
+
+ if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
+ return;
+
++ zfcp_sdev = sdev_to_zfcp(sdev);
++
+ switch (req->qtcb->header.fsf_status) {
+ case FSF_PORT_HANDLE_NOT_VALID:
+ zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
+@@ -1950,7 +1956,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
+ {
+ struct fsf_qual_latency_info *lat_in;
+ struct latency_cont *lat = NULL;
+- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
++ struct zfcp_scsi_dev *zfcp_sdev;
+ struct zfcp_blk_drv_data blktrc;
+ int ticks = req->adapter->timer_ticks;
+
+@@ -1965,6 +1971,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
+
+ if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
+ !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
++ zfcp_sdev = sdev_to_zfcp(scsi->device);
+ blktrc.flags |= ZFCP_BLK_LAT_VALID;
+ blktrc.channel_lat = lat_in->channel_lat * ticks;
+ blktrc.fabric_lat = lat_in->fabric_lat * ticks;
+@@ -2002,12 +2009,14 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
+ {
+ struct scsi_cmnd *scmnd = req->data;
+ struct scsi_device *sdev = scmnd->device;
+- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
++ struct zfcp_scsi_dev *zfcp_sdev;
+ struct fsf_qtcb_header *header = &req->qtcb->header;
+
+ if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
+ return;
+
++ zfcp_sdev = sdev_to_zfcp(sdev);
++
+ switch (header->fsf_status) {
+ case FSF_HANDLE_MISMATCH:
+ case FSF_PORT_HANDLE_NOT_VALID:
+diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
+index e14da57..e76d003 100644
+--- a/drivers/s390/scsi/zfcp_qdio.c
++++ b/drivers/s390/scsi/zfcp_qdio.c
+@@ -102,18 +102,22 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
+ {
+ struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
+ struct zfcp_adapter *adapter = qdio->adapter;
+- struct qdio_buffer_element *sbale;
+ int sbal_no, sbal_idx;
+- void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
+- u64 req_id;
+- u8 scount;
+
+ if (unlikely(qdio_err)) {
+- memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
+ if (zfcp_adapter_multi_buffer_active(adapter)) {
++ void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
++ struct qdio_buffer_element *sbale;
++ u64 req_id;
++ u8 scount;
++
++ memset(pl, 0,
++ ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
+ sbale = qdio->res_q[idx]->element;
+ req_id = (u64) sbale->addr;
+- scount = sbale->scount + 1; /* incl. signaling SBAL */
++ scount = min(sbale->scount + 1,
++ ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
++ /* incl. signaling SBAL */
+
+ for (sbal_no = 0; sbal_no < scount; sbal_no++) {
+ sbal_idx = (idx + sbal_no) %
+diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
+index cdc4ff7..9e62210 100644
+--- a/drivers/s390/scsi/zfcp_sysfs.c
++++ b/drivers/s390/scsi/zfcp_sysfs.c
+@@ -227,6 +227,8 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
+ static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
+ zfcp_sysfs_port_rescan_store);
+
++DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
++
+ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+@@ -249,6 +251,16 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
+ else
+ retval = 0;
+
++ mutex_lock(&zfcp_sysfs_port_units_mutex);
++ if (atomic_read(&port->units) > 0) {
++ retval = -EBUSY;
++ mutex_unlock(&zfcp_sysfs_port_units_mutex);
++ goto out;
++ }
++ /* port is about to be removed, so no more unit_add */
++ atomic_set(&port->units, -1);
++ mutex_unlock(&zfcp_sysfs_port_units_mutex);
++
+ write_lock_irq(&adapter->port_list_lock);
+ list_del(&port->list);
+ write_unlock_irq(&adapter->port_list_lock);
+@@ -289,12 +301,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
+ {
+ struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+ u64 fcp_lun;
++ int retval;
+
+ if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
+ return -EINVAL;
+
+- if (zfcp_unit_add(port, fcp_lun))
+- return -EINVAL;
++ retval = zfcp_unit_add(port, fcp_lun);
++ if (retval)
++ return retval;
+
+ return count;
+ }
+diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
+index 20796eb..4e6a535 100644
+--- a/drivers/s390/scsi/zfcp_unit.c
++++ b/drivers/s390/scsi/zfcp_unit.c
+@@ -104,7 +104,7 @@ static void zfcp_unit_release(struct device *dev)
+ {
+ struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+
+- put_device(&unit->port->dev);
++ atomic_dec(&unit->port->units);
+ kfree(unit);
+ }
+
+@@ -119,16 +119,27 @@ static void zfcp_unit_release(struct device *dev)
+ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
+ {
+ struct zfcp_unit *unit;
++ int retval = 0;
++
++ mutex_lock(&zfcp_sysfs_port_units_mutex);
++ if (atomic_read(&port->units) == -1) {
++ /* port is already gone */
++ retval = -ENODEV;
++ goto out;
++ }
+
+ unit = zfcp_unit_find(port, fcp_lun);
+ if (unit) {
+ put_device(&unit->dev);
+- return -EEXIST;
++ retval = -EEXIST;
++ goto out;
+ }
+
+ unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
+- if (!unit)
+- return -ENOMEM;
++ if (!unit) {
++ retval = -ENOMEM;
++ goto out;
++ }
+
+ unit->port = port;
+ unit->fcp_lun = fcp_lun;
+@@ -139,28 +150,33 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
+ if (dev_set_name(&unit->dev, "0x%016llx",
+ (unsigned long long) fcp_lun)) {
+ kfree(unit);
+- return -ENOMEM;
++ retval = -ENOMEM;
++ goto out;
+ }
+
+- get_device(&port->dev);
+-
+ if (device_register(&unit->dev)) {
+ put_device(&unit->dev);
+- return -ENOMEM;
++ retval = -ENOMEM;
++ goto out;
+ }
+
+ if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
+ device_unregister(&unit->dev);
+- return -EINVAL;
++ retval = -EINVAL;
++ goto out;
+ }
+
++ atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */
++
+ write_lock_irq(&port->unit_list_lock);
+ list_add_tail(&unit->list, &port->unit_list);
+ write_unlock_irq(&port->unit_list_lock);
+
+ zfcp_unit_scsi_scan(unit);
+
+- return 0;
++out:
++ mutex_unlock(&zfcp_sysfs_port_units_mutex);
++ return retval;
+ }
+
+ /**
+diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
+index 7e6eca4..59fc5a1 100644
+--- a/drivers/scsi/atp870u.c
++++ b/drivers/scsi/atp870u.c
+@@ -1174,7 +1174,16 @@ wait_io1:
+ outw(val, tmport);
+ outb(2, 0x80);
+ TCM_SYNC:
+- udelay(0x800);
++ /*
++ * The funny division into multiple delays is to accomodate
++ * arches like ARM where udelay() multiplies its argument by
++ * a large number to initialize a loop counter. To avoid
++ * overflow, the maximum supported udelay is 2000 microseconds.
++ *
++ * XXX it would be more polite to find a way to use msleep()
++ */
++ mdelay(2);
++ udelay(48);
+ if ((inb(tmport) & 0x80) == 0x00) { /* bsy ? */
+ outw(0, tmport--);
+ outb(0, tmport);
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index 4ef0212..e5a4423 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -578,8 +578,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
+ h->state = TPGS_STATE_STANDBY;
+ break;
+ case TPGS_STATE_OFFLINE:
+- case TPGS_STATE_UNAVAILABLE:
+- /* Path unusable for unavailable/offline */
++ /* Path unusable */
+ err = SCSI_DH_DEV_OFFLINED;
+ break;
+ default:
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index be9aad8..22523aa 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -532,12 +532,42 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
+ c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+ }
+
++static int is_firmware_flash_cmd(u8 *cdb)
++{
++ return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
++}
++
++/*
++ * During firmware flash, the heartbeat register may not update as frequently
++ * as it should. So we dial down lockup detection during firmware flash. and
++ * dial it back up when firmware flash completes.
++ */
++#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
++#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
++static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
++ struct CommandList *c)
++{
++ if (!is_firmware_flash_cmd(c->Request.CDB))
++ return;
++ atomic_inc(&h->firmware_flash_in_progress);
++ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
++}
++
++static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
++ struct CommandList *c)
++{
++ if (is_firmware_flash_cmd(c->Request.CDB) &&
++ atomic_dec_and_test(&h->firmware_flash_in_progress))
++ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
++}
++
+ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
+ struct CommandList *c)
+ {
+ unsigned long flags;
+
+ set_performant_mode(h, c);
++ dial_down_lockup_detection_during_fw_flash(h, c);
+ spin_lock_irqsave(&h->lock, flags);
+ addQ(&h->reqQ, c);
+ h->Qdepth++;
+@@ -2926,7 +2956,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+ c->Request.Timeout = 0; /* Don't time out */
+ memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
+ c->Request.CDB[0] = cmd;
+- c->Request.CDB[1] = 0x03; /* Reset target above */
++ c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
+ /* If bytes 4-7 are zero, it means reset the */
+ /* LunID device */
+ c->Request.CDB[4] = 0x00;
+@@ -3032,6 +3062,7 @@ static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
+ static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
+ {
+ removeQ(c);
++ dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
+ if (likely(c->cmd_type == CMD_SCSI))
+ complete_scsi_command(c);
+ else if (c->cmd_type == CMD_IOCTL_PEND)
+@@ -4172,9 +4203,6 @@ static void controller_lockup_detected(struct ctlr_info *h)
+ spin_unlock_irqrestore(&h->lock, flags);
+ }
+
+-#define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ)
+-#define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2)
+-
+ static void detect_controller_lockup(struct ctlr_info *h)
+ {
+ u64 now;
+@@ -4185,7 +4213,7 @@ static void detect_controller_lockup(struct ctlr_info *h)
+ now = get_jiffies_64();
+ /* If we've received an interrupt recently, we're ok. */
+ if (time_after64(h->last_intr_timestamp +
+- (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
++ (h->heartbeat_sample_interval), now))
+ return;
+
+ /*
+@@ -4194,7 +4222,7 @@ static void detect_controller_lockup(struct ctlr_info *h)
+ * otherwise don't care about signals in this thread.
+ */
+ if (time_after64(h->last_heartbeat_timestamp +
+- (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
++ (h->heartbeat_sample_interval), now))
+ return;
+
+ /* If heartbeat has not changed since we last looked, we're not ok. */
+@@ -4236,6 +4264,7 @@ static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
+ {
+ unsigned long flags;
+
++ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
+ spin_lock_irqsave(&lockup_detector_lock, flags);
+ list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
+ spin_unlock_irqrestore(&lockup_detector_lock, flags);
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index 91edafb..c721509 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -124,6 +124,8 @@ struct ctlr_info {
+ u64 last_intr_timestamp;
+ u32 last_heartbeat;
+ u64 last_heartbeat_timestamp;
++ u32 heartbeat_sample_interval;
++ atomic_t firmware_flash_in_progress;
+ u32 lockup_detected;
+ struct list_head lockup_list;
+ };
+diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
+index 3fd4715..e4ea0a3 100644
+--- a/drivers/scsi/hpsa_cmd.h
++++ b/drivers/scsi/hpsa_cmd.h
+@@ -163,6 +163,7 @@ struct SenseSubsystem_info {
+ #define BMIC_WRITE 0x27
+ #define BMIC_CACHE_FLUSH 0xc2
+ #define HPSA_CACHE_FLUSH 0x01 /* C2 was already being used by HPSA */
++#define BMIC_FLASH_FIRMWARE 0xF7
+
+ /* Command List Structure */
+ union SCSI3Addr {
+diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
+index 3d391dc..36aca4b 100644
+--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
++++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
+@@ -1547,6 +1547,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
+
+ host_config = &evt_struct->iu.mad.host_config;
+
++ /* The transport length field is only 16-bit */
++ length = min(0xffff, length);
++
+ /* Set up a lun reset SRP command */
+ memset(host_config, 0x00, sizeof(*host_config));
+ host_config->common.type = VIOSRP_HOST_CONFIG_TYPE;
+diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
+index 83d08b6..5c8b0dc 100644
+--- a/drivers/scsi/isci/init.c
++++ b/drivers/scsi/isci/init.c
+@@ -469,7 +469,6 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
+ if (sci_oem_parameters_validate(&orom->ctrl[i])) {
+ dev_warn(&pdev->dev,
+ "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
+- devm_kfree(&pdev->dev, orom);
+ orom = NULL;
+ break;
+ }
+diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
+index b5f4341..7cd637d 100644
+--- a/drivers/scsi/isci/probe_roms.c
++++ b/drivers/scsi/isci/probe_roms.c
+@@ -104,7 +104,6 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
+
+ if (i >= len) {
+ dev_err(&pdev->dev, "oprom parse error\n");
+- devm_kfree(&pdev->dev, rom);
+ rom = NULL;
+ }
+ pci_unmap_biosrom(oprom);
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index bb7c482..08d48a3 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -1023,33 +1023,31 @@ static void __scsi_remove_target(struct scsi_target *starget)
+ void scsi_remove_target(struct device *dev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev->parent);
+- struct scsi_target *starget, *found;
++ struct scsi_target *starget, *last = NULL;
+ unsigned long flags;
+
+- restart:
+- found = NULL;
++ /* remove targets being careful to lookup next entry before
++ * deleting the last
++ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry(starget, &shost->__targets, siblings) {
+ if (starget->state == STARGET_DEL)
+ continue;
+ if (starget->dev.parent == dev || &starget->dev == dev) {
+- found = starget;
+- found->reap_ref++;
+- break;
++ /* assuming new targets arrive at the end */
++ starget->reap_ref++;
++ spin_unlock_irqrestore(shost->host_lock, flags);
++ if (last)
++ scsi_target_reap(last);
++ last = starget;
++ __scsi_remove_target(starget);
++ spin_lock_irqsave(shost->host_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+- if (found) {
+- __scsi_remove_target(found);
+- scsi_target_reap(found);
+- /* in the case where @dev has multiple starget children,
+- * continue removing.
+- *
+- * FIXME: does such a case exist?
+- */
+- goto restart;
+- }
++ if (last)
++ scsi_target_reap(last);
+ }
+ EXPORT_SYMBOL(scsi_remove_target);
+
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index 4ad2c0e..9465bce 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -843,7 +843,7 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
+ ret = -EAGAIN;
+ break;
+ }
+- ret = s->async->inttrig(dev, s, insn->data[0]);
++ ret = s->async->inttrig(dev, s, data[0]);
+ if (ret >= 0)
+ ret = 1;
+ break;
+@@ -1088,7 +1088,6 @@ static int do_cmd_ioctl(struct comedi_device *dev,
+ goto cleanup;
+ }
+
+- kfree(async->cmd.chanlist);
+ async->cmd = user_cmd;
+ async->cmd.data = NULL;
+ /* load channel/gain list */
+@@ -1833,6 +1832,8 @@ void do_become_nonbusy(struct comedi_device *dev, struct comedi_subdevice *s)
+ if (async) {
+ comedi_reset_async_buf(async);
+ async->inttrig = NULL;
++ kfree(async->cmd.chanlist);
++ async->cmd.chanlist = NULL;
+ } else {
+ printk(KERN_ERR
+ "BUG: (?) do_become_nonbusy called with async=0\n");
+diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
+index 8d98cf4..c8b7eed 100644
+--- a/drivers/staging/comedi/drivers/jr3_pci.c
++++ b/drivers/staging/comedi/drivers/jr3_pci.c
+@@ -913,7 +913,7 @@ static int jr3_pci_attach(struct comedi_device *dev,
+ }
+
+ /* Reset DSP card */
+- devpriv->iobase->channel[0].reset = 0;
++ writel(0, &devpriv->iobase->channel[0].reset);
+
+ result = comedi_load_firmware(dev, "jr3pci.idm", jr3_download_firmware);
+ printk("Firmare load %d\n", result);
+diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
+index 23fc64b..c72128f 100644
+--- a/drivers/staging/comedi/drivers/s626.c
++++ b/drivers/staging/comedi/drivers/s626.c
+@@ -2370,7 +2370,7 @@ static int s626_enc_insn_config(struct comedi_device *dev,
+ /* (data==NULL) ? (Preloadvalue=0) : (Preloadvalue=data[0]); */
+
+ k->SetMode(dev, k, Setup, TRUE);
+- Preload(dev, k, *(insn->data));
++ Preload(dev, k, data[0]);
+ k->PulseIndex(dev, k);
+ SetLatchSource(dev, k, valueSrclatch);
+ k->SetEnable(dev, k, (uint16_t) (enab != 0));
+diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
+index 42cdafe..b5130c8 100644
+--- a/drivers/staging/speakup/speakup_soft.c
++++ b/drivers/staging/speakup/speakup_soft.c
+@@ -40,7 +40,7 @@ static int softsynth_is_alive(struct spk_synth *synth);
+ static unsigned char get_index(void);
+
+ static struct miscdevice synth_device;
+-static int initialized;
++static int init_pos;
+ static int misc_registered;
+
+ static struct var_t vars[] = {
+@@ -194,7 +194,7 @@ static int softsynth_close(struct inode *inode, struct file *fp)
+ unsigned long flags;
+ spk_lock(flags);
+ synth_soft.alive = 0;
+- initialized = 0;
++ init_pos = 0;
+ spk_unlock(flags);
+ /* Make sure we let applications go before leaving */
+ speakup_start_ttys();
+@@ -239,13 +239,8 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
+ ch = '\x18';
+ } else if (synth_buffer_empty()) {
+ break;
+- } else if (!initialized) {
+- if (*init) {
+- ch = *init;
+- init++;
+- } else {
+- initialized = 1;
+- }
++ } else if (init[init_pos]) {
++ ch = init[init_pos++];
+ } else {
+ ch = synth_buffer_getc();
+ }
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 2ff1255..f35cb10 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -3204,7 +3204,6 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+ len += 1;
+
+ if ((len + payload_len) > buffer_len) {
+- spin_unlock(&tiqn->tiqn_tpg_lock);
+ end_of_buf = 1;
+ goto eob;
+ }
+@@ -3357,6 +3356,7 @@ static int iscsit_send_reject(
+ hdr->opcode = ISCSI_OP_REJECT;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hton24(hdr->dlength, ISCSI_HDR_LEN);
++ hdr->ffffffff = 0xffffffff;
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
+index 0f68197..dae283f 100644
+--- a/drivers/target/iscsi/iscsi_target_core.h
++++ b/drivers/target/iscsi/iscsi_target_core.h
+@@ -25,10 +25,10 @@
+ #define NA_DATAOUT_TIMEOUT_RETRIES 5
+ #define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
+ #define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
+-#define NA_NOPIN_TIMEOUT 5
++#define NA_NOPIN_TIMEOUT 15
+ #define NA_NOPIN_TIMEOUT_MAX 60
+ #define NA_NOPIN_TIMEOUT_MIN 3
+-#define NA_NOPIN_RESPONSE_TIMEOUT 5
++#define NA_NOPIN_RESPONSE_TIMEOUT 30
+ #define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60
+ #define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3
+ #define NA_RANDOM_DATAIN_PDU_OFFSETS 0
+diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
+index d4cf2cd..309f14c 100644
+--- a/drivers/target/iscsi/iscsi_target_tpg.c
++++ b/drivers/target/iscsi/iscsi_target_tpg.c
+@@ -674,6 +674,12 @@ int iscsit_ta_generate_node_acls(
+ pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
+ tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
+
++ if (flag == 1 && a->cache_dynamic_acls == 0) {
++ pr_debug("Explicitly setting cache_dynamic_acls=1 when "
++ "generate_node_acls=1\n");
++ a->cache_dynamic_acls = 1;
++ }
++
+ return 0;
+ }
+
+@@ -713,6 +719,12 @@ int iscsit_ta_cache_dynamic_acls(
+ return -EINVAL;
+ }
+
++ if (a->generate_node_acls == 1 && flag == 0) {
++ pr_debug("Skipping cache_dynamic_acls=0 when"
++ " generate_node_acls=1\n");
++ return 0;
++ }
++
+ a->cache_dynamic_acls = flag;
+ pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
+ " ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
+diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
+index 93d4f6a..0b01bfc 100644
+--- a/drivers/target/target_core_configfs.c
++++ b/drivers/target/target_core_configfs.c
+@@ -3123,6 +3123,7 @@ static int __init target_core_init_configfs(void)
+ GFP_KERNEL);
+ if (!target_cg->default_groups) {
+ pr_err("Unable to allocate target_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+
+@@ -3138,6 +3139,7 @@ static int __init target_core_init_configfs(void)
+ GFP_KERNEL);
+ if (!hba_cg->default_groups) {
+ pr_err("Unable to allocate hba_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+ config_group_init_type_name(&alua_group,
+@@ -3153,6 +3155,7 @@ static int __init target_core_init_configfs(void)
+ GFP_KERNEL);
+ if (!alua_cg->default_groups) {
+ pr_err("Unable to allocate alua_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+
+@@ -3164,14 +3167,17 @@ static int __init target_core_init_configfs(void)
+ * Add core/alua/lu_gps/default_lu_gp
+ */
+ lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
+- if (IS_ERR(lu_gp))
++ if (IS_ERR(lu_gp)) {
++ ret = -ENOMEM;
+ goto out_global;
++ }
+
+ lu_gp_cg = &alua_lu_gps_group;
+ lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!lu_gp_cg->default_groups) {
+ pr_err("Unable to allocate lu_gp_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index 455a251..cafa477 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -139,6 +139,19 @@ static struct se_device *fd_create_virtdevice(
+ * of pure timestamp updates.
+ */
+ flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
++ /*
++ * Optionally allow fd_buffered_io=1 to be enabled for people
++ * who want use the fs buffer cache as an WriteCache mechanism.
++ *
++ * This means that in event of a hard failure, there is a risk
++ * of silent data-loss if the SCSI client has *not* performed a
++ * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
++ * to write-out the entire device cache.
++ */
++ if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
++ pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
++ flags &= ~O_DSYNC;
++ }
+
+ file = filp_open(dev_p, flags, 0600);
+ if (IS_ERR(file)) {
+@@ -206,6 +219,12 @@ static struct se_device *fd_create_virtdevice(
+ if (!dev)
+ goto fail;
+
++ if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
++ pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
++ " with FDBD_HAS_BUFFERED_IO_WCE\n");
++ dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1;
++ }
++
+ fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
+ fd_dev->fd_queue_depth = dev->queue_depth;
+
+@@ -450,6 +469,7 @@ enum {
+ static match_table_t tokens = {
+ {Opt_fd_dev_name, "fd_dev_name=%s"},
+ {Opt_fd_dev_size, "fd_dev_size=%s"},
++ {Opt_fd_buffered_io, "fd_buffered_io=%d"},
+ {Opt_err, NULL}
+ };
+
+@@ -461,7 +481,7 @@ static ssize_t fd_set_configfs_dev_params(
+ struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+ char *orig, *ptr, *arg_p, *opts;
+ substring_t args[MAX_OPT_ARGS];
+- int ret = 0, token;
++ int ret = 0, arg, token;
+
+ opts = kstrdup(page, GFP_KERNEL);
+ if (!opts)
+@@ -505,6 +525,19 @@ static ssize_t fd_set_configfs_dev_params(
+ " bytes\n", fd_dev->fd_dev_size);
+ fd_dev->fbd_flags |= FBDF_HAS_SIZE;
+ break;
++ case Opt_fd_buffered_io:
++ match_int(args, &arg);
++ if (arg != 1) {
++ pr_err("bogus fd_buffered_io=%d value\n", arg);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ pr_debug("FILEIO: Using buffered I/O"
++ " operations for struct fd_dev\n");
++
++ fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
++ break;
+ default:
+ break;
+ }
+@@ -536,8 +569,10 @@ static ssize_t fd_show_configfs_dev_params(
+ ssize_t bl = 0;
+
+ bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
+- bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n",
+- fd_dev->fd_dev_name, fd_dev->fd_dev_size);
++ bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
++ fd_dev->fd_dev_name, fd_dev->fd_dev_size,
++ (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
++ "Buffered-WCE" : "O_DSYNC");
+ return bl;
+ }
+
+diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
+index 53ece69..6b1b6a9 100644
+--- a/drivers/target/target_core_file.h
++++ b/drivers/target/target_core_file.h
+@@ -18,6 +18,7 @@ struct fd_request {
+
+ #define FBDF_HAS_PATH 0x01
+ #define FBDF_HAS_SIZE 0x02
++#define FDBD_HAS_BUFFERED_IO_WCE 0x04
+
+ struct fd_dev {
+ u32 fbd_flags;
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index fc7bbba..d190269 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -108,7 +108,7 @@ struct gsm_mux_net {
+ */
+
+ struct gsm_msg {
+- struct gsm_msg *next;
++ struct list_head list;
+ u8 addr; /* DLCI address + flags */
+ u8 ctrl; /* Control byte + flags */
+ unsigned int len; /* Length of data block (can be zero) */
+@@ -245,8 +245,7 @@ struct gsm_mux {
+ unsigned int tx_bytes; /* TX data outstanding */
+ #define TX_THRESH_HI 8192
+ #define TX_THRESH_LO 2048
+- struct gsm_msg *tx_head; /* Pending data packets */
+- struct gsm_msg *tx_tail;
++ struct list_head tx_list; /* Pending data packets */
+
+ /* Control messages */
+ struct timer_list t2_timer; /* Retransmit timer for commands */
+@@ -663,7 +662,7 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
+ m->len = len;
+ m->addr = addr;
+ m->ctrl = ctrl;
+- m->next = NULL;
++ INIT_LIST_HEAD(&m->list);
+ return m;
+ }
+
+@@ -673,22 +672,21 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
+ *
+ * The tty device has called us to indicate that room has appeared in
+ * the transmit queue. Ram more data into the pipe if we have any
++ * If we have been flow-stopped by a CMD_FCOFF, then we can only
++ * send messages on DLCI0 until CMD_FCON
+ *
+ * FIXME: lock against link layer control transmissions
+ */
+
+ static void gsm_data_kick(struct gsm_mux *gsm)
+ {
+- struct gsm_msg *msg = gsm->tx_head;
++ struct gsm_msg *msg, *nmsg;
+ int len;
+ int skip_sof = 0;
+
+- /* FIXME: We need to apply this solely to data messages */
+- if (gsm->constipated)
+- return;
+-
+- while (gsm->tx_head != NULL) {
+- msg = gsm->tx_head;
++ list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) {
++ if (gsm->constipated && msg->addr)
++ continue;
+ if (gsm->encoding != 0) {
+ gsm->txframe[0] = GSM1_SOF;
+ len = gsm_stuff_frame(msg->data,
+@@ -711,14 +709,13 @@ static void gsm_data_kick(struct gsm_mux *gsm)
+ len - skip_sof) < 0)
+ break;
+ /* FIXME: Can eliminate one SOF in many more cases */
+- gsm->tx_head = msg->next;
+- if (gsm->tx_head == NULL)
+- gsm->tx_tail = NULL;
+ gsm->tx_bytes -= msg->len;
+- kfree(msg);
+ /* For a burst of frames skip the extra SOF within the
+ burst */
+ skip_sof = 1;
++
++ list_del(&msg->list);
++ kfree(msg);
+ }
+ }
+
+@@ -768,11 +765,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
+ msg->data = dp;
+
+ /* Add to the actual output queue */
+- if (gsm->tx_tail)
+- gsm->tx_tail->next = msg;
+- else
+- gsm->tx_head = msg;
+- gsm->tx_tail = msg;
++ list_add_tail(&msg->list, &gsm->tx_list);
+ gsm->tx_bytes += msg->len;
+ gsm_data_kick(gsm);
+ }
+@@ -875,7 +868,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+
+ /* dlci->skb is locked by tx_lock */
+ if (dlci->skb == NULL) {
+- dlci->skb = skb_dequeue(&dlci->skb_list);
++ dlci->skb = skb_dequeue_tail(&dlci->skb_list);
+ if (dlci->skb == NULL)
+ return 0;
+ first = 1;
+@@ -886,7 +879,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+ if (len > gsm->mtu) {
+ if (dlci->adaption == 3) {
+ /* Over long frame, bin it */
+- kfree_skb(dlci->skb);
++ dev_kfree_skb_any(dlci->skb);
+ dlci->skb = NULL;
+ return 0;
+ }
+@@ -899,8 +892,11 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+
+ /* FIXME: need a timer or something to kick this so it can't
+ get stuck with no work outstanding and no buffer free */
+- if (msg == NULL)
++ if (msg == NULL) {
++ skb_queue_tail(&dlci->skb_list, dlci->skb);
++ dlci->skb = NULL;
+ return -ENOMEM;
++ }
+ dp = msg->data;
+
+ if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */
+@@ -912,7 +908,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
+ skb_pull(dlci->skb, len);
+ __gsm_data_queue(dlci, msg);
+ if (last) {
+- kfree_skb(dlci->skb);
++ dev_kfree_skb_any(dlci->skb);
+ dlci->skb = NULL;
+ }
+ return size;
+@@ -971,16 +967,22 @@ static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
+ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
+ {
+ unsigned long flags;
++ int sweep;
++
++ if (dlci->constipated)
++ return;
+
+ spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ /* If we have nothing running then we need to fire up */
++ sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO);
+ if (dlci->gsm->tx_bytes == 0) {
+ if (dlci->net)
+ gsm_dlci_data_output_framed(dlci->gsm, dlci);
+ else
+ gsm_dlci_data_output(dlci->gsm, dlci);
+- } else if (dlci->gsm->tx_bytes < TX_THRESH_LO)
+- gsm_dlci_data_sweep(dlci->gsm);
++ }
++ if (sweep)
++ gsm_dlci_data_sweep(dlci->gsm);
+ spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+ }
+
+@@ -1027,6 +1029,7 @@ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
+ {
+ int mlines = 0;
+ u8 brk = 0;
++ int fc;
+
+ /* The modem status command can either contain one octet (v.24 signals)
+ or two octets (v.24 signals + break signals). The length field will
+@@ -1038,19 +1041,21 @@ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
+ else {
+ brk = modem & 0x7f;
+ modem = (modem >> 7) & 0x7f;
+- };
++ }
+
+ /* Flow control/ready to communicate */
+- if (modem & MDM_FC) {
++ fc = (modem & MDM_FC) || !(modem & MDM_RTR);
++ if (fc && !dlci->constipated) {
+ /* Need to throttle our output on this device */
+ dlci->constipated = 1;
+- }
+- if (modem & MDM_RTC) {
+- mlines |= TIOCM_DSR | TIOCM_DTR;
++ } else if (!fc && dlci->constipated) {
+ dlci->constipated = 0;
+ gsm_dlci_data_kick(dlci);
+ }
++
+ /* Map modem bits */
++ if (modem & MDM_RTC)
++ mlines |= TIOCM_DSR | TIOCM_DTR;
+ if (modem & MDM_RTR)
+ mlines |= TIOCM_RTS | TIOCM_CTS;
+ if (modem & MDM_IC)
+@@ -1190,6 +1195,8 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
+ u8 *data, int clen)
+ {
+ u8 buf[1];
++ unsigned long flags;
++
+ switch (command) {
+ case CMD_CLD: {
+ struct gsm_dlci *dlci = gsm->dlci[0];
+@@ -1206,16 +1213,18 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
+ gsm_control_reply(gsm, CMD_TEST, data, clen);
+ break;
+ case CMD_FCON:
+- /* Modem wants us to STFU */
+- gsm->constipated = 1;
+- gsm_control_reply(gsm, CMD_FCON, NULL, 0);
+- break;
+- case CMD_FCOFF:
+ /* Modem can accept data again */
+ gsm->constipated = 0;
+- gsm_control_reply(gsm, CMD_FCOFF, NULL, 0);
++ gsm_control_reply(gsm, CMD_FCON, NULL, 0);
+ /* Kick the link in case it is idling */
++ spin_lock_irqsave(&gsm->tx_lock, flags);
+ gsm_data_kick(gsm);
++ spin_unlock_irqrestore(&gsm->tx_lock, flags);
++ break;
++ case CMD_FCOFF:
++ /* Modem wants us to STFU */
++ gsm->constipated = 1;
++ gsm_control_reply(gsm, CMD_FCOFF, NULL, 0);
+ break;
+ case CMD_MSC:
+ /* Out of band modem line change indicator for a DLCI */
+@@ -1668,7 +1677,7 @@ static void gsm_dlci_free(struct kref *ref)
+ dlci->gsm->dlci[dlci->addr] = NULL;
+ kfifo_free(dlci->fifo);
+ while ((dlci->skb = skb_dequeue(&dlci->skb_list)))
+- kfree_skb(dlci->skb);
++ dev_kfree_skb(dlci->skb);
+ kfree(dlci);
+ }
+
+@@ -2007,7 +2016,7 @@ void gsm_cleanup_mux(struct gsm_mux *gsm)
+ {
+ int i;
+ struct gsm_dlci *dlci = gsm->dlci[0];
+- struct gsm_msg *txq;
++ struct gsm_msg *txq, *ntxq;
+ struct gsm_control *gc;
+
+ gsm->dead = 1;
+@@ -2042,11 +2051,9 @@ void gsm_cleanup_mux(struct gsm_mux *gsm)
+ if (gsm->dlci[i])
+ gsm_dlci_release(gsm->dlci[i]);
+ /* Now wipe the queues */
+- for (txq = gsm->tx_head; txq != NULL; txq = gsm->tx_head) {
+- gsm->tx_head = txq->next;
++ list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list)
+ kfree(txq);
+- }
+- gsm->tx_tail = NULL;
++ INIT_LIST_HEAD(&gsm->tx_list);
+ }
+ EXPORT_SYMBOL_GPL(gsm_cleanup_mux);
+
+@@ -2157,6 +2164,7 @@ struct gsm_mux *gsm_alloc_mux(void)
+ }
+ spin_lock_init(&gsm->lock);
+ kref_init(&gsm->ref);
++ INIT_LIST_HEAD(&gsm->tx_list);
+
+ gsm->t1 = T1;
+ gsm->t2 = T2;
+@@ -2273,7 +2281,7 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ gsm->error(gsm, *dp, flags);
+ break;
+ default:
+- WARN_ONCE("%s: unknown flag %d\n",
++ WARN_ONCE(1, "%s: unknown flag %d\n",
+ tty_name(tty, buf), flags);
+ break;
+ }
+@@ -2377,12 +2385,12 @@ static void gsmld_write_wakeup(struct tty_struct *tty)
+
+ /* Queue poll */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
++ spin_lock_irqsave(&gsm->tx_lock, flags);
+ gsm_data_kick(gsm);
+ if (gsm->tx_bytes < TX_THRESH_LO) {
+- spin_lock_irqsave(&gsm->tx_lock, flags);
+ gsm_dlci_data_sweep(gsm);
+- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ }
++ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ }
+
+ /**
+@@ -2889,6 +2897,10 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
+ gsm = gsm_mux[mux];
+ if (gsm->dead)
+ return -EL2HLT;
++ /* If DLCI 0 is not yet fully open return an error. This is ok from a locking
++ perspective as we don't have to worry about this if DLCI0 is lost */
++ if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN)
++ return -EL2NSYNC;
+ dlci = gsm->dlci[line];
+ if (dlci == NULL)
+ dlci = gsm_dlci_alloc(gsm, line);
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 39d6ab6..8481aae 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1728,7 +1728,8 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+
+ do_it_again:
+
+- BUG_ON(!tty->read_buf);
++ if (WARN_ON(!tty->read_buf))
++ return -EAGAIN;
+
+ c = job_control(tty, file);
+ if (c < 0)
+diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
+index 482d51e..e7d82c1 100644
+--- a/drivers/tty/serial/8250_pci.c
++++ b/drivers/tty/serial/8250_pci.c
+@@ -1118,6 +1118,8 @@ pci_xr17c154_setup(struct serial_private *priv,
+ #define PCI_SUBDEVICE_ID_OCTPRO422 0x0208
+ #define PCI_SUBDEVICE_ID_POCTAL232 0x0308
+ #define PCI_SUBDEVICE_ID_POCTAL422 0x0408
++#define PCI_SUBDEVICE_ID_SIIG_DUAL_00 0x2500
++#define PCI_SUBDEVICE_ID_SIIG_DUAL_30 0x2530
+ #define PCI_VENDOR_ID_ADVANTECH 0x13fe
+ #define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66
+ #define PCI_DEVICE_ID_ADVANTECH_PCI3620 0x3620
+@@ -3168,8 +3170,11 @@ static struct pci_device_id serial_pci_tbl[] = {
+ * For now just used the hex ID 0x950a.
+ */
+ { PCI_VENDOR_ID_OXSEMI, 0x950a,
+- PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL, 0, 0,
+- pbn_b0_2_115200 },
++ PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_00,
++ 0, 0, pbn_b0_2_115200 },
++ { PCI_VENDOR_ID_OXSEMI, 0x950a,
++ PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_30,
++ 0, 0, pbn_b0_2_115200 },
+ { PCI_VENDOR_ID_OXSEMI, 0x950a,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_2_1130000 },
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 6da8cf8..fe9f111 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1627,13 +1627,26 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
+ old_cr &= ~ST_UART011_CR_OVSFACT;
+ }
+
++ /*
++ * Workaround for the ST Micro oversampling variants to
++ * increase the bitrate slightly, by lowering the divisor,
++ * to avoid delayed sampling of start bit at high speeds,
++ * else we see data corruption.
++ */
++ if (uap->vendor->oversampling) {
++ if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
++ quot -= 1;
++ else if ((baud > 3250000) && (quot > 2))
++ quot -= 2;
++ }
+ /* Set baud rate */
+ writew(quot & 0x3f, port->membase + UART011_FBRD);
+ writew(quot >> 6, port->membase + UART011_IBRD);
+
+ /*
+ * ----------v----------v----------v----------v-----
+- * NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L
++ * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
++ * UART011_FBRD & UART011_IBRD.
+ * ----------^----------^----------^----------^-----
+ */
+ writew(lcr_h, port->membase + uap->lcrh_rx);
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index a40ab98..4cddbfc 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1680,6 +1680,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ {
+ struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct dev_info *dev_info, *next;
++ struct xhci_cd *cur_cd, *next_cd;
+ unsigned long flags;
+ int size;
+ int i, j, num_ports;
+@@ -1701,6 +1702,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ xhci_ring_free(xhci, xhci->cmd_ring);
+ xhci->cmd_ring = NULL;
+ xhci_dbg(xhci, "Freed command ring\n");
++ list_for_each_entry_safe(cur_cd, next_cd,
++ &xhci->cancel_cmd_list, cancel_cmd_list) {
++ list_del(&cur_cd->cancel_cmd_list);
++ kfree(cur_cd);
++ }
+
+ for (i = 1; i < MAX_HC_SLOTS; ++i)
+ xhci_free_virt_device(xhci, i);
+@@ -2246,6 +2252,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
+ if (!xhci->cmd_ring)
+ goto fail;
++ INIT_LIST_HEAD(&xhci->cancel_cmd_list);
+ xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
+ xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+ (unsigned long long)xhci->cmd_ring->first_seg->dma);
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index bddcbfc..4ed7572 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -99,6 +99,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ * PPT chipsets.
+ */
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
++ xhci->quirks |= XHCI_AVOID_BEI;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index c7c530c..950aef8 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -309,12 +309,123 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ /* Ring the host controller doorbell after placing a command on the ring */
+ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
+ {
++ if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
++ return;
++
+ xhci_dbg(xhci, "// Ding dong!\n");
+ xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
+ /* Flush PCI posted writes */
+ xhci_readl(xhci, &xhci->dba->doorbell[0]);
+ }
+
++static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
++{
++ u64 temp_64;
++ int ret;
++
++ xhci_dbg(xhci, "Abort command ring\n");
++
++ if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
++ xhci_dbg(xhci, "The command ring isn't running, "
++ "Have the command ring been stopped?\n");
++ return 0;
++ }
++
++ temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
++ if (!(temp_64 & CMD_RING_RUNNING)) {
++ xhci_dbg(xhci, "Command ring had been stopped\n");
++ return 0;
++ }
++ xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
++ xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
++ &xhci->op_regs->cmd_ring);
++
++ /* Section 4.6.1.2 of xHCI 1.0 spec says software should
++ * time the completion od all xHCI commands, including
++ * the Command Abort operation. If software doesn't see
++ * CRR negated in a timely manner (e.g. longer than 5
++ * seconds), then it should assume that the there are
++ * larger problems with the xHC and assert HCRST.
++ */
++ ret = handshake(xhci, &xhci->op_regs->cmd_ring,
++ CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
++ if (ret < 0) {
++ xhci_err(xhci, "Stopped the command ring failed, "
++ "maybe the host is dead\n");
++ xhci->xhc_state |= XHCI_STATE_DYING;
++ xhci_quiesce(xhci);
++ xhci_halt(xhci);
++ return -ESHUTDOWN;
++ }
++
++ return 0;
++}
++
++static int xhci_queue_cd(struct xhci_hcd *xhci,
++ struct xhci_command *command,
++ union xhci_trb *cmd_trb)
++{
++ struct xhci_cd *cd;
++ cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
++ if (!cd)
++ return -ENOMEM;
++ INIT_LIST_HEAD(&cd->cancel_cmd_list);
++
++ cd->command = command;
++ cd->cmd_trb = cmd_trb;
++ list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
++
++ return 0;
++}
++
++/*
++ * Cancel the command which has issue.
++ *
++ * Some commands may hang due to waiting for acknowledgement from
++ * usb device. It is outside of the xHC's ability to control and
++ * will cause the command ring is blocked. When it occurs software
++ * should intervene to recover the command ring.
++ * See Section 4.6.1.1 and 4.6.1.2
++ */
++int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
++ union xhci_trb *cmd_trb)
++{
++ int retval = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&xhci->lock, flags);
++
++ if (xhci->xhc_state & XHCI_STATE_DYING) {
++ xhci_warn(xhci, "Abort the command ring,"
++ " but the xHCI is dead.\n");
++ retval = -ESHUTDOWN;
++ goto fail;
++ }
++
++ /* queue the cmd desriptor to cancel_cmd_list */
++ retval = xhci_queue_cd(xhci, command, cmd_trb);
++ if (retval) {
++ xhci_warn(xhci, "Queuing command descriptor failed.\n");
++ goto fail;
++ }
++
++ /* abort command ring */
++ retval = xhci_abort_cmd_ring(xhci);
++ if (retval) {
++ xhci_err(xhci, "Abort command ring failed\n");
++ if (unlikely(retval == -ESHUTDOWN)) {
++ spin_unlock_irqrestore(&xhci->lock, flags);
++ usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
++ xhci_dbg(xhci, "xHCI host controller is dead.\n");
++ return retval;
++ }
++ }
++
++fail:
++ spin_unlock_irqrestore(&xhci->lock, flags);
++ return retval;
++}
++
+ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
+ unsigned int slot_id,
+ unsigned int ep_index,
+@@ -1043,6 +1154,20 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
+ }
+ }
+
++/* Complete the command and detele it from the devcie's command queue.
++ */
++static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
++ struct xhci_command *command, u32 status)
++{
++ command->status = status;
++ list_del(&command->cmd_list);
++ if (command->completion)
++ complete(command->completion);
++ else
++ xhci_free_command(xhci, command);
++}
++
++
+ /* Check to see if a command in the device's command queue matches this one.
+ * Signal the completion or free the command, and return 1. Return 0 if the
+ * completed command isn't at the head of the command list.
+@@ -1061,15 +1186,144 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
+ if (xhci->cmd_ring->dequeue != command->command_trb)
+ return 0;
+
+- command->status = GET_COMP_CODE(le32_to_cpu(event->status));
+- list_del(&command->cmd_list);
+- if (command->completion)
+- complete(command->completion);
+- else
+- xhci_free_command(xhci, command);
++ xhci_complete_cmd_in_cmd_wait_list(xhci, command,
++ GET_COMP_CODE(le32_to_cpu(event->status)));
+ return 1;
+ }
+
++/*
++ * Finding the command trb need to be cancelled and modifying it to
++ * NO OP command. And if the command is in device's command wait
++ * list, finishing and freeing it.
++ *
++ * If we can't find the command trb, we think it had already been
++ * executed.
++ */
++static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
++{
++ struct xhci_segment *cur_seg;
++ union xhci_trb *cmd_trb;
++ u32 cycle_state;
++
++ if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
++ return;
++
++ /* find the current segment of command ring */
++ cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
++ xhci->cmd_ring->dequeue, &cycle_state);
++
++ /* find the command trb matched by cd from command ring */
++ for (cmd_trb = xhci->cmd_ring->dequeue;
++ cmd_trb != xhci->cmd_ring->enqueue;
++ next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
++ /* If the trb is link trb, continue */
++ if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
++ continue;
++
++ if (cur_cd->cmd_trb == cmd_trb) {
++
++ /* If the command in device's command list, we should
++ * finish it and free the command structure.
++ */
++ if (cur_cd->command)
++ xhci_complete_cmd_in_cmd_wait_list(xhci,
++ cur_cd->command, COMP_CMD_STOP);
++
++ /* get cycle state from the origin command trb */
++ cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
++ & TRB_CYCLE;
++
++ /* modify the command trb to NO OP command */
++ cmd_trb->generic.field[0] = 0;
++ cmd_trb->generic.field[1] = 0;
++ cmd_trb->generic.field[2] = 0;
++ cmd_trb->generic.field[3] = cpu_to_le32(
++ TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
++ break;
++ }
++ }
++}
++
++static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
++{
++ struct xhci_cd *cur_cd, *next_cd;
++
++ if (list_empty(&xhci->cancel_cmd_list))
++ return;
++
++ list_for_each_entry_safe(cur_cd, next_cd,
++ &xhci->cancel_cmd_list, cancel_cmd_list) {
++ xhci_cmd_to_noop(xhci, cur_cd);
++ list_del(&cur_cd->cancel_cmd_list);
++ kfree(cur_cd);
++ }
++}
++
++/*
++ * traversing the cancel_cmd_list. If the command descriptor according
++ * to cmd_trb is found, the function free it and return 1, otherwise
++ * return 0.
++ */
++static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
++ union xhci_trb *cmd_trb)
++{
++ struct xhci_cd *cur_cd, *next_cd;
++
++ if (list_empty(&xhci->cancel_cmd_list))
++ return 0;
++
++ list_for_each_entry_safe(cur_cd, next_cd,
++ &xhci->cancel_cmd_list, cancel_cmd_list) {
++ if (cur_cd->cmd_trb == cmd_trb) {
++ if (cur_cd->command)
++ xhci_complete_cmd_in_cmd_wait_list(xhci,
++ cur_cd->command, COMP_CMD_STOP);
++ list_del(&cur_cd->cancel_cmd_list);
++ kfree(cur_cd);
++ return 1;
++ }
++ }
++
++ return 0;
++}
++
++/*
++ * If the cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the
++ * trb pointed by the command ring dequeue pointer is the trb we want to
++ * cancel or not. And if the cmd_trb_comp_code is COMP_CMD_STOP, we will
++ * traverse the cancel_cmd_list to trun the all of the commands according
++ * to command descriptor to NO-OP trb.
++ */
++static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
++ int cmd_trb_comp_code)
++{
++ int cur_trb_is_good = 0;
++
++ /* Searching the cmd trb pointed by the command ring dequeue
++ * pointer in command descriptor list. If it is found, free it.
++ */
++ cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
++ xhci->cmd_ring->dequeue);
++
++ if (cmd_trb_comp_code == COMP_CMD_ABORT)
++ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
++ else if (cmd_trb_comp_code == COMP_CMD_STOP) {
++ /* traversing the cancel_cmd_list and canceling
++ * the command according to command descriptor
++ */
++ xhci_cancel_cmd_in_cd_list(xhci);
++
++ xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
++ /*
++ * ring command ring doorbell again to restart the
++ * command ring
++ */
++ if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
++ xhci_ring_cmd_db(xhci);
++ }
++ return cur_trb_is_good;
++}
++
+ static void handle_cmd_completion(struct xhci_hcd *xhci,
+ struct xhci_event_cmd *event)
+ {
+@@ -1095,6 +1349,22 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
+ xhci->error_bitmask |= 1 << 5;
+ return;
+ }
++
++ if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
++ (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
++ /* If the return value is 0, we think the trb pointed by
++ * command ring dequeue pointer is a good trb. The good
++ * trb means we don't want to cancel the trb, but it have
++ * been stopped by host. So we should handle it normally.
++ * Otherwise, driver should invoke inc_deq() and return.
++ */
++ if (handle_stopped_cmd_ring(xhci,
++ GET_COMP_CODE(le32_to_cpu(event->status)))) {
++ inc_deq(xhci, xhci->cmd_ring, false);
++ return;
++ }
++ }
++
+ switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
+ & TRB_TYPE_BITMASK) {
+ case TRB_TYPE(TRB_ENABLE_SLOT):
+@@ -3356,7 +3626,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ } else {
+ td->last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
+- if (xhci->hci_version == 0x100) {
++ if (xhci->hci_version == 0x100 &&
++ !(xhci->quirks &
++ XHCI_AVOID_BEI)) {
+ /* Set BEI bit except for the last td */
+ if (i < num_tds - 1)
+ field |= TRB_BEI;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 09872ee..f5c0f38 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -52,7 +52,7 @@ MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
+ * handshake done). There are two failure modes: "usec" have passed (major
+ * hardware flakeout), or the register reads as all-ones (hardware removed).
+ */
+-static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
++int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
+ u32 mask, u32 done, int usec)
+ {
+ u32 result;
+@@ -105,8 +105,12 @@ int xhci_halt(struct xhci_hcd *xhci)
+
+ ret = handshake(xhci, &xhci->op_regs->status,
+ STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
+- if (!ret)
++ if (!ret) {
+ xhci->xhc_state |= XHCI_STATE_HALTED;
++ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
++ } else
++ xhci_warn(xhci, "Host not halted after %u microseconds.\n",
++ XHCI_MAX_HALT_USEC);
+ return ret;
+ }
+
+@@ -459,6 +463,8 @@ static bool compliance_mode_recovery_timer_quirk_check(void)
+
+ dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
+ dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
++ if (!dmi_product_name || !dmi_sys_vendor)
++ return false;
+
+ if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
+ return false;
+@@ -570,6 +576,7 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
+ return -ENODEV;
+ }
+ xhci->shared_hcd->state = HC_STATE_RUNNING;
++ xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+
+ if (xhci->quirks & XHCI_NEC_HOST)
+ xhci_ring_cmd_db(xhci);
+@@ -874,7 +881,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
+ command &= ~CMD_RUN;
+ xhci_writel(xhci, command, &xhci->op_regs->command);
+ if (handshake(xhci, &xhci->op_regs->status,
+- STS_HALT, STS_HALT, 100*100)) {
++ STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
+ xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
+ spin_unlock_irq(&xhci->lock);
+ return -ETIMEDOUT;
+@@ -2506,6 +2513,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+ struct completion *cmd_completion;
+ u32 *cmd_status;
+ struct xhci_virt_device *virt_dev;
++ union xhci_trb *cmd_trb;
+
+ spin_lock_irqsave(&xhci->lock, flags);
+ virt_dev = xhci->devs[udev->slot_id];
+@@ -2551,6 +2559,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+ }
+ init_completion(cmd_completion);
+
++ cmd_trb = xhci->cmd_ring->dequeue;
+ if (!ctx_change)
+ ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
+ udev->slot_id, must_succeed);
+@@ -2572,14 +2581,17 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+ /* Wait for the configure endpoint command to complete */
+ timeleft = wait_for_completion_interruptible_timeout(
+ cmd_completion,
+- USB_CTRL_SET_TIMEOUT);
++ XHCI_CMD_DEFAULT_TIMEOUT);
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for %s command\n",
+ timeleft == 0 ? "Timeout" : "Signal",
+ ctx_change == 0 ?
+ "configure endpoint" :
+ "evaluate context");
+- /* FIXME cancel the configure endpoint command */
++ /* cancel the configure endpoint command */
++ ret = xhci_cancel_cmd(xhci, command, cmd_trb);
++ if (ret < 0)
++ return ret;
+ return -ETIME;
+ }
+
+@@ -3528,8 +3540,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ unsigned long flags;
+ int timeleft;
+ int ret;
++ union xhci_trb *cmd_trb;
+
+ spin_lock_irqsave(&xhci->lock, flags);
++ cmd_trb = xhci->cmd_ring->dequeue;
+ ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+@@ -3541,12 +3555,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+
+ /* XXX: how much time for xHC slot assignment? */
+ timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+- USB_CTRL_SET_TIMEOUT);
++ XHCI_CMD_DEFAULT_TIMEOUT);
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for a slot\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+- /* FIXME cancel the enable slot request */
+- return 0;
++ /* cancel the enable slot request */
++ return xhci_cancel_cmd(xhci, NULL, cmd_trb);
+ }
+
+ if (!xhci->slot_id) {
+@@ -3607,6 +3621,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+ struct xhci_slot_ctx *slot_ctx;
+ struct xhci_input_control_ctx *ctrl_ctx;
+ u64 temp_64;
++ union xhci_trb *cmd_trb;
+
+ if (!udev->slot_id) {
+ xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
+@@ -3645,6 +3660,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+ xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
+
+ spin_lock_irqsave(&xhci->lock, flags);
++ cmd_trb = xhci->cmd_ring->dequeue;
+ ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
+ udev->slot_id);
+ if (ret) {
+@@ -3657,7 +3673,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+
+ /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
+ timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
+- USB_CTRL_SET_TIMEOUT);
++ XHCI_CMD_DEFAULT_TIMEOUT);
+ /* FIXME: From section 4.3.4: "Software shall be responsible for timing
+ * the SetAddress() "recovery interval" required by USB and aborting the
+ * command on a timeout.
+@@ -3665,7 +3681,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for address device command\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+- /* FIXME cancel the address device command */
++ /* cancel the address device command */
++ ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
++ if (ret < 0)
++ return ret;
+ return -ETIME;
+ }
+
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 44d518a..cc368c2 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1255,6 +1255,16 @@ struct xhci_td {
+ union xhci_trb *last_trb;
+ };
+
++/* xHCI command default timeout value */
++#define XHCI_CMD_DEFAULT_TIMEOUT (5 * HZ)
++
++/* command descriptor */
++struct xhci_cd {
++ struct list_head cancel_cmd_list;
++ struct xhci_command *command;
++ union xhci_trb *cmd_trb;
++};
++
+ struct xhci_dequeue_state {
+ struct xhci_segment *new_deq_seg;
+ union xhci_trb *new_deq_ptr;
+@@ -1402,6 +1412,11 @@ struct xhci_hcd {
+ /* data structures */
+ struct xhci_device_context_array *dcbaa;
+ struct xhci_ring *cmd_ring;
++ unsigned int cmd_ring_state;
++#define CMD_RING_STATE_RUNNING (1 << 0)
++#define CMD_RING_STATE_ABORTED (1 << 1)
++#define CMD_RING_STATE_STOPPED (1 << 2)
++ struct list_head cancel_cmd_list;
+ unsigned int cmd_ring_reserved_trbs;
+ struct xhci_ring *event_ring;
+ struct xhci_erst erst;
+@@ -1473,6 +1488,7 @@ struct xhci_hcd {
+ #define XHCI_TRUST_TX_LENGTH (1 << 10)
+ #define XHCI_SPURIOUS_REBOOT (1 << 13)
+ #define XHCI_COMP_MODE_QUIRK (1 << 14)
++#define XHCI_AVOID_BEI (1 << 15)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+ /* There are two roothubs to keep track of bus suspend info for */
+@@ -1666,6 +1682,8 @@ static inline void xhci_unregister_pci(void) {}
+
+ /* xHCI host controller glue */
+ typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
++int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
++ u32 mask, u32 done, int usec);
+ void xhci_quiesce(struct xhci_hcd *xhci);
+ int xhci_halt(struct xhci_hcd *xhci);
+ int xhci_reset(struct xhci_hcd *xhci);
+@@ -1756,6 +1774,8 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ struct xhci_dequeue_state *deq_state);
+ void xhci_stop_endpoint_command_watchdog(unsigned long arg);
++int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
++ union xhci_trb *cmd_trb);
+ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
+ unsigned int ep_index, unsigned int stream_id);
+
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 7324bea..e29a664 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -584,6 +584,8 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID),
++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ /*
+ * ELV devices:
+ */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 06f6fd2..7b5eb74 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -517,6 +517,11 @@
+ */
+ #define FTDI_TAVIR_STK500_PID 0xFA33 /* STK500 AVR programmer */
+
++/*
++ * TIAO product ids (FTDI_VID)
++ * http://www.tiaowiki.com/w/Main_Page
++ */
++#define FTDI_TIAO_UMPA_PID 0x8a98 /* TIAO/DIYGADGET USB Multi-Protocol Adapter */
+
+
+ /********************************/
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c068b4d..3fd4e6f 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -870,7 +870,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
+diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
+index a348198..87271e3 100644
+--- a/drivers/usb/serial/qcaux.c
++++ b/drivers/usb/serial/qcaux.c
+@@ -36,8 +36,6 @@
+ #define UTSTARCOM_PRODUCT_UM175_V1 0x3712
+ #define UTSTARCOM_PRODUCT_UM175_V2 0x3714
+ #define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715
+-#define PANTECH_PRODUCT_UML190_VZW 0x3716
+-#define PANTECH_PRODUCT_UML290_VZW 0x3718
+
+ /* CMOTECH devices */
+ #define CMOTECH_VENDOR_ID 0x16d8
+@@ -68,11 +66,9 @@ static struct usb_device_id id_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
+- { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xfe, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfd, 0xff) }, /* NMEA */
+- { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfe, 0xff) }, /* WMC */
+- { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) }, /* DIAG */
++ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfd, 0xff) }, /* NMEA */
++ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfe, 0xff) }, /* WMC */
++ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xff, 0xff) }, /* DIAG */
+ { },
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
+index f55ae23..790fa63 100644
+--- a/fs/autofs4/root.c
++++ b/fs/autofs4/root.c
+@@ -392,10 +392,12 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
+ ino->flags |= AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
+ status = autofs4_mount_wait(dentry);
+- if (status)
+- return ERR_PTR(status);
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_PENDING;
++ if (status) {
++ spin_unlock(&sbi->fs_lock);
++ return ERR_PTR(status);
++ }
+ }
+ done:
+ if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 6ff96c6..8dd615c 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -1668,30 +1668,19 @@ static int elf_note_info_init(struct elf_note_info *info)
+ return 0;
+ info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
+ if (!info->psinfo)
+- goto notes_free;
++ return 0;
+ info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
+ if (!info->prstatus)
+- goto psinfo_free;
++ return 0;
+ info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
+ if (!info->fpu)
+- goto prstatus_free;
++ return 0;
+ #ifdef ELF_CORE_COPY_XFPREGS
+ info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
+ if (!info->xfpu)
+- goto fpu_free;
++ return 0;
+ #endif
+ return 1;
+-#ifdef ELF_CORE_COPY_XFPREGS
+- fpu_free:
+- kfree(info->fpu);
+-#endif
+- prstatus_free:
+- kfree(info->prstatus);
+- psinfo_free:
+- kfree(info->psinfo);
+- notes_free:
+- kfree(info->notes);
+- return 0;
+ }
+
+ static int fill_note_info(struct elfhdr *elf, int phdrs,
+diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
+index a9f29b1..2262a77 100644
+--- a/fs/ecryptfs/ecryptfs_kernel.h
++++ b/fs/ecryptfs/ecryptfs_kernel.h
+@@ -559,6 +559,8 @@ struct ecryptfs_open_req {
+ struct inode *ecryptfs_get_inode(struct inode *lower_inode,
+ struct super_block *sb);
+ void ecryptfs_i_size_init(const char *page_virt, struct inode *inode);
++int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
++ struct inode *ecryptfs_inode);
+ int ecryptfs_decode_and_decrypt_filename(char **decrypted_name,
+ size_t *decrypted_name_size,
+ struct dentry *ecryptfs_dentry,
+diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
+index d3f95f9..841f24f 100644
+--- a/fs/ecryptfs/file.c
++++ b/fs/ecryptfs/file.c
+@@ -139,29 +139,50 @@ out:
+ return rc;
+ }
+
+-static void ecryptfs_vma_close(struct vm_area_struct *vma)
+-{
+- filemap_write_and_wait(vma->vm_file->f_mapping);
+-}
+-
+-static const struct vm_operations_struct ecryptfs_file_vm_ops = {
+- .close = ecryptfs_vma_close,
+- .fault = filemap_fault,
+-};
++struct kmem_cache *ecryptfs_file_info_cache;
+
+-static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma)
++static int read_or_initialize_metadata(struct dentry *dentry)
+ {
++ struct inode *inode = dentry->d_inode;
++ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
++ struct ecryptfs_crypt_stat *crypt_stat;
+ int rc;
+
+- rc = generic_file_mmap(file, vma);
++ crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
++ mount_crypt_stat = &ecryptfs_superblock_to_private(
++ inode->i_sb)->mount_crypt_stat;
++ mutex_lock(&crypt_stat->cs_mutex);
++
++ if (crypt_stat->flags & ECRYPTFS_POLICY_APPLIED &&
++ crypt_stat->flags & ECRYPTFS_KEY_VALID) {
++ rc = 0;
++ goto out;
++ }
++
++ rc = ecryptfs_read_metadata(dentry);
+ if (!rc)
+- vma->vm_ops = &ecryptfs_file_vm_ops;
++ goto out;
++
++ if (mount_crypt_stat->flags & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED) {
++ crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
++ | ECRYPTFS_ENCRYPTED);
++ rc = 0;
++ goto out;
++ }
++
++ if (!(mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED) &&
++ !i_size_read(ecryptfs_inode_to_lower(inode))) {
++ rc = ecryptfs_initialize_file(dentry, inode);
++ if (!rc)
++ goto out;
++ }
+
++ rc = -EIO;
++out:
++ mutex_unlock(&crypt_stat->cs_mutex);
+ return rc;
+ }
+
+-struct kmem_cache *ecryptfs_file_info_cache;
+-
+ /**
+ * ecryptfs_open
+ * @inode: inode speciying file to open
+@@ -237,32 +258,9 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
+ rc = 0;
+ goto out;
+ }
+- mutex_lock(&crypt_stat->cs_mutex);
+- if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
+- || !(crypt_stat->flags & ECRYPTFS_KEY_VALID)) {
+- rc = ecryptfs_read_metadata(ecryptfs_dentry);
+- if (rc) {
+- ecryptfs_printk(KERN_DEBUG,
+- "Valid headers not found\n");
+- if (!(mount_crypt_stat->flags
+- & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) {
+- rc = -EIO;
+- printk(KERN_WARNING "Either the lower file "
+- "is not in a valid eCryptfs format, "
+- "or the key could not be retrieved. "
+- "Plaintext passthrough mode is not "
+- "enabled; returning -EIO\n");
+- mutex_unlock(&crypt_stat->cs_mutex);
+- goto out_put;
+- }
+- rc = 0;
+- crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
+- | ECRYPTFS_ENCRYPTED);
+- mutex_unlock(&crypt_stat->cs_mutex);
+- goto out;
+- }
+- }
+- mutex_unlock(&crypt_stat->cs_mutex);
++ rc = read_or_initialize_metadata(ecryptfs_dentry);
++ if (rc)
++ goto out_put;
+ ecryptfs_printk(KERN_DEBUG, "inode w/ addr = [0x%p], i_ino = "
+ "[0x%.16lx] size: [0x%.16llx]\n", inode, inode->i_ino,
+ (unsigned long long)i_size_read(inode));
+@@ -278,8 +276,14 @@ out:
+
+ static int ecryptfs_flush(struct file *file, fl_owner_t td)
+ {
+- return file->f_mode & FMODE_WRITE
+- ? filemap_write_and_wait(file->f_mapping) : 0;
++ struct file *lower_file = ecryptfs_file_to_lower(file);
++
++ if (lower_file->f_op && lower_file->f_op->flush) {
++ filemap_write_and_wait(file->f_mapping);
++ return lower_file->f_op->flush(lower_file, td);
++ }
++
++ return 0;
+ }
+
+ static int ecryptfs_release(struct inode *inode, struct file *file)
+@@ -293,15 +297,7 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
+ static int
+ ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+ {
+- int rc = 0;
+-
+- rc = generic_file_fsync(file, start, end, datasync);
+- if (rc)
+- goto out;
+- rc = vfs_fsync_range(ecryptfs_file_to_lower(file), start, end,
+- datasync);
+-out:
+- return rc;
++ return vfs_fsync(ecryptfs_file_to_lower(file), datasync);
+ }
+
+ static int ecryptfs_fasync(int fd, struct file *file, int flag)
+@@ -370,7 +366,7 @@ const struct file_operations ecryptfs_main_fops = {
+ #ifdef CONFIG_COMPAT
+ .compat_ioctl = ecryptfs_compat_ioctl,
+ #endif
+- .mmap = ecryptfs_file_mmap,
++ .mmap = generic_file_mmap,
+ .open = ecryptfs_open,
+ .flush = ecryptfs_flush,
+ .release = ecryptfs_release,
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index 7c7556b..a9be90d 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -161,6 +161,31 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
+ return vfs_create(lower_dir_inode, lower_dentry, mode, NULL);
+ }
+
++static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
++ struct inode *inode)
++{
++ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
++ struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
++ struct dentry *lower_dir_dentry;
++ int rc;
++
++ dget(lower_dentry);
++ lower_dir_dentry = lock_parent(lower_dentry);
++ rc = vfs_unlink(lower_dir_inode, lower_dentry);
++ if (rc) {
++ printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
++ goto out_unlock;
++ }
++ fsstack_copy_attr_times(dir, lower_dir_inode);
++ set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink);
++ inode->i_ctime = dir->i_ctime;
++ d_drop(dentry);
++out_unlock:
++ unlock_dir(lower_dir_dentry);
++ dput(lower_dentry);
++ return rc;
++}
++
+ /**
+ * ecryptfs_do_create
+ * @directory_inode: inode of the new file's dentry's parent in ecryptfs
+@@ -201,8 +226,10 @@ ecryptfs_do_create(struct inode *directory_inode,
+ }
+ inode = __ecryptfs_get_inode(lower_dentry->d_inode,
+ directory_inode->i_sb);
+- if (IS_ERR(inode))
++ if (IS_ERR(inode)) {
++ vfs_unlink(lower_dir_dentry->d_inode, lower_dentry);
+ goto out_lock;
++ }
+ fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode);
+ fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode);
+ out_lock:
+@@ -219,8 +246,8 @@ out:
+ *
+ * Returns zero on success
+ */
+-static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
+- struct inode *ecryptfs_inode)
++int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
++ struct inode *ecryptfs_inode)
+ {
+ struct ecryptfs_crypt_stat *crypt_stat =
+ &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
+@@ -284,7 +311,9 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
+ * that this on disk file is prepared to be an ecryptfs file */
+ rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode);
+ if (rc) {
+- drop_nlink(ecryptfs_inode);
++ ecryptfs_do_unlink(directory_inode, ecryptfs_dentry,
++ ecryptfs_inode);
++ make_bad_inode(ecryptfs_inode);
+ unlock_new_inode(ecryptfs_inode);
+ iput(ecryptfs_inode);
+ goto out;
+@@ -496,27 +525,7 @@ out_lock:
+
+ static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry)
+ {
+- int rc = 0;
+- struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+- struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
+- struct dentry *lower_dir_dentry;
+-
+- dget(lower_dentry);
+- lower_dir_dentry = lock_parent(lower_dentry);
+- rc = vfs_unlink(lower_dir_inode, lower_dentry);
+- if (rc) {
+- printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
+- goto out_unlock;
+- }
+- fsstack_copy_attr_times(dir, lower_dir_inode);
+- set_nlink(dentry->d_inode,
+- ecryptfs_inode_to_lower(dentry->d_inode)->i_nlink);
+- dentry->d_inode->i_ctime = dir->i_ctime;
+- d_drop(dentry);
+-out_unlock:
+- unlock_dir(lower_dir_dentry);
+- dput(lower_dentry);
+- return rc;
++ return ecryptfs_do_unlink(dir, dentry, dentry->d_inode);
+ }
+
+ static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry,
+@@ -1026,12 +1035,6 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
+ goto out;
+ }
+
+- if (S_ISREG(inode->i_mode)) {
+- rc = filemap_write_and_wait(inode->i_mapping);
+- if (rc)
+- goto out;
+- fsstack_copy_attr_all(inode, lower_inode);
+- }
+ memcpy(&lower_ia, ia, sizeof(lower_ia));
+ if (ia->ia_valid & ATTR_FILE)
+ lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file);
+diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
+index b4a6bef..1cfef9f 100644
+--- a/fs/ecryptfs/main.c
++++ b/fs/ecryptfs/main.c
+@@ -162,6 +162,7 @@ void ecryptfs_put_lower_file(struct inode *inode)
+ inode_info = ecryptfs_inode_to_private(inode);
+ if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count,
+ &inode_info->lower_file_mutex)) {
++ filemap_write_and_wait(inode->i_mapping);
+ fput(inode_info->lower_file);
+ inode_info->lower_file = NULL;
+ mutex_unlock(&inode_info->lower_file_mutex);
+diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
+index 6a44148..93a998a 100644
+--- a/fs/ecryptfs/mmap.c
++++ b/fs/ecryptfs/mmap.c
+@@ -62,18 +62,6 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
+ {
+ int rc;
+
+- /*
+- * Refuse to write the page out if we are called from reclaim context
+- * since our writepage() path may potentially allocate memory when
+- * calling into the lower fs vfs_write() which may in turn invoke
+- * us again.
+- */
+- if (current->flags & PF_MEMALLOC) {
+- redirty_page_for_writepage(wbc, page);
+- rc = 0;
+- goto out;
+- }
+-
+ rc = ecryptfs_encrypt_page(page);
+ if (rc) {
+ ecryptfs_printk(KERN_WARNING, "Error encrypting "
+@@ -498,7 +486,6 @@ static int ecryptfs_write_end(struct file *file,
+ struct ecryptfs_crypt_stat *crypt_stat =
+ &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
+ int rc;
+- int need_unlock_page = 1;
+
+ ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
+ "(page w/ index = [0x%.16lx], to = [%d])\n", index, to);
+@@ -519,26 +506,26 @@ static int ecryptfs_write_end(struct file *file,
+ "zeros in page with index = [0x%.16lx]\n", index);
+ goto out;
+ }
+- set_page_dirty(page);
+- unlock_page(page);
+- need_unlock_page = 0;
++ rc = ecryptfs_encrypt_page(page);
++ if (rc) {
++ ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
++ "index [0x%.16lx])\n", index);
++ goto out;
++ }
+ if (pos + copied > i_size_read(ecryptfs_inode)) {
+ i_size_write(ecryptfs_inode, pos + copied);
+ ecryptfs_printk(KERN_DEBUG, "Expanded file size to "
+ "[0x%.16llx]\n",
+ (unsigned long long)i_size_read(ecryptfs_inode));
+- balance_dirty_pages_ratelimited(mapping);
+- rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
+- if (rc) {
+- printk(KERN_ERR "Error writing inode size to metadata; "
+- "rc = [%d]\n", rc);
+- goto out;
+- }
+ }
+- rc = copied;
++ rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
++ if (rc)
++ printk(KERN_ERR "Error writing inode size to metadata; "
++ "rc = [%d]\n", rc);
++ else
++ rc = copied;
+ out:
+- if (need_unlock_page)
+- unlock_page(page);
++ unlock_page(page);
+ page_cache_release(page);
+ return rc;
+ }
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 8b01f9f..bac2330 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -2382,6 +2382,16 @@ static int ext4_nonda_switch(struct super_block *sb)
+ free_blocks = EXT4_C2B(sbi,
+ percpu_counter_read_positive(&sbi->s_freeclusters_counter));
+ dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
++ /*
++ * Start pushing delalloc when 1/2 of free blocks are dirty.
++ */
++ if (dirty_blocks && (free_blocks < 2 * dirty_blocks) &&
++ !writeback_in_progress(sb->s_bdi) &&
++ down_read_trylock(&sb->s_umount)) {
++ writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
++ up_read(&sb->s_umount);
++ }
++
+ if (2 * free_blocks < 3 * dirty_blocks ||
+ free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
+ /*
+@@ -2390,13 +2400,6 @@ static int ext4_nonda_switch(struct super_block *sb)
+ */
+ return 1;
+ }
+- /*
+- * Even if we don't switch but are nearing capacity,
+- * start pushing delalloc when 1/2 of free blocks are dirty.
+- */
+- if (free_blocks < 2 * dirty_blocks)
+- writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
+-
+ return 0;
+ }
+
+@@ -4004,6 +4007,7 @@ static int ext4_do_update_inode(handle_t *handle,
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct buffer_head *bh = iloc->bh;
+ int err = 0, rc, block;
++ int need_datasync = 0;
+
+ /* For fields not not tracking in the in-memory inode,
+ * initialise them to zero for new inodes. */
+@@ -4052,7 +4056,10 @@ static int ext4_do_update_inode(handle_t *handle,
+ raw_inode->i_file_acl_high =
+ cpu_to_le16(ei->i_file_acl >> 32);
+ raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
+- ext4_isize_set(raw_inode, ei->i_disksize);
++ if (ei->i_disksize != ext4_isize(raw_inode)) {
++ ext4_isize_set(raw_inode, ei->i_disksize);
++ need_datasync = 1;
++ }
+ if (ei->i_disksize > 0x7fffffffULL) {
+ struct super_block *sb = inode->i_sb;
+ if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+@@ -4105,7 +4112,7 @@ static int ext4_do_update_inode(handle_t *handle,
+ err = rc;
+ ext4_clear_inode_state(inode, EXT4_STATE_NEW);
+
+- ext4_update_inode_fsync_trans(handle, inode, 0);
++ ext4_update_inode_fsync_trans(handle, inode, need_datasync);
+ out_brelse:
+ brelse(bh);
+ ext4_std_error(inode->i_sb, err);
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index c5826c6..e2016f3 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -141,55 +141,21 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
+ }
+
+ /**
+- * mext_check_null_inode - NULL check for two inodes
+- *
+- * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
+- */
+-static int
+-mext_check_null_inode(struct inode *inode1, struct inode *inode2,
+- const char *function, unsigned int line)
+-{
+- int ret = 0;
+-
+- if (inode1 == NULL) {
+- __ext4_error(inode2->i_sb, function, line,
+- "Both inodes should not be NULL: "
+- "inode1 NULL inode2 %lu", inode2->i_ino);
+- ret = -EIO;
+- } else if (inode2 == NULL) {
+- __ext4_error(inode1->i_sb, function, line,
+- "Both inodes should not be NULL: "
+- "inode1 %lu inode2 NULL", inode1->i_ino);
+- ret = -EIO;
+- }
+- return ret;
+-}
+-
+-/**
+ * double_down_write_data_sem - Acquire two inodes' write lock of i_data_sem
+ *
+- * @orig_inode: original inode structure
+- * @donor_inode: donor inode structure
+- * Acquire write lock of i_data_sem of the two inodes (orig and donor) by
+- * i_ino order.
++ * Acquire write lock of i_data_sem of the two inodes
+ */
+ static void
+-double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode)
++double_down_write_data_sem(struct inode *first, struct inode *second)
+ {
+- struct inode *first = orig_inode, *second = donor_inode;
++ if (first < second) {
++ down_write(&EXT4_I(first)->i_data_sem);
++ down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
++ } else {
++ down_write(&EXT4_I(second)->i_data_sem);
++ down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
+
+- /*
+- * Use the inode number to provide the stable locking order instead
+- * of its address, because the C language doesn't guarantee you can
+- * compare pointers that don't come from the same array.
+- */
+- if (donor_inode->i_ino < orig_inode->i_ino) {
+- first = donor_inode;
+- second = orig_inode;
+ }
+-
+- down_write(&EXT4_I(first)->i_data_sem);
+- down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
+ }
+
+ /**
+@@ -969,14 +935,6 @@ mext_check_arguments(struct inode *orig_inode,
+ return -EINVAL;
+ }
+
+- /* Files should be in the same ext4 FS */
+- if (orig_inode->i_sb != donor_inode->i_sb) {
+- ext4_debug("ext4 move extent: The argument files "
+- "should be in same FS [ino:orig %lu, donor %lu]\n",
+- orig_inode->i_ino, donor_inode->i_ino);
+- return -EINVAL;
+- }
+-
+ /* Ext4 move extent supports only extent based file */
+ if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
+ ext4_debug("ext4 move extent: orig file is not extents "
+@@ -1072,35 +1030,19 @@ mext_check_arguments(struct inode *orig_inode,
+ * @inode1: the inode structure
+ * @inode2: the inode structure
+ *
+- * Lock two inodes' i_mutex by i_ino order.
+- * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
++ * Lock two inodes' i_mutex
+ */
+-static int
++static void
+ mext_inode_double_lock(struct inode *inode1, struct inode *inode2)
+ {
+- int ret = 0;
+-
+- BUG_ON(inode1 == NULL && inode2 == NULL);
+-
+- ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
+- if (ret < 0)
+- goto out;
+-
+- if (inode1 == inode2) {
+- mutex_lock(&inode1->i_mutex);
+- goto out;
+- }
+-
+- if (inode1->i_ino < inode2->i_ino) {
++ BUG_ON(inode1 == inode2);
++ if (inode1 < inode2) {
+ mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
+ } else {
+ mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
+ mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
+ }
+-
+-out:
+- return ret;
+ }
+
+ /**
+@@ -1109,28 +1051,13 @@ out:
+ * @inode1: the inode that is released first
+ * @inode2: the inode that is released second
+ *
+- * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
+ */
+
+-static int
++static void
+ mext_inode_double_unlock(struct inode *inode1, struct inode *inode2)
+ {
+- int ret = 0;
+-
+- BUG_ON(inode1 == NULL && inode2 == NULL);
+-
+- ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
+- if (ret < 0)
+- goto out;
+-
+- if (inode1)
+- mutex_unlock(&inode1->i_mutex);
+-
+- if (inode2 && inode2 != inode1)
+- mutex_unlock(&inode2->i_mutex);
+-
+-out:
+- return ret;
++ mutex_unlock(&inode1->i_mutex);
++ mutex_unlock(&inode2->i_mutex);
+ }
+
+ /**
+@@ -1187,16 +1114,23 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0;
+ ext4_lblk_t rest_blocks;
+ pgoff_t orig_page_offset = 0, seq_end_page;
+- int ret1, ret2, depth, last_extent = 0;
++ int ret, depth, last_extent = 0;
+ int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
+ int data_offset_in_page;
+ int block_len_in_page;
+ int uninit;
+
+- /* orig and donor should be different file */
+- if (orig_inode->i_ino == donor_inode->i_ino) {
++ if (orig_inode->i_sb != donor_inode->i_sb) {
++ ext4_debug("ext4 move extent: The argument files "
++ "should be in same FS [ino:orig %lu, donor %lu]\n",
++ orig_inode->i_ino, donor_inode->i_ino);
++ return -EINVAL;
++ }
++
++ /* orig and donor should be different inodes */
++ if (orig_inode == donor_inode) {
+ ext4_debug("ext4 move extent: The argument files should not "
+- "be same file [ino:orig %lu, donor %lu]\n",
++ "be same inode [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EINVAL;
+ }
+@@ -1208,18 +1142,21 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EINVAL;
+ }
+-
++ /* TODO: This is non obvious task to swap blocks for inodes with full
++ jornaling enabled */
++ if (ext4_should_journal_data(orig_inode) ||
++ ext4_should_journal_data(donor_inode)) {
++ return -EINVAL;
++ }
+ /* Protect orig and donor inodes against a truncate */
+- ret1 = mext_inode_double_lock(orig_inode, donor_inode);
+- if (ret1 < 0)
+- return ret1;
++ mext_inode_double_lock(orig_inode, donor_inode);
+
+ /* Protect extent tree against block allocations via delalloc */
+ double_down_write_data_sem(orig_inode, donor_inode);
+ /* Check the filesystem environment whether move_extent can be done */
+- ret1 = mext_check_arguments(orig_inode, donor_inode, orig_start,
++ ret = mext_check_arguments(orig_inode, donor_inode, orig_start,
+ donor_start, &len);
+- if (ret1)
++ if (ret)
+ goto out;
+
+ file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits;
+@@ -1227,13 +1164,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ if (file_end < block_end)
+ len -= block_end - file_end;
+
+- ret1 = get_ext_path(orig_inode, block_start, &orig_path);
+- if (ret1)
++ ret = get_ext_path(orig_inode, block_start, &orig_path);
++ if (ret)
+ goto out;
+
+ /* Get path structure to check the hole */
+- ret1 = get_ext_path(orig_inode, block_start, &holecheck_path);
+- if (ret1)
++ ret = get_ext_path(orig_inode, block_start, &holecheck_path);
++ if (ret)
+ goto out;
+
+ depth = ext_depth(orig_inode);
+@@ -1252,13 +1189,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ last_extent = mext_next_extent(orig_inode,
+ holecheck_path, &ext_cur);
+ if (last_extent < 0) {
+- ret1 = last_extent;
++ ret = last_extent;
+ goto out;
+ }
+ last_extent = mext_next_extent(orig_inode, orig_path,
+ &ext_dummy);
+ if (last_extent < 0) {
+- ret1 = last_extent;
++ ret = last_extent;
+ goto out;
+ }
+ seq_start = le32_to_cpu(ext_cur->ee_block);
+@@ -1272,7 +1209,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ if (le32_to_cpu(ext_cur->ee_block) > block_end) {
+ ext4_debug("ext4 move extent: The specified range of file "
+ "may be the hole\n");
+- ret1 = -EINVAL;
++ ret = -EINVAL;
+ goto out;
+ }
+
+@@ -1292,7 +1229,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ last_extent = mext_next_extent(orig_inode, holecheck_path,
+ &ext_cur);
+ if (last_extent < 0) {
+- ret1 = last_extent;
++ ret = last_extent;
+ break;
+ }
+ add_blocks = ext4_ext_get_actual_len(ext_cur);
+@@ -1349,18 +1286,18 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ orig_page_offset,
+ data_offset_in_page,
+ block_len_in_page, uninit,
+- &ret1);
++ &ret);
+
+ /* Count how many blocks we have exchanged */
+ *moved_len += block_len_in_page;
+- if (ret1 < 0)
++ if (ret < 0)
+ break;
+ if (*moved_len > len) {
+ EXT4_ERROR_INODE(orig_inode,
+ "We replaced blocks too much! "
+ "sum of replaced: %llu requested: %llu",
+ *moved_len, len);
+- ret1 = -EIO;
++ ret = -EIO;
+ break;
+ }
+
+@@ -1374,22 +1311,22 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ }
+
+ double_down_write_data_sem(orig_inode, donor_inode);
+- if (ret1 < 0)
++ if (ret < 0)
+ break;
+
+ /* Decrease buffer counter */
+ if (holecheck_path)
+ ext4_ext_drop_refs(holecheck_path);
+- ret1 = get_ext_path(orig_inode, seq_start, &holecheck_path);
+- if (ret1)
++ ret = get_ext_path(orig_inode, seq_start, &holecheck_path);
++ if (ret)
+ break;
+ depth = holecheck_path->p_depth;
+
+ /* Decrease buffer counter */
+ if (orig_path)
+ ext4_ext_drop_refs(orig_path);
+- ret1 = get_ext_path(orig_inode, seq_start, &orig_path);
+- if (ret1)
++ ret = get_ext_path(orig_inode, seq_start, &orig_path);
++ if (ret)
+ break;
+
+ ext_cur = holecheck_path[depth].p_ext;
+@@ -1412,12 +1349,7 @@ out:
+ kfree(holecheck_path);
+ }
+ double_up_write_data_sem(orig_inode, donor_inode);
+- ret2 = mext_inode_double_unlock(orig_inode, donor_inode);
+-
+- if (ret1)
+- return ret1;
+- else if (ret2)
+- return ret2;
++ mext_inode_double_unlock(orig_inode, donor_inode);
+
+- return 0;
++ return ret;
+ }
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 4dd0890..88f97e5 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1801,9 +1801,7 @@ retry:
+ err = PTR_ERR(inode);
+ if (!IS_ERR(inode)) {
+ init_special_inode(inode, inode->i_mode, rdev);
+-#ifdef CONFIG_EXT4_FS_XATTR
+ inode->i_op = &ext4_special_inode_operations;
+-#endif
+ err = ext4_add_nondir(handle, dentry, inode);
+ }
+ ext4_journal_stop(handle);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 54f5786..13bfa07 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -63,6 +63,7 @@ int writeback_in_progress(struct backing_dev_info *bdi)
+ {
+ return test_bit(BDI_writeback_running, &bdi->state);
+ }
++EXPORT_SYMBOL(writeback_in_progress);
+
+ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
+ {
+diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
+index b09e51d..464cd76 100644
+--- a/fs/jffs2/wbuf.c
++++ b/fs/jffs2/wbuf.c
+@@ -1032,11 +1032,11 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
+ ops.datbuf = NULL;
+
+ ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
+- if (ret || ops.oobretlen != ops.ooblen) {
++ if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
+ printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
+ " bytes, read %zd bytes, error %d\n",
+ jeb->offset, ops.ooblen, ops.oobretlen, ret);
+- if (!ret)
++ if (!ret || mtd_is_bitflip(ret))
+ ret = -EIO;
+ return ret;
+ }
+@@ -1075,11 +1075,11 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
+ ops.datbuf = NULL;
+
+ ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
+- if (ret || ops.oobretlen != ops.ooblen) {
++ if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
+ printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
+ " bytes, read %zd bytes, error %d\n",
+ jeb->offset, ops.ooblen, ops.oobretlen, ret);
+- if (!ret)
++ if (!ret || mtd_is_bitflip(ret))
+ ret = -EIO;
+ return ret;
+ }
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index 23d7451..df753a1 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -40,6 +40,7 @@ struct nsm_args {
+ u32 proc;
+
+ char *mon_name;
++ char *nodename;
+ };
+
+ struct nsm_res {
+@@ -93,6 +94,7 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res)
+ .vers = 3,
+ .proc = NLMPROC_NSM_NOTIFY,
+ .mon_name = nsm->sm_mon_name,
++ .nodename = utsname()->nodename,
+ };
+ struct rpc_message msg = {
+ .rpc_argp = &args,
+@@ -429,7 +431,7 @@ static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
+ {
+ __be32 *p;
+
+- encode_nsm_string(xdr, utsname()->nodename);
++ encode_nsm_string(xdr, argp->nodename);
+ p = xdr_reserve_space(xdr, 4 + 4 + 4);
+ *p++ = cpu_to_be32(argp->prog);
+ *p++ = cpu_to_be32(argp->vers);
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index d774309..1aaa0ee 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -164,25 +164,39 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
+ return bio;
+ }
+
+-static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
++static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
+ sector_t isect, struct page *page,
+ struct pnfs_block_extent *be,
+ void (*end_io)(struct bio *, int err),
+- struct parallel_io *par)
++ struct parallel_io *par,
++ unsigned int offset, int len)
+ {
++ isect = isect + (offset >> SECTOR_SHIFT);
++ dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
++ npg, rw, (unsigned long long)isect, offset, len);
+ retry:
+ if (!bio) {
+ bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+ }
+- if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
++ if (bio_add_page(bio, page, len, offset) < len) {
+ bio = bl_submit_bio(rw, bio);
+ goto retry;
+ }
+ return bio;
+ }
+
++static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
++ sector_t isect, struct page *page,
++ struct pnfs_block_extent *be,
++ void (*end_io)(struct bio *, int err),
++ struct parallel_io *par)
++{
++ return do_add_page_to_bio(bio, npg, rw, isect, page, be,
++ end_io, par, 0, PAGE_CACHE_SIZE);
++}
++
+ /* This is basically copied from mpage_end_io_read */
+ static void bl_end_io_read(struct bio *bio, int err)
+ {
+@@ -446,6 +460,106 @@ map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
+ return;
+ }
+
++static void
++bl_read_single_end_io(struct bio *bio, int error)
++{
++ struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
++ struct page *page = bvec->bv_page;
++
++ /* Only one page in bvec */
++ unlock_page(page);
++}
++
++static int
++bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
++ unsigned int offset, unsigned int len)
++{
++ struct bio *bio;
++ struct page *shadow_page;
++ sector_t isect;
++ char *kaddr, *kshadow_addr;
++ int ret = 0;
++
++ dprintk("%s: offset %u len %u\n", __func__, offset, len);
++
++ shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
++ if (shadow_page == NULL)
++ return -ENOMEM;
++
++ bio = bio_alloc(GFP_NOIO, 1);
++ if (bio == NULL)
++ return -ENOMEM;
++
++ isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
++ (offset / SECTOR_SIZE);
++
++ bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
++ bio->bi_bdev = be->be_mdev;
++ bio->bi_end_io = bl_read_single_end_io;
++
++ lock_page(shadow_page);
++ if (bio_add_page(bio, shadow_page,
++ SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
++ unlock_page(shadow_page);
++ bio_put(bio);
++ return -EIO;
++ }
++
++ submit_bio(READ, bio);
++ wait_on_page_locked(shadow_page);
++ if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
++ ret = -EIO;
++ } else {
++ kaddr = kmap_atomic(page);
++ kshadow_addr = kmap_atomic(shadow_page);
++ memcpy(kaddr + offset, kshadow_addr + offset, len);
++ kunmap_atomic(kshadow_addr);
++ kunmap_atomic(kaddr);
++ }
++ __free_page(shadow_page);
++ bio_put(bio);
++
++ return ret;
++}
++
++static int
++bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
++ unsigned int dirty_offset, unsigned int dirty_len,
++ bool full_page)
++{
++ int ret = 0;
++ unsigned int start, end;
++
++ if (full_page) {
++ start = 0;
++ end = PAGE_CACHE_SIZE;
++ } else {
++ start = round_down(dirty_offset, SECTOR_SIZE);
++ end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
++ }
++
++ dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
++ if (!be) {
++ zero_user_segments(page, start, dirty_offset,
++ dirty_offset + dirty_len, end);
++ if (start == 0 && end == PAGE_CACHE_SIZE &&
++ trylock_page(page)) {
++ SetPageUptodate(page);
++ unlock_page(page);
++ }
++ return ret;
++ }
++
++ if (start != dirty_offset)
++ ret = bl_do_readpage_sync(page, be, start, dirty_offset - start);
++
++ if (!ret && (dirty_offset + dirty_len < end))
++ ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
++ end - dirty_offset - dirty_len);
++
++ return ret;
++}
++
+ /* Given an unmapped page, zero it or read in page for COW, page is locked
+ * by caller.
+ */
+@@ -479,7 +593,6 @@ init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
+ SetPageUptodate(page);
+
+ cleanup:
+- bl_put_extent(cow_read);
+ if (bh)
+ free_buffer_head(bh);
+ if (ret) {
+@@ -501,6 +614,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
+ struct parallel_io *par;
+ loff_t offset = wdata->args.offset;
+ size_t count = wdata->args.count;
++ unsigned int pg_offset, pg_len, saved_len;
+ struct page **pages = wdata->args.pages;
+ struct page *page;
+ pgoff_t index;
+@@ -615,10 +729,11 @@ next_page:
+ if (!extent_length) {
+ /* We've used up the previous extent */
+ bl_put_extent(be);
++ bl_put_extent(cow_read);
+ bio = bl_submit_bio(WRITE, bio);
+ /* Get the next one */
+ be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
+- isect, NULL);
++ isect, &cow_read);
+ if (!be || !is_writable(be, isect)) {
+ wdata->pnfs_error = -EINVAL;
+ goto out;
+@@ -626,7 +741,26 @@ next_page:
+ extent_length = be->be_length -
+ (isect - be->be_f_offset);
+ }
+- if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
++
++ dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
++ pg_offset = offset & ~PAGE_CACHE_MASK;
++ if (pg_offset + count > PAGE_CACHE_SIZE)
++ pg_len = PAGE_CACHE_SIZE - pg_offset;
++ else
++ pg_len = count;
++
++ saved_len = pg_len;
++ if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
++ !bl_is_sector_init(be->be_inval, isect)) {
++ ret = bl_read_partial_page_sync(pages[i], cow_read,
++ pg_offset, pg_len, true);
++ if (ret) {
++ dprintk("%s bl_read_partial_page_sync fail %d\n",
++ __func__, ret);
++ wdata->pnfs_error = ret;
++ goto out;
++ }
++
+ ret = bl_mark_sectors_init(be->be_inval, isect,
+ PAGE_CACHE_SECTORS,
+ NULL);
+@@ -636,15 +770,35 @@ next_page:
+ wdata->pnfs_error = ret;
+ goto out;
+ }
++
++ /* Expand to full page write */
++ pg_offset = 0;
++ pg_len = PAGE_CACHE_SIZE;
++ } else if ((pg_offset & (SECTOR_SIZE - 1)) ||
++ (pg_len & (SECTOR_SIZE - 1))){
++ /* ahh, nasty case. We have to do sync full sector
++ * read-modify-write cycles.
++ */
++ unsigned int saved_offset = pg_offset;
++ ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
++ pg_len, false);
++ pg_offset = round_down(pg_offset, SECTOR_SIZE);
++ pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
++ - pg_offset;
+ }
+- bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
++
++
++ bio = do_add_page_to_bio(bio, wdata->npages - i, WRITE,
+ isect, pages[i], be,
+- bl_end_io_write, par);
++ bl_end_io_write, par,
++ pg_offset, pg_len);
+ if (IS_ERR(bio)) {
+ wdata->pnfs_error = PTR_ERR(bio);
+ bio = NULL;
+ goto out;
+ }
++ offset += saved_len;
++ count -= saved_len;
+ isect += PAGE_CACHE_SECTORS;
+ last_isect = isect;
+ extent_length -= PAGE_CACHE_SECTORS;
+@@ -662,12 +816,10 @@ next_page:
+ }
+
+ write_done:
+- wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
+- if (count < wdata->res.count) {
+- wdata->res.count = count;
+- }
++ wdata->res.count = wdata->args.count;
+ out:
+ bl_put_extent(be);
++ bl_put_extent(cow_read);
+ bl_submit_bio(WRITE, bio);
+ put_parallel(par);
+ return PNFS_ATTEMPTED;
+diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
+index 42acf7e..519a9de 100644
+--- a/fs/nfs/blocklayout/blocklayout.h
++++ b/fs/nfs/blocklayout/blocklayout.h
+@@ -40,6 +40,7 @@
+
+ #define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
+ #define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
++#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+
+ struct block_mount_id {
+ spinlock_t bm_lock; /* protects list */
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 516b7f0..f66439e 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1289,6 +1289,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
+ udf_err(sb, "error loading logical volume descriptor: "
+ "Partition table too long (%u > %lu)\n", table_len,
+ sb->s_blocksize - sizeof(*lvd));
++ ret = 1;
+ goto out_bh;
+ }
+
+@@ -1333,8 +1334,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
+ UDF_ID_SPARABLE,
+ strlen(UDF_ID_SPARABLE))) {
+ if (udf_load_sparable_map(sb, map,
+- (struct sparablePartitionMap *)gpm) < 0)
++ (struct sparablePartitionMap *)gpm) < 0) {
++ ret = 1;
+ goto out_bh;
++ }
+ } else if (!strncmp(upm2->partIdent.ident,
+ UDF_ID_METADATA,
+ strlen(UDF_ID_METADATA))) {
+diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
+index 7978eec..3e8f2f7 100644
+--- a/include/linux/mempolicy.h
++++ b/include/linux/mempolicy.h
+@@ -188,7 +188,7 @@ struct sp_node {
+
+ struct shared_policy {
+ struct rb_root root;
+- spinlock_t lock;
++ struct mutex mutex;
+ };
+
+ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 67cc215..1874c5e 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -1823,7 +1823,6 @@
+ #define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081
+ #define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082
+ #define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050
+-#define PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL 0x2530
+
+ #define PCI_VENDOR_ID_RADISYS 0x1331
+
+diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
+index e5a7b9a..416dcb0 100644
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -1353,7 +1353,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+
+ if (!ct || !nf_ct_is_untracked(ct)) {
+- nf_reset(skb);
++ nf_conntrack_put(skb->nfct);
+ skb->nfct = &nf_ct_untracked_get()->ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+ nf_conntrack_get(skb->nfct);
+diff --git a/kernel/rcutree.c b/kernel/rcutree.c
+index 6b76d81..a122196 100644
+--- a/kernel/rcutree.c
++++ b/kernel/rcutree.c
+@@ -292,7 +292,9 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
+ static int
+ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
+ {
+- return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
++ return *rdp->nxttail[RCU_DONE_TAIL +
++ ACCESS_ONCE(rsp->completed) != rdp->completed] &&
++ !rcu_gp_in_progress(rsp);
+ }
+
+ /*
+diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c
+index 8b44e7f..85e9da2 100644
+--- a/kernel/sched_stoptask.c
++++ b/kernel/sched_stoptask.c
+@@ -25,8 +25,10 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
+ {
+ struct task_struct *stop = rq->stop;
+
+- if (stop && stop->on_rq)
++ if (stop && stop->on_rq) {
++ stop->se.exec_start = rq->clock_task;
+ return stop;
++ }
+
+ return NULL;
+ }
+@@ -50,6 +52,21 @@ static void yield_task_stop(struct rq *rq)
+
+ static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
+ {
++ struct task_struct *curr = rq->curr;
++ u64 delta_exec;
++
++ delta_exec = rq->clock_task - curr->se.exec_start;
++ if (unlikely((s64)delta_exec < 0))
++ delta_exec = 0;
++
++ schedstat_set(curr->se.statistics.exec_max,
++ max(curr->se.statistics.exec_max, delta_exec));
++
++ curr->se.sum_exec_runtime += delta_exec;
++ account_group_exec_runtime(curr, delta_exec);
++
++ curr->se.exec_start = rq->clock_task;
++ cpuacct_charge(curr, delta_exec);
+ }
+
+ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
+@@ -58,6 +75,9 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
+
+ static void set_curr_task_stop(struct rq *rq)
+ {
++ struct task_struct *stop = rq->stop;
++
++ stop->se.exec_start = rq->clock_task;
+ }
+
+ static void switched_to_stop(struct rq *rq, struct task_struct *p)
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 481611f..c504302 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -365,6 +365,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
+ void kernel_restart(char *cmd)
+ {
+ kernel_restart_prepare(cmd);
++ disable_nonboot_cpus();
+ if (!cmd)
+ printk(KERN_EMERG "Restarting system.\n");
+ else
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index b413138..43a19c5 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1726,10 +1726,9 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
+ *nextp = n;
+ }
+
+-static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
++static void cwq_activate_delayed_work(struct work_struct *work)
+ {
+- struct work_struct *work = list_first_entry(&cwq->delayed_works,
+- struct work_struct, entry);
++ struct cpu_workqueue_struct *cwq = get_work_cwq(work);
+ struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
+
+ trace_workqueue_activate_work(work);
+@@ -1738,6 +1737,14 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+ cwq->nr_active++;
+ }
+
++static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
++{
++ struct work_struct *work = list_first_entry(&cwq->delayed_works,
++ struct work_struct, entry);
++
++ cwq_activate_delayed_work(work);
++}
++
+ /**
+ * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
+ * @cwq: cwq of interest
+@@ -1869,7 +1876,9 @@ __acquires(&gcwq->lock)
+
+ spin_unlock_irq(&gcwq->lock);
+
++ smp_wmb(); /* paired with test_and_set_bit(PENDING) */
+ work_clear_pending(work);
++
+ lock_map_acquire_read(&cwq->wq->lockdep_map);
+ lock_map_acquire(&lockdep_map);
+ trace_workqueue_execute_start(work);
+@@ -2626,6 +2635,18 @@ static int try_to_grab_pending(struct work_struct *work)
+ smp_rmb();
+ if (gcwq == get_work_gcwq(work)) {
+ debug_work_deactivate(work);
++
++ /*
++ * A delayed work item cannot be grabbed directly
++ * because it might have linked NO_COLOR work items
++ * which, if left on the delayed_list, will confuse
++ * cwq->nr_active management later on and cause
++ * stall. Make sure the work item is activated
++ * before grabbing.
++ */
++ if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
++ cwq_activate_delayed_work(work);
++
+ list_del_init(&work->entry);
+ cwq_dec_nr_in_flight(get_work_cwq(work),
+ get_work_color(work),
+diff --git a/lib/gcd.c b/lib/gcd.c
+index f879033..433d89b 100644
+--- a/lib/gcd.c
++++ b/lib/gcd.c
+@@ -9,6 +9,9 @@ unsigned long gcd(unsigned long a, unsigned long b)
+
+ if (a < b)
+ swap(a, b);
++
++ if (!b)
++ return a;
+ while ((r = a % b) != 0) {
+ a = b;
+ b = r;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 0f897b8..d6c0fdf 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2429,8 +2429,8 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+ * from page cache lookup which is in HPAGE_SIZE units.
+ */
+ address = address & huge_page_mask(h);
+- pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
+- + (vma->vm_pgoff >> PAGE_SHIFT);
++ pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
++ vma->vm_pgoff;
+ mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
+
+ /*
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 11b8d47..4c82c21 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -607,24 +607,39 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+ return first;
+ }
+
+-/* Apply policy to a single VMA */
+-static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
++/*
++ * Apply policy to a single VMA
++ * This must be called with the mmap_sem held for writing.
++ */
++static int vma_replace_policy(struct vm_area_struct *vma,
++ struct mempolicy *pol)
+ {
+- int err = 0;
+- struct mempolicy *old = vma->vm_policy;
++ int err;
++ struct mempolicy *old;
++ struct mempolicy *new;
+
+ pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
+ vma->vm_start, vma->vm_end, vma->vm_pgoff,
+ vma->vm_ops, vma->vm_file,
+ vma->vm_ops ? vma->vm_ops->set_policy : NULL);
+
+- if (vma->vm_ops && vma->vm_ops->set_policy)
++ new = mpol_dup(pol);
++ if (IS_ERR(new))
++ return PTR_ERR(new);
++
++ if (vma->vm_ops && vma->vm_ops->set_policy) {
+ err = vma->vm_ops->set_policy(vma, new);
+- if (!err) {
+- mpol_get(new);
+- vma->vm_policy = new;
+- mpol_put(old);
++ if (err)
++ goto err_out;
+ }
++
++ old = vma->vm_policy;
++ vma->vm_policy = new; /* protected by mmap_sem */
++ mpol_put(old);
++
++ return 0;
++ err_out:
++ mpol_put(new);
+ return err;
+ }
+
+@@ -675,7 +690,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
+ if (err)
+ goto out;
+ }
+- err = policy_vma(vma, new_pol);
++ err = vma_replace_policy(vma, new_pol);
+ if (err)
+ goto out;
+ }
+@@ -1507,8 +1522,18 @@ struct mempolicy *get_vma_policy(struct task_struct *task,
+ addr);
+ if (vpol)
+ pol = vpol;
+- } else if (vma->vm_policy)
++ } else if (vma->vm_policy) {
+ pol = vma->vm_policy;
++
++ /*
++ * shmem_alloc_page() passes MPOL_F_SHARED policy with
++ * a pseudo vma whose vma->vm_ops=NULL. Take a reference
++ * count on these policies which will be dropped by
++ * mpol_cond_put() later
++ */
++ if (mpol_needs_cond_ref(pol))
++ mpol_get(pol);
++ }
+ }
+ if (!pol)
+ pol = &default_policy;
+@@ -2032,7 +2057,7 @@ int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
+ */
+
+ /* lookup first element intersecting start-end */
+-/* Caller holds sp->lock */
++/* Caller holds sp->mutex */
+ static struct sp_node *
+ sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
+ {
+@@ -2096,36 +2121,50 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
+
+ if (!sp->root.rb_node)
+ return NULL;
+- spin_lock(&sp->lock);
++ mutex_lock(&sp->mutex);
+ sn = sp_lookup(sp, idx, idx+1);
+ if (sn) {
+ mpol_get(sn->policy);
+ pol = sn->policy;
+ }
+- spin_unlock(&sp->lock);
++ mutex_unlock(&sp->mutex);
+ return pol;
+ }
+
++static void sp_free(struct sp_node *n)
++{
++ mpol_put(n->policy);
++ kmem_cache_free(sn_cache, n);
++}
++
+ static void sp_delete(struct shared_policy *sp, struct sp_node *n)
+ {
+ pr_debug("deleting %lx-l%lx\n", n->start, n->end);
+ rb_erase(&n->nd, &sp->root);
+- mpol_put(n->policy);
+- kmem_cache_free(sn_cache, n);
++ sp_free(n);
+ }
+
+ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
+ struct mempolicy *pol)
+ {
+- struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
++ struct sp_node *n;
++ struct mempolicy *newpol;
+
++ n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
+ if (!n)
+ return NULL;
++
++ newpol = mpol_dup(pol);
++ if (IS_ERR(newpol)) {
++ kmem_cache_free(sn_cache, n);
++ return NULL;
++ }
++ newpol->flags |= MPOL_F_SHARED;
++
+ n->start = start;
+ n->end = end;
+- mpol_get(pol);
+- pol->flags |= MPOL_F_SHARED; /* for unref */
+- n->policy = pol;
++ n->policy = newpol;
++
+ return n;
+ }
+
+@@ -2133,10 +2172,10 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
+ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
+ unsigned long end, struct sp_node *new)
+ {
+- struct sp_node *n, *new2 = NULL;
++ struct sp_node *n;
++ int ret = 0;
+
+-restart:
+- spin_lock(&sp->lock);
++ mutex_lock(&sp->mutex);
+ n = sp_lookup(sp, start, end);
+ /* Take care of old policies in the same range. */
+ while (n && n->start < end) {
+@@ -2149,16 +2188,14 @@ restart:
+ } else {
+ /* Old policy spanning whole new range. */
+ if (n->end > end) {
++ struct sp_node *new2;
++ new2 = sp_alloc(end, n->end, n->policy);
+ if (!new2) {
+- spin_unlock(&sp->lock);
+- new2 = sp_alloc(end, n->end, n->policy);
+- if (!new2)
+- return -ENOMEM;
+- goto restart;
++ ret = -ENOMEM;
++ goto out;
+ }
+ n->end = start;
+ sp_insert(sp, new2);
+- new2 = NULL;
+ break;
+ } else
+ n->end = start;
+@@ -2169,12 +2206,9 @@ restart:
+ }
+ if (new)
+ sp_insert(sp, new);
+- spin_unlock(&sp->lock);
+- if (new2) {
+- mpol_put(new2->policy);
+- kmem_cache_free(sn_cache, new2);
+- }
+- return 0;
++out:
++ mutex_unlock(&sp->mutex);
++ return ret;
+ }
+
+ /**
+@@ -2192,7 +2226,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
+ int ret;
+
+ sp->root = RB_ROOT; /* empty tree == default mempolicy */
+- spin_lock_init(&sp->lock);
++ mutex_init(&sp->mutex);
+
+ if (mpol) {
+ struct vm_area_struct pvma;
+@@ -2246,7 +2280,7 @@ int mpol_set_shared_policy(struct shared_policy *info,
+ }
+ err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
+ if (err && new)
+- kmem_cache_free(sn_cache, new);
++ sp_free(new);
+ return err;
+ }
+
+@@ -2258,16 +2292,14 @@ void mpol_free_shared_policy(struct shared_policy *p)
+
+ if (!p->root.rb_node)
+ return;
+- spin_lock(&p->lock);
++ mutex_lock(&p->mutex);
+ next = rb_first(&p->root);
+ while (next) {
+ n = rb_entry(next, struct sp_node, nd);
+ next = rb_next(&n->nd);
+- rb_erase(&n->nd, &p->root);
+- mpol_put(n->policy);
+- kmem_cache_free(sn_cache, n);
++ sp_delete(p, n);
+ }
+- spin_unlock(&p->lock);
++ mutex_unlock(&p->mutex);
+ }
+
+ /* assumes fs == KERNEL_DS */
+diff --git a/mm/slab.c b/mm/slab.c
+index cd3ab93..4c3b671 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -1669,9 +1669,6 @@ void __init kmem_cache_init_late(void)
+
+ g_cpucache_up = LATE;
+
+- /* Annotate slab for lockdep -- annotate the malloc caches */
+- init_lock_keys();
+-
+ /* 6) resize the head arrays to their final sizes */
+ mutex_lock(&cache_chain_mutex);
+ list_for_each_entry(cachep, &cache_chain, next)
+@@ -1679,6 +1676,9 @@ void __init kmem_cache_init_late(void)
+ BUG();
+ mutex_unlock(&cache_chain_mutex);
+
++ /* Annotate slab for lockdep -- annotate the malloc caches */
++ init_lock_keys();
++
+ /* Done! */
+ g_cpucache_up = FULL;
+
+diff --git a/mm/truncate.c b/mm/truncate.c
+index 632b15e..00fb58a 100644
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -394,11 +394,12 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
+ if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
+ return 0;
+
++ clear_page_mlock(page);
++
+ spin_lock_irq(&mapping->tree_lock);
+ if (PageDirty(page))
+ goto failed;
+
+- clear_page_mlock(page);
+ BUG_ON(page_has_private(page));
+ __delete_from_page_cache(page);
+ spin_unlock_irq(&mapping->tree_lock);
+diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+index de9da21..d7d63f4 100644
+--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
++++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+@@ -84,6 +84,14 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
+ *dataoff = nhoff + (iph->ihl << 2);
+ *protonum = iph->protocol;
+
++ /* Check bogus IP headers */
++ if (*dataoff > skb->len) {
++ pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: "
++ "nhoff %u, ihl %u, skblen %u\n",
++ nhoff, iph->ihl << 2, skb->len);
++ return -NF_ACCEPT;
++ }
++
+ return NF_ACCEPT;
+ }
+
+diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
+index 78844d9..6609a84 100644
+--- a/net/ipv4/netfilter/nf_nat_sip.c
++++ b/net/ipv4/netfilter/nf_nat_sip.c
+@@ -148,7 +148,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
+ if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
+ hdr, NULL, &matchoff, &matchlen,
+ &addr, &port) > 0) {
+- unsigned int matchend, poff, plen, buflen, n;
++ unsigned int olen, matchend, poff, plen, buflen, n;
+ char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
+
+ /* We're only interested in headers related to this
+@@ -163,11 +163,12 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
+ goto next;
+ }
+
++ olen = *datalen;
+ if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
+ &addr, port))
+ return NF_DROP;
+
+- matchend = matchoff + matchlen;
++ matchend = matchoff + matchlen + *datalen - olen;
+
+ /* The maddr= parameter (RFC 2361) specifies where to send
+ * the reply. */
+@@ -501,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
+ ret = nf_ct_expect_related(rtcp_exp);
+ if (ret == 0)
+ break;
+- else if (ret != -EBUSY) {
++ else if (ret == -EBUSY) {
++ nf_ct_unexpect_related(rtp_exp);
++ continue;
++ } else if (ret < 0) {
+ nf_ct_unexpect_related(rtp_exp);
+ port = 0;
+ break;
+diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
+index 340c80d..7918eb7 100644
+--- a/net/netfilter/nf_conntrack_expect.c
++++ b/net/netfilter/nf_conntrack_expect.c
+@@ -366,23 +366,6 @@ static void evict_oldest_expect(struct nf_conn *master,
+ }
+ }
+
+-static inline int refresh_timer(struct nf_conntrack_expect *i)
+-{
+- struct nf_conn_help *master_help = nfct_help(i->master);
+- const struct nf_conntrack_expect_policy *p;
+-
+- if (!del_timer(&i->timeout))
+- return 0;
+-
+- p = &rcu_dereference_protected(
+- master_help->helper,
+- lockdep_is_held(&nf_conntrack_lock)
+- )->expect_policy[i->class];
+- i->timeout.expires = jiffies + p->timeout * HZ;
+- add_timer(&i->timeout);
+- return 1;
+-}
+-
+ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
+ {
+ const struct nf_conntrack_expect_policy *p;
+@@ -390,7 +373,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
+ struct nf_conn *master = expect->master;
+ struct nf_conn_help *master_help = nfct_help(master);
+ struct net *net = nf_ct_exp_net(expect);
+- struct hlist_node *n;
++ struct hlist_node *n, *next;
+ unsigned int h;
+ int ret = 1;
+
+@@ -401,12 +384,12 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
+ goto out;
+ }
+ h = nf_ct_expect_dst_hash(&expect->tuple);
+- hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
++ hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) {
+ if (expect_matches(i, expect)) {
+- /* Refresh timer: if it's dying, ignore.. */
+- if (refresh_timer(i)) {
+- ret = 0;
+- goto out;
++ if (del_timer(&i->timeout)) {
++ nf_ct_unlink_expect(i);
++ nf_ct_expect_put(i);
++ break;
+ }
+ } else if (expect_clash(i, expect)) {
+ ret = -EBUSY;
+diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
+index dfd52ba..8f3f280 100644
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -389,8 +389,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
+ #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
+
+ /* Precision saver. */
+-static inline u_int32_t
+-user2credits(u_int32_t user)
++static u32 user2credits(u32 user)
+ {
+ /* If multiplying would overflow... */
+ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
+@@ -400,7 +399,7 @@ user2credits(u_int32_t user)
+ return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
+ }
+
+-static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
++static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
+ {
+ dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY;
+ if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
+@@ -531,8 +530,7 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ dh->rateinfo.prev = jiffies;
+ dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
+ hinfo->cfg.burst);
+- dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
+- hinfo->cfg.burst);
++ dh->rateinfo.credit_cap = dh->rateinfo.credit;
+ dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
+ } else {
+ /* update expiration timeout */
+diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
+index 32b7a57..a4c1e45 100644
+--- a/net/netfilter/xt_limit.c
++++ b/net/netfilter/xt_limit.c
+@@ -88,8 +88,7 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ }
+
+ /* Precision saver. */
+-static u_int32_t
+-user2credits(u_int32_t user)
++static u32 user2credits(u32 user)
+ {
+ /* If multiplying would overflow... */
+ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
+@@ -118,12 +117,12 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
+
+ /* For SMP, we only want to use one set of state. */
+ r->master = priv;
++ /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
++ 128. */
++ priv->prev = jiffies;
++ priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
+ if (r->cost == 0) {
+- /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
+- 128. */
+- priv->prev = jiffies;
+- priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
+- r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
++ r->credit_cap = priv->credit; /* Credits full. */
+ r->cost = user2credits(r->avg);
+ }
+ return 0;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index c5391af..10a385b 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1028,6 +1028,16 @@ static void xs_udp_data_ready(struct sock *sk, int len)
+ read_unlock_bh(&sk->sk_callback_lock);
+ }
+
++/*
++ * Helper function to force a TCP close if the server is sending
++ * junk and/or it has put us in CLOSE_WAIT
++ */
++static void xs_tcp_force_close(struct rpc_xprt *xprt)
++{
++ set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
++ xprt_force_disconnect(xprt);
++}
++
+ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
+ {
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+@@ -1054,7 +1064,7 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea
+ /* Sanity check of the record length */
+ if (unlikely(transport->tcp_reclen < 8)) {
+ dprintk("RPC: invalid TCP record fragment length\n");
+- xprt_force_disconnect(xprt);
++ xs_tcp_force_close(xprt);
+ return;
+ }
+ dprintk("RPC: reading TCP record fragment of length %d\n",
+@@ -1135,7 +1145,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
+ break;
+ default:
+ dprintk("RPC: invalid request message type\n");
+- xprt_force_disconnect(&transport->xprt);
++ xs_tcp_force_close(&transport->xprt);
+ }
+ xs_tcp_check_fraghdr(transport);
+ }
+@@ -1458,6 +1468,8 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
+ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+ {
+ smp_mb__before_clear_bit();
++ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
++ clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+ clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ clear_bit(XPRT_CLOSING, &xprt->state);
+ smp_mb__after_clear_bit();
+@@ -1515,8 +1527,8 @@ static void xs_tcp_state_change(struct sock *sk)
+ break;
+ case TCP_CLOSE_WAIT:
+ /* The server initiated a shutdown of the socket */
+- xprt_force_disconnect(xprt);
+ xprt->connect_cookie++;
++ xs_tcp_force_close(xprt);
+ case TCP_CLOSING:
+ /*
+ * If the server closed down the connection, make sure that
+@@ -2159,8 +2171,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ /* We're probably in TIME_WAIT. Get rid of existing socket,
+ * and retry
+ */
+- set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+- xprt_force_disconnect(xprt);
++ xs_tcp_force_close(xprt);
+ break;
+ case -ECONNREFUSED:
+ case -ECONNRESET:
+diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
+index d897278..978416d 100644
+--- a/scripts/Kbuild.include
++++ b/scripts/Kbuild.include
+@@ -98,24 +98,24 @@ try-run = $(shell set -e; \
+ # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
+
+ as-option = $(call try-run,\
+- $(CC) $(KBUILD_CFLAGS) $(1) -c -xassembler /dev/null -o "$$TMP",$(1),$(2))
++ $(CC) $(KBUILD_CFLAGS) $(1) -c -x assembler /dev/null -o "$$TMP",$(1),$(2))
+
+ # as-instr
+ # Usage: cflags-y += $(call as-instr,instr,option1,option2)
+
+ as-instr = $(call try-run,\
+- /bin/echo -e "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" -,$(2),$(3))
++ printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
+
+ # cc-option
+ # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
+
+ cc-option = $(call try-run,\
+- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",$(1),$(2))
++ $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+
+ # cc-option-yn
+ # Usage: flag := $(call cc-option-yn,-march=winchip-c6)
+ cc-option-yn = $(call try-run,\
+- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",y,n)
++ $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
+
+ # cc-option-align
+ # Prefix align with either -falign or -malign
+@@ -125,7 +125,7 @@ cc-option-align = $(subst -functions=0,,\
+ # cc-disable-warning
+ # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
+ cc-disable-warning = $(call try-run,\
+- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -xc /dev/null -o "$$TMP",-Wno-$(strip $(1)))
++ $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+
+ # cc-version
+ # Usage gcc-ver := $(call cc-version)
+@@ -143,7 +143,7 @@ cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3))
+ # cc-ldoption
+ # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
+ cc-ldoption = $(call try-run,\
+- $(CC) $(1) -nostdlib -xc /dev/null -o "$$TMP",$(1),$(2))
++ $(CC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
+
+ # ld-option
+ # Usage: LDFLAGS += $(call ld-option, -X)
+@@ -209,7 +209,7 @@ endif
+ # >$< substitution to preserve $ when reloading .cmd file
+ # note: when using inline perl scripts [perl -e '...$$t=1;...']
+ # in $(cmd_xxx) double $$ your perl vars
+-make-cmd = $(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1)))))
++make-cmd = $(subst \\,\\\\,$(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1))))))
+
+ # Find any prerequisites that is newer than target or that does not exist.
+ # PHONY targets skipped in both cases.
+diff --git a/scripts/gcc-version.sh b/scripts/gcc-version.sh
+index debecb5..7f2126d 100644
+--- a/scripts/gcc-version.sh
++++ b/scripts/gcc-version.sh
+@@ -22,10 +22,10 @@ if [ ${#compiler} -eq 0 ]; then
+ exit 1
+ fi
+
+-MAJOR=$(echo __GNUC__ | $compiler -E -xc - | tail -n 1)
+-MINOR=$(echo __GNUC_MINOR__ | $compiler -E -xc - | tail -n 1)
++MAJOR=$(echo __GNUC__ | $compiler -E -x c - | tail -n 1)
++MINOR=$(echo __GNUC_MINOR__ | $compiler -E -x c - | tail -n 1)
+ if [ "x$with_patchlevel" != "x" ] ; then
+- PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -xc - | tail -n 1)
++ PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -x c - | tail -n 1)
+ printf "%02d%02d%02d\\n" $MAJOR $MINOR $PATCHLEVEL
+ else
+ printf "%02d%02d\\n" $MAJOR $MINOR
+diff --git a/scripts/gcc-x86_32-has-stack-protector.sh b/scripts/gcc-x86_32-has-stack-protector.sh
+index 29493dc..12dbd0b 100644
+--- a/scripts/gcc-x86_32-has-stack-protector.sh
++++ b/scripts/gcc-x86_32-has-stack-protector.sh
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+ if [ "$?" -eq "0" ] ; then
+ echo y
+ else
+diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
+index afaec61..973e8c1 100644
+--- a/scripts/gcc-x86_64-has-stack-protector.sh
++++ b/scripts/gcc-x86_64-has-stack-protector.sh
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+ if [ "$?" -eq "0" ] ; then
+ echo y
+ else
+diff --git a/scripts/kconfig/check.sh b/scripts/kconfig/check.sh
+index fa59cbf..854d9c7 100755
+--- a/scripts/kconfig/check.sh
++++ b/scripts/kconfig/check.sh
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+ # Needed for systems without gettext
+-$* -xc -o /dev/null - > /dev/null 2>&1 << EOF
++$* -x c -o /dev/null - > /dev/null 2>&1 << EOF
+ #include <libintl.h>
+ int main()
+ {
+diff --git a/scripts/kconfig/lxdialog/check-lxdialog.sh b/scripts/kconfig/lxdialog/check-lxdialog.sh
+index 82cc3a8..50df490 100644
+--- a/scripts/kconfig/lxdialog/check-lxdialog.sh
++++ b/scripts/kconfig/lxdialog/check-lxdialog.sh
+@@ -38,7 +38,7 @@ trap "rm -f $tmp" 0 1 2 3 15
+
+ # Check if we can link to ncurses
+ check() {
+- $cc -xc - -o $tmp 2>/dev/null <<'EOF'
++ $cc -x c - -o $tmp 2>/dev/null <<'EOF'
+ #include CURSES_LOC
+ main() {}
+ EOF
+diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
+index bccf07dd..3346f42 100644
+--- a/scripts/kconfig/streamline_config.pl
++++ b/scripts/kconfig/streamline_config.pl
+@@ -463,6 +463,8 @@ while(<CIN>) {
+ if (defined($configs{$1})) {
+ if ($localyesconfig) {
+ $setconfigs{$1} = 'y';
++ print "$1=y\n";
++ next;
+ } else {
+ $setconfigs{$1} = $2;
+ }
+diff --git a/scripts/package/buildtar b/scripts/package/buildtar
+index 8a7b155..d0d748e 100644
+--- a/scripts/package/buildtar
++++ b/scripts/package/buildtar
+@@ -109,7 +109,7 @@ esac
+ if tar --owner=root --group=root --help >/dev/null 2>&1; then
+ opts="--owner=root --group=root"
+ fi
+- tar cf - . $opts | ${compress} > "${tarball}${file_ext}"
++ tar cf - boot/* lib/* $opts | ${compress} > "${tarball}${file_ext}"
+ )
+
+ echo "Tarball successfully created in ${tarball}${file_ext}"
+diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
+index d83bafc..193ce81 100644
+--- a/sound/drivers/aloop.c
++++ b/sound/drivers/aloop.c
+@@ -119,6 +119,7 @@ struct loopback_pcm {
+ unsigned int period_size_frac;
+ unsigned long last_jiffies;
+ struct timer_list timer;
++ spinlock_t timer_lock;
+ };
+
+ static struct platform_device *devices[SNDRV_CARDS];
+@@ -169,6 +170,7 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
+ unsigned long tick;
+ unsigned int rate_shift = get_rate_shift(dpcm);
+
++ spin_lock(&dpcm->timer_lock);
+ if (rate_shift != dpcm->pcm_rate_shift) {
+ dpcm->pcm_rate_shift = rate_shift;
+ dpcm->period_size_frac = frac_pos(dpcm, dpcm->pcm_period_size);
+@@ -181,12 +183,15 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
+ tick = (tick + dpcm->pcm_bps - 1) / dpcm->pcm_bps;
+ dpcm->timer.expires = jiffies + tick;
+ add_timer(&dpcm->timer);
++ spin_unlock(&dpcm->timer_lock);
+ }
+
+ static inline void loopback_timer_stop(struct loopback_pcm *dpcm)
+ {
++ spin_lock(&dpcm->timer_lock);
+ del_timer(&dpcm->timer);
+ dpcm->timer.expires = 0;
++ spin_unlock(&dpcm->timer_lock);
+ }
+
+ #define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK)
+@@ -659,6 +664,7 @@ static int loopback_open(struct snd_pcm_substream *substream)
+ dpcm->substream = substream;
+ setup_timer(&dpcm->timer, loopback_timer_function,
+ (unsigned long)dpcm);
++ spin_lock_init(&dpcm->timer_lock);
+
+ cable = loopback->cables[substream->number][dev];
+ if (!cable) {
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 402f330..94f0c4a 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -139,6 +139,7 @@ struct conexant_spec {
+ unsigned int asus:1;
+ unsigned int pin_eapd_ctrls:1;
+ unsigned int single_adc_amp:1;
++ unsigned int fixup_stereo_dmic:1;
+
+ unsigned int adc_switching:1;
+
+@@ -4113,9 +4114,9 @@ static int cx_auto_init(struct hda_codec *codec)
+
+ static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename,
+ const char *dir, int cidx,
+- hda_nid_t nid, int hda_dir, int amp_idx)
++ hda_nid_t nid, int hda_dir, int amp_idx, int chs)
+ {
+- static char name[32];
++ static char name[44];
+ static struct snd_kcontrol_new knew[] = {
+ HDA_CODEC_VOLUME(name, 0, 0, 0),
+ HDA_CODEC_MUTE(name, 0, 0, 0),
+@@ -4125,7 +4126,7 @@ static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename,
+
+ for (i = 0; i < 2; i++) {
+ struct snd_kcontrol *kctl;
+- knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, amp_idx,
++ knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, chs, amp_idx,
+ hda_dir);
+ knew[i].subdevice = HDA_SUBDEV_AMP_FLAG;
+ knew[i].index = cidx;
+@@ -4144,7 +4145,7 @@ static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename,
+ }
+
+ #define cx_auto_add_volume(codec, str, dir, cidx, nid, hda_dir) \
+- cx_auto_add_volume_idx(codec, str, dir, cidx, nid, hda_dir, 0)
++ cx_auto_add_volume_idx(codec, str, dir, cidx, nid, hda_dir, 0, 3)
+
+ #define cx_auto_add_pb_volume(codec, nid, str, idx) \
+ cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT)
+@@ -4214,6 +4215,36 @@ static int cx_auto_build_output_controls(struct hda_codec *codec)
+ return 0;
+ }
+
++/* Returns zero if this is a normal stereo channel, and non-zero if it should
++ be split in two independent channels.
++ dest_label must be at least 44 characters. */
++static int cx_auto_get_rightch_label(struct hda_codec *codec, const char *label,
++ char *dest_label, int nid)
++{
++ struct conexant_spec *spec = codec->spec;
++ int i;
++
++ if (!spec->fixup_stereo_dmic)
++ return 0;
++
++ for (i = 0; i < AUTO_CFG_MAX_INS; i++) {
++ int def_conf;
++ if (spec->autocfg.inputs[i].pin != nid)
++ continue;
++
++ if (spec->autocfg.inputs[i].type != AUTO_PIN_MIC)
++ return 0;
++ def_conf = snd_hda_codec_get_pincfg(codec, nid);
++ if (snd_hda_get_input_pin_attr(def_conf) != INPUT_PIN_ATTR_INT)
++ return 0;
++
++ /* Finally found the inverted internal mic! */
++ snprintf(dest_label, 44, "Inverted %s", label);
++ return 1;
++ }
++ return 0;
++}
++
+ static int cx_auto_add_capture_volume(struct hda_codec *codec, hda_nid_t nid,
+ const char *label, const char *pfx,
+ int cidx)
+@@ -4222,14 +4253,25 @@ static int cx_auto_add_capture_volume(struct hda_codec *codec, hda_nid_t nid,
+ int i;
+
+ for (i = 0; i < spec->num_adc_nids; i++) {
++ char rightch_label[44];
+ hda_nid_t adc_nid = spec->adc_nids[i];
+ int idx = get_input_connection(codec, adc_nid, nid);
+ if (idx < 0)
+ continue;
+ if (spec->single_adc_amp)
+ idx = 0;
++
++ if (cx_auto_get_rightch_label(codec, label, rightch_label, nid)) {
++ /* Make two independent kcontrols for left and right */
++ int err = cx_auto_add_volume_idx(codec, label, pfx,
++ cidx, adc_nid, HDA_INPUT, idx, 1);
++ if (err < 0)
++ return err;
++ return cx_auto_add_volume_idx(codec, rightch_label, pfx,
++ cidx, adc_nid, HDA_INPUT, idx, 2);
++ }
+ return cx_auto_add_volume_idx(codec, label, pfx,
+- cidx, adc_nid, HDA_INPUT, idx);
++ cidx, adc_nid, HDA_INPUT, idx, 3);
+ }
+ return 0;
+ }
+@@ -4242,9 +4284,19 @@ static int cx_auto_add_boost_volume(struct hda_codec *codec, int idx,
+ int i, con;
+
+ nid = spec->imux_info[idx].pin;
+- if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)
++ if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP) {
++ char rightch_label[44];
++ if (cx_auto_get_rightch_label(codec, label, rightch_label, nid)) {
++ int err = cx_auto_add_volume_idx(codec, label, " Boost",
++ cidx, nid, HDA_INPUT, 0, 1);
++ if (err < 0)
++ return err;
++ return cx_auto_add_volume_idx(codec, rightch_label, " Boost",
++ cidx, nid, HDA_INPUT, 0, 2);
++ }
+ return cx_auto_add_volume(codec, label, " Boost", cidx,
+ nid, HDA_INPUT);
++ }
+ con = __select_input_connection(codec, spec->imux_info[idx].adc, nid,
+ &mux, false, 0);
+ if (con < 0)
+@@ -4398,23 +4450,31 @@ static void apply_pincfg(struct hda_codec *codec, const struct cxt_pincfg *cfg)
+
+ }
+
+-static void apply_pin_fixup(struct hda_codec *codec,
++enum {
++ CXT_PINCFG_LENOVO_X200,
++ CXT_PINCFG_LENOVO_TP410,
++ CXT_FIXUP_STEREO_DMIC,
++};
++
++static void apply_fixup(struct hda_codec *codec,
+ const struct snd_pci_quirk *quirk,
+ const struct cxt_pincfg **table)
+ {
++ struct conexant_spec *spec = codec->spec;
++
+ quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk);
+- if (quirk) {
++ if (quirk && table[quirk->value]) {
+ snd_printdd(KERN_INFO "hda_codec: applying pincfg for %s\n",
+ quirk->name);
+ apply_pincfg(codec, table[quirk->value]);
+ }
++ if (quirk->value == CXT_FIXUP_STEREO_DMIC) {
++ snd_printdd(KERN_INFO "hda_codec: applying internal mic workaround for %s\n",
++ quirk->name);
++ spec->fixup_stereo_dmic = 1;
++ }
+ }
+
+-enum {
+- CXT_PINCFG_LENOVO_X200,
+- CXT_PINCFG_LENOVO_TP410,
+-};
+-
+ /* ThinkPad X200 & co with cxt5051 */
+ static const struct cxt_pincfg cxt_pincfg_lenovo_x200[] = {
+ { 0x16, 0x042140ff }, /* HP (seq# overridden) */
+@@ -4434,6 +4494,7 @@ static const struct cxt_pincfg cxt_pincfg_lenovo_tp410[] = {
+ static const struct cxt_pincfg *cxt_pincfg_tbl[] = {
+ [CXT_PINCFG_LENOVO_X200] = cxt_pincfg_lenovo_x200,
+ [CXT_PINCFG_LENOVO_TP410] = cxt_pincfg_lenovo_tp410,
++ [CXT_FIXUP_STEREO_DMIC] = NULL,
+ };
+
+ static const struct snd_pci_quirk cxt5051_fixups[] = {
+@@ -4447,6 +4508,9 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
+ SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410),
+ SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
++ SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
++ SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
++ SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
+ {}
+ };
+
+@@ -4486,10 +4550,10 @@ static int patch_conexant_auto(struct hda_codec *codec)
+ break;
+ case 0x14f15051:
+ add_cx5051_fake_mutes(codec);
+- apply_pin_fixup(codec, cxt5051_fixups, cxt_pincfg_tbl);
++ apply_fixup(codec, cxt5051_fixups, cxt_pincfg_tbl);
+ break;
+ default:
+- apply_pin_fixup(codec, cxt5066_fixups, cxt_pincfg_tbl);
++ apply_fixup(codec, cxt5066_fixups, cxt_pincfg_tbl);
+ break;
+ }
+
+diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
+index 323d4d9..0961d88 100644
+--- a/tools/hv/hv_kvp_daemon.c
++++ b/tools/hv/hv_kvp_daemon.c
+@@ -348,7 +348,7 @@ int main(void)
+ fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
+ if (fd < 0) {
+ syslog(LOG_ERR, "netlink socket creation failed; error:%d", fd);
+- exit(-1);
++ exit(EXIT_FAILURE);
+ }
+ addr.nl_family = AF_NETLINK;
+ addr.nl_pad = 0;
+@@ -360,7 +360,7 @@ int main(void)
+ if (error < 0) {
+ syslog(LOG_ERR, "bind failed; error:%d", error);
+ close(fd);
+- exit(-1);
++ exit(EXIT_FAILURE);
+ }
+ sock_opt = addr.nl_groups;
+ setsockopt(fd, 270, 1, &sock_opt, sizeof(sock_opt));
+@@ -378,7 +378,7 @@ int main(void)
+ if (len < 0) {
+ syslog(LOG_ERR, "netlink_send failed; error:%d", len);
+ close(fd);
+- exit(-1);
++ exit(EXIT_FAILURE);
+ }
+
+ pfd.fd = fd;
+@@ -497,7 +497,7 @@ int main(void)
+ len = netlink_send(fd, incoming_cn_msg);
+ if (len < 0) {
+ syslog(LOG_ERR, "net_link send failed; error:%d", len);
+- exit(-1);
++ exit(EXIT_FAILURE);
+ }
+ }
+
+diff --git a/tools/perf/Makefile b/tools/perf/Makefile
+index b98e307..e45d2b1 100644
+--- a/tools/perf/Makefile
++++ b/tools/perf/Makefile
+@@ -56,7 +56,7 @@ ifeq ($(ARCH),x86_64)
+ ARCH := x86
+ IS_X86_64 := 0
+ ifeq (, $(findstring m32,$(EXTRA_CFLAGS)))
+- IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1)
++ IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1)
+ endif
+ ifeq (${IS_X86_64}, 1)
+ RAW_ARCH := x86_64
+diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
+index e8a03ac..7db8da5 100644
+--- a/tools/power/cpupower/Makefile
++++ b/tools/power/cpupower/Makefile
+@@ -100,7 +100,7 @@ GMO_FILES = ${shell for HLANG in ${LANGUAGES}; do echo po/$$HLANG.gmo; done;}
+ export CROSS CC AR STRIP RANLIB CFLAGS LDFLAGS LIB_OBJS
+
+ # check if compiler option is supported
+-cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
++cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
+
+ # use '-Os' optimization if available, else use -O2
+ OPTIMIZATION := $(call cc-supports,-Os,-O2)
diff --git a/1032_linux-3.2.33.patch b/1032_linux-3.2.33.patch
new file mode 100644
index 00000000..c32fb75a
--- /dev/null
+++ b/1032_linux-3.2.33.patch
@@ -0,0 +1,3450 @@
+diff --git a/Makefile b/Makefile
+index b6d8282..63ca1ea2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 32
++SUBLEVEL = 33
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
+index 3d5fc41..bf53047 100644
+--- a/arch/arm/include/asm/vfpmacros.h
++++ b/arch/arm/include/asm/vfpmacros.h
+@@ -28,7 +28,7 @@
+ ldr \tmp, =elf_hwcap @ may not have MVFR regs
+ ldr \tmp, [\tmp, #0]
+ tst \tmp, #HWCAP_VFPv3D16
+- ldceq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
++ ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ addne \base, \base, #32*4 @ step over unused register space
+ #else
+ VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
+@@ -52,7 +52,7 @@
+ ldr \tmp, =elf_hwcap @ may not have MVFR regs
+ ldr \tmp, [\tmp, #0]
+ tst \tmp, #HWCAP_VFPv3D16
+- stceq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
++ stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ addne \base, \base, #32*4 @ step over unused register space
+ #else
+ VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index 1d1710e..bfa0eeb 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -295,18 +295,24 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+ asmlinkage void __cpuinit secondary_start_kernel(void)
+ {
+ struct mm_struct *mm = &init_mm;
+- unsigned int cpu = smp_processor_id();
++ unsigned int cpu;
++
++ /*
++ * The identity mapping is uncached (strongly ordered), so
++ * switch away from it before attempting any exclusive accesses.
++ */
++ cpu_switch_mm(mm->pgd, mm);
++ enter_lazy_tlb(mm, current);
++ local_flush_tlb_all();
+
+ /*
+ * All kernel threads share the same mm context; grab a
+ * reference and switch to it.
+ */
++ cpu = smp_processor_id();
+ atomic_inc(&mm->mm_count);
+ current->active_mm = mm;
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+- cpu_switch_mm(mm->pgd, mm);
+- enter_lazy_tlb(mm, current);
+- local_flush_tlb_all();
+
+ printk("CPU%u: Booted secondary processor\n", cpu);
+
+diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
+index f4546e9..23817a6 100644
+--- a/arch/mips/kernel/kgdb.c
++++ b/arch/mips/kernel/kgdb.c
+@@ -283,6 +283,15 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
+ struct pt_regs *regs = args->regs;
+ int trap = (regs->cp0_cause & 0x7c) >> 2;
+
++#ifdef CONFIG_KPROBES
++ /*
++ * Return immediately if the kprobes fault notifier has set
++ * DIE_PAGE_FAULT.
++ */
++ if (cmd == DIE_PAGE_FAULT)
++ return NOTIFY_DONE;
++#endif /* CONFIG_KPROBES */
++
+ /* Userspace events, ignore. */
+ if (user_mode(regs))
+ return NOTIFY_DONE;
+diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
+index d80f79d..8e1fb82 100644
+--- a/arch/s390/boot/compressed/vmlinux.lds.S
++++ b/arch/s390/boot/compressed/vmlinux.lds.S
+@@ -5,7 +5,7 @@ OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
+ OUTPUT_ARCH(s390:64-bit)
+ #else
+ OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
+-OUTPUT_ARCH(s390)
++OUTPUT_ARCH(s390:31-bit)
+ #endif
+
+ ENTRY(startup)
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index e4c79eb..e43d21e 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -8,7 +8,7 @@
+
+ #ifndef CONFIG_64BIT
+ OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
+-OUTPUT_ARCH(s390)
++OUTPUT_ARCH(s390:31-bit)
+ ENTRY(_start)
+ jiffies = jiffies_64 + 4;
+ #else
+diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
+index 614da62..3c8f220 100644
+--- a/arch/sparc/kernel/perf_event.c
++++ b/arch/sparc/kernel/perf_event.c
+@@ -555,11 +555,13 @@ static u64 nop_for_index(int idx)
+
+ static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
+ {
+- u64 val, mask = mask_for_index(idx);
++ u64 enc, val, mask = mask_for_index(idx);
++
++ enc = perf_event_get_enc(cpuc->events[idx]);
+
+ val = cpuc->pcr;
+ val &= ~mask;
+- val |= hwc->config;
++ val |= event_encoding(enc, idx);
+ cpuc->pcr = val;
+
+ pcr_ops->write(cpuc->pcr);
+@@ -1422,8 +1424,6 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+ {
+ unsigned long ufp;
+
+- perf_callchain_store(entry, regs->tpc);
+-
+ ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
+ do {
+ struct sparc_stackf *usf, sf;
+@@ -1444,8 +1444,6 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+ {
+ unsigned long ufp;
+
+- perf_callchain_store(entry, regs->tpc);
+-
+ ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
+ do {
+ struct sparc_stackf32 *usf, sf;
+@@ -1464,6 +1462,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+ void
+ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+ {
++ perf_callchain_store(entry, regs->tpc);
++
++ if (!current->mm)
++ return;
++
+ flushw_user();
+ if (test_thread_flag(TIF_32BIT))
+ perf_callchain_user_32(entry, regs);
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 441521a..5e4252b 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -519,12 +519,12 @@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
+ {
+ int ret;
+
+- if (current->personality == PER_LINUX32 &&
+- personality == PER_LINUX)
+- personality = PER_LINUX32;
++ if (personality(current->personality) == PER_LINUX32 &&
++ personality(personality) == PER_LINUX)
++ personality |= PER_LINUX32;
+ ret = sys_personality(personality);
+- if (ret == PER_LINUX32)
+- ret = PER_LINUX;
++ if (personality(ret) == PER_LINUX32)
++ ret &= ~PER_LINUX32;
+
+ return ret;
+ }
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index 1d7e274..7f5f65d 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -212,24 +212,20 @@ linux_sparc_syscall:
+ 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
+ ret_sys_call:
+ ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
+- ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+ sra %o0, 0, %o0
+ mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
+ sllx %g2, 32, %g2
+
+- /* Check if force_successful_syscall_return()
+- * was invoked.
+- */
+- ldub [%g6 + TI_SYS_NOERROR], %l2
+- brnz,a,pn %l2, 80f
+- stb %g0, [%g6 + TI_SYS_NOERROR]
+-
+ cmp %o0, -ERESTART_RESTARTBLOCK
+ bgeu,pn %xcc, 1f
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
+-80:
++ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
++ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
++
++2:
++ stb %g0, [%g6 + TI_SYS_NOERROR]
+ /* System call success, clear Carry condition code. */
+ andn %g3, %g2, %g3
++3:
+ stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
+ bne,pn %icc, linux_syscall_trace2
+ add %l1, 0x4, %l2 ! npc = npc+4
+@@ -238,20 +234,20 @@ ret_sys_call:
+ stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
+
+ 1:
++ /* Check if force_successful_syscall_return()
++ * was invoked.
++ */
++ ldub [%g6 + TI_SYS_NOERROR], %l2
++ brnz,pn %l2, 2b
++ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+ /* System call failure, set Carry condition code.
+ * Also, get abs(errno) to return to the process.
+ */
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
+ sub %g0, %o0, %o0
+- or %g3, %g2, %g3
+ stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
+- stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
+- bne,pn %icc, linux_syscall_trace2
+- add %l1, 0x4, %l2 ! npc = npc+4
+- stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
++ ba,pt %xcc, 3b
++ or %g3, %g2, %g3
+
+- b,pt %xcc, rtrap
+- stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
+ linux_syscall_trace2:
+ call syscall_trace_leave
+ add %sp, PTREGS_OFF, %o0
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 8e073d8..6ff4d78 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -2118,6 +2118,9 @@ EXPORT_SYMBOL(_PAGE_CACHE);
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
+ unsigned long vmemmap_table[VMEMMAP_SIZE];
+
++static long __meminitdata addr_start, addr_end;
++static int __meminitdata node_start;
++
+ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+ {
+ unsigned long vstart = (unsigned long) start;
+@@ -2148,15 +2151,30 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+
+ *vmem_pp = pte_base | __pa(block);
+
+- printk(KERN_INFO "[%p-%p] page_structs=%lu "
+- "node=%d entry=%lu/%lu\n", start, block, nr,
+- node,
+- addr >> VMEMMAP_CHUNK_SHIFT,
+- VMEMMAP_SIZE);
++ /* check to see if we have contiguous blocks */
++ if (addr_end != addr || node_start != node) {
++ if (addr_start)
++ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
++ addr_start, addr_end-1, node_start);
++ addr_start = addr;
++ node_start = node;
++ }
++ addr_end = addr + VMEMMAP_CHUNK;
+ }
+ }
+ return 0;
+ }
++
++void __meminit vmemmap_populate_print_last(void)
++{
++ if (addr_start) {
++ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
++ addr_start, addr_end-1, node_start);
++ addr_start = 0;
++ addr_end = 0;
++ node_start = 0;
++ }
++}
+ #endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+ static void prot_init_common(unsigned long page_none,
+diff --git a/arch/tile/Makefile b/arch/tile/Makefile
+index 17acce7..04c637c 100644
+--- a/arch/tile/Makefile
++++ b/arch/tile/Makefile
+@@ -26,6 +26,10 @@ $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
+ endif
+ endif
+
++# The tile compiler may emit .eh_frame information for backtracing.
++# In kernel modules, this causes load failures due to unsupported relocations.
++KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
++
+ ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
+ KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS)
+ endif
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index bcda816..4893d58 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -1025,7 +1025,7 @@ ENTRY(xen_sysenter_target)
+
+ ENTRY(xen_hypervisor_callback)
+ CFI_STARTPROC
+- pushl_cfi $0
++ pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+ SAVE_ALL
+ TRACE_IRQS_OFF
+
+@@ -1067,14 +1067,16 @@ ENTRY(xen_failsafe_callback)
+ 2: mov 8(%esp),%es
+ 3: mov 12(%esp),%fs
+ 4: mov 16(%esp),%gs
++ /* EAX == 0 => Category 1 (Bad segment)
++ EAX != 0 => Category 2 (Bad IRET) */
+ testl %eax,%eax
+ popl_cfi %eax
+ lea 16(%esp),%esp
+ CFI_ADJUST_CFA_OFFSET -16
+ jz 5f
+ addl $16,%esp
+- jmp iret_exc # EAX != 0 => Category 2 (Bad IRET)
+-5: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment)
++ jmp iret_exc
++5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+ SAVE_ALL
+ jmp ret_from_exception
+ CFI_ENDPROC
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index faf8d5e..6274f5f 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -1303,7 +1303,7 @@ ENTRY(xen_failsafe_callback)
+ CFI_RESTORE r11
+ addq $0x30,%rsp
+ CFI_ADJUST_CFA_OFFSET -0x30
+- pushq_cfi $0
++ pushq_cfi $-1 /* orig_ax = -1 => not a system call */
+ SAVE_ALL
+ jmp error_exit
+ CFI_ENDPROC
+diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
+index 75f9528..6bc0899 100644
+--- a/arch/x86/oprofile/nmi_int.c
++++ b/arch/x86/oprofile/nmi_int.c
+@@ -55,7 +55,7 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
+ val |= counter_config->extra;
+ event &= model->event_mask ? model->event_mask : 0xFF;
+ val |= event & 0xFF;
+- val |= (event & 0x0F00) << 24;
++ val |= (u64)(event & 0x0F00) << 24;
+
+ return val;
+ }
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index a1e21ae..69b9ef6 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -818,7 +818,16 @@ static void xen_write_cr4(unsigned long cr4)
+
+ native_write_cr4(cr4);
+ }
+-
++#ifdef CONFIG_X86_64
++static inline unsigned long xen_read_cr8(void)
++{
++ return 0;
++}
++static inline void xen_write_cr8(unsigned long val)
++{
++ BUG_ON(val);
++}
++#endif
+ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
+ {
+ int ret;
+@@ -987,6 +996,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
+ .read_cr4_safe = native_read_cr4_safe,
+ .write_cr4 = xen_write_cr4,
+
++#ifdef CONFIG_X86_64
++ .read_cr8 = xen_read_cr8,
++ .write_cr8 = xen_write_cr8,
++#endif
++
+ .wbinvd = native_wbinvd,
+
+ .read_msr = native_read_msr_safe,
+@@ -997,6 +1011,8 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
+ .read_tsc = native_read_tsc,
+ .read_pmc = native_read_pmc,
+
++ .read_tscp = native_read_tscp,
++
+ .iret = xen_iret,
+ .irq_enable_sysexit = xen_sysexit,
+ #ifdef CONFIG_X86_64
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index b19a18d..d2519b2 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -71,9 +71,6 @@ enum ec_command {
+ #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
+ #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
+
+-#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts
+- per one transaction */
+-
+ enum {
+ EC_FLAGS_QUERY_PENDING, /* Query is pending */
+ EC_FLAGS_GPE_STORM, /* GPE storm detected */
+@@ -87,6 +84,15 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
+ module_param(ec_delay, uint, 0644);
+ MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
+
++/*
++ * If the number of false interrupts per one transaction exceeds
++ * this threshold, will think there is a GPE storm happened and
++ * will disable the GPE for normal transaction.
++ */
++static unsigned int ec_storm_threshold __read_mostly = 8;
++module_param(ec_storm_threshold, uint, 0644);
++MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm");
++
+ /* If we find an EC via the ECDT, we need to keep a ptr to its context */
+ /* External interfaces use first EC only, so remember */
+ typedef int (*acpi_ec_query_func) (void *data);
+@@ -319,7 +325,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
+ msleep(1);
+ /* It is safe to enable the GPE outside of the transaction. */
+ acpi_enable_gpe(NULL, ec->gpe);
+- } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
++ } else if (t->irq_count > ec_storm_threshold) {
+ pr_info(PREFIX "GPE storm detected, "
+ "transactions will use polling mode\n");
+ set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+@@ -914,6 +920,17 @@ static int ec_flag_msi(const struct dmi_system_id *id)
+ return 0;
+ }
+
++/*
++ * Clevo M720 notebook actually works ok with IRQ mode, if we lifted
++ * the GPE storm threshold back to 20
++ */
++static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
++{
++ pr_debug("Setting the EC GPE storm threshold to 20\n");
++ ec_storm_threshold = 20;
++ return 0;
++}
++
+ static struct dmi_system_id __initdata ec_dmi_table[] = {
+ {
+ ec_skip_dsdt_scan, "Compal JFL92", {
+@@ -945,10 +962,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
+ {
+ ec_validate_ecdt, "ASUS hardware", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
++ {
++ ec_enlarge_storm_threshold, "CLEVO hardware", {
++ DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
+ {},
+ };
+
+-
+ int __init acpi_ec_ecdt_probe(void)
+ {
+ acpi_status status;
+diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
+index 10f92b3..7a987a7 100644
+--- a/drivers/bcma/main.c
++++ b/drivers/bcma/main.c
+@@ -124,9 +124,10 @@ static int bcma_register_cores(struct bcma_bus *bus)
+
+ static void bcma_unregister_cores(struct bcma_bus *bus)
+ {
+- struct bcma_device *core;
++ struct bcma_device *core, *tmp;
+
+- list_for_each_entry(core, &bus->cores, list) {
++ list_for_each_entry_safe(core, tmp, &bus->cores, list) {
++ list_del(&core->list);
+ if (core->dev_registered)
+ device_unregister(&core->dev);
+ }
+diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
+index b366b34..0d91655 100644
+--- a/drivers/char/tpm/tpm.c
++++ b/drivers/char/tpm/tpm.c
+@@ -1072,17 +1072,20 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *off)
+ {
+ struct tpm_chip *chip = file->private_data;
+- size_t in_size = size, out_size;
++ size_t in_size = size;
++ ssize_t out_size;
+
+ /* cannot perform a write until the read has cleared
+- either via tpm_read or a user_read_timer timeout */
+- while (atomic_read(&chip->data_pending) != 0)
+- msleep(TPM_TIMEOUT);
+-
+- mutex_lock(&chip->buffer_mutex);
++ either via tpm_read or a user_read_timer timeout.
++ This also prevents splitted buffered writes from blocking here.
++ */
++ if (atomic_read(&chip->data_pending) != 0)
++ return -EBUSY;
+
+ if (in_size > TPM_BUFSIZE)
+- in_size = TPM_BUFSIZE;
++ return -E2BIG;
++
++ mutex_lock(&chip->buffer_mutex);
+
+ if (copy_from_user
+ (chip->data_buffer, (void __user *) buf, in_size)) {
+@@ -1092,6 +1095,10 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
+
+ /* atomic tpm command send and result receive */
+ out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
++ if (out_size < 0) {
++ mutex_unlock(&chip->buffer_mutex);
++ return out_size;
++ }
+
+ atomic_set(&chip->data_pending, out_size);
+ mutex_unlock(&chip->buffer_mutex);
+diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
+index b7fe343..f6cd315 100644
+--- a/drivers/cpufreq/powernow-k8.c
++++ b/drivers/cpufreq/powernow-k8.c
+@@ -1216,14 +1216,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
+ struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
+ .relation = relation };
+
+- /*
+- * Must run on @pol->cpu. cpufreq core is responsible for ensuring
+- * that we're bound to the current CPU and pol->cpu stays online.
+- */
+- if (smp_processor_id() == pol->cpu)
+- return powernowk8_target_fn(&pta);
+- else
+- return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
++ return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
+ }
+
+ /* Driver entry point to verify the policy and range of frequencies */
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index c9eee6d..a9d5482 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -170,8 +170,11 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
+ * memory controller and apply to register. Search for the first
+ * bandwidth entry that is greater or equal than the setting requested
+ * and program that. If at last entry, turn off DRAM scrubbing.
++ *
++ * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
++ * by falling back to the last element in scrubrates[].
+ */
+- for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
++ for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
+ /*
+ * skip scrub rates which aren't recommended
+ * (see F10 BKDG, F3x58)
+@@ -181,12 +184,6 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
+
+ if (scrubrates[i].bandwidth <= new_bw)
+ break;
+-
+- /*
+- * if no suitable bandwidth found, turn off DRAM scrubbing
+- * entirely by falling back to the last element in the
+- * scrubrates array.
+- */
+ }
+
+ scrubval = scrubrates[i].scrubval;
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 33e1555..dbe4dbe 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -999,6 +999,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ if (obj->phys_obj)
+ ret = i915_gem_phys_pwrite(dev, obj, args, file);
+ else if (obj->gtt_space &&
++ obj->tiling_mode == I915_TILING_NONE &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ ret = i915_gem_object_pin(obj, 0, true);
+ if (ret)
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index f07bde2..57152a7 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -771,6 +771,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
+ },
+ },
++ {
++ .callback = intel_no_lvds_dmi_callback,
++ .ident = "ZOTAC ZBOXSD-ID12/ID13",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
++ DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
++ },
++ },
+
+ { } /* terminating entry */
+ };
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+index 2f46e0c..3ad3cc6 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+@@ -973,11 +973,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
+ static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
+ {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+- struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+- if (tmds) {
+- if (tmds->i2c_bus)
+- radeon_i2c_destroy(tmds->i2c_bus);
+- }
++ /* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */
+ kfree(radeon_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(radeon_encoder);
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 4065374..f4c3d28 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -146,14 +146,14 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+
+ if (ret != 0) {
+ err = ret;
+- goto errorout;
++ goto error0;
+ }
+
+ ret = hv_ringbuffer_init(
+ &newchannel->inbound, in, recv_ringbuffer_size);
+ if (ret != 0) {
+ err = ret;
+- goto errorout;
++ goto error0;
+ }
+
+
+@@ -168,7 +168,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+
+ if (ret != 0) {
+ err = ret;
+- goto errorout;
++ goto error0;
+ }
+
+ /* Create and init the channel open message */
+@@ -177,7 +177,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ GFP_KERNEL);
+ if (!open_info) {
+ err = -ENOMEM;
+- goto errorout;
++ goto error0;
+ }
+
+ init_completion(&open_info->waitevent);
+@@ -193,7 +193,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+
+ if (userdatalen > MAX_USER_DEFINED_BYTES) {
+ err = -EINVAL;
+- goto errorout;
++ goto error0;
+ }
+
+ if (userdatalen)
+@@ -208,19 +208,18 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
+ sizeof(struct vmbus_channel_open_channel));
+
+ if (ret != 0)
+- goto cleanup;
++ goto error1;
+
+ t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
+ if (t == 0) {
+ err = -ETIMEDOUT;
+- goto errorout;
++ goto error1;
+ }
+
+
+ if (open_info->response.open_result.status)
+ err = open_info->response.open_result.status;
+
+-cleanup:
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&open_info->msglistentry);
+ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+@@ -228,9 +227,12 @@ cleanup:
+ kfree(open_info);
+ return err;
+
+-errorout:
+- hv_ringbuffer_cleanup(&newchannel->outbound);
+- hv_ringbuffer_cleanup(&newchannel->inbound);
++error1:
++ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
++ list_del(&open_info->msglistentry);
++ spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
++
++error0:
+ free_pages((unsigned long)out,
+ get_order(send_ringbuffer_size + recv_ringbuffer_size));
+ kfree(open_info);
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 0634ee5..8f67c4d 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -2641,7 +2641,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+ else {
+ bad_sectors -= (sector - first_bad);
+ if (max_sync > bad_sectors)
+- max_sync = max_sync;
++ max_sync = bad_sectors;
+ continue;
+ }
+ }
+diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
+index 0b3e481..eab0641 100644
+--- a/drivers/media/video/au0828/au0828-video.c
++++ b/drivers/media/video/au0828/au0828-video.c
+@@ -1692,14 +1692,18 @@ static int vidioc_streamoff(struct file *file, void *priv,
+ (AUVI_INPUT(i).audio_setup)(dev, 0);
+ }
+
+- videobuf_streamoff(&fh->vb_vidq);
+- res_free(fh, AU0828_RESOURCE_VIDEO);
++ if (res_check(fh, AU0828_RESOURCE_VIDEO)) {
++ videobuf_streamoff(&fh->vb_vidq);
++ res_free(fh, AU0828_RESOURCE_VIDEO);
++ }
+ } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+ dev->vbi_timeout_running = 0;
+ del_timer_sync(&dev->vbi_timeout);
+
+- videobuf_streamoff(&fh->vb_vbiq);
+- res_free(fh, AU0828_RESOURCE_VBI);
++ if (res_check(fh, AU0828_RESOURCE_VBI)) {
++ videobuf_streamoff(&fh->vb_vbiq);
++ res_free(fh, AU0828_RESOURCE_VBI);
++ }
+ }
+
+ return 0;
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index 3ed9c5e..daed698 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -2903,9 +2903,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
+ if (le16_to_cpu(p->features) & 1)
+ *busw = NAND_BUSWIDTH_16;
+
+- chip->options &= ~NAND_CHIPOPTIONS_MSK;
+- chip->options |= (NAND_NO_READRDY |
+- NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
++ chip->options |= NAND_NO_READRDY | NAND_NO_AUTOINCR;
+
+ return 1;
+ }
+@@ -3069,9 +3067,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
+ mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
+ }
+ }
+- /* Get chip options, preserve non chip based options */
+- chip->options &= ~NAND_CHIPOPTIONS_MSK;
+- chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
++ /* Get chip options */
++ chip->options |= type->options;
+
+ /*
+ * Check if chip is not a Samsung device. Do not clear the
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index c5f6b0e..6546191 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -168,6 +168,8 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+
+ static bool e1000_vlan_used(struct e1000_adapter *adapter);
+ static void e1000_vlan_mode(struct net_device *netdev, u32 features);
++static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
++ bool filter_on);
+ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+ static void e1000_restore_vlan(struct e1000_adapter *adapter);
+@@ -1219,7 +1221,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
+ if (err)
+ goto err_register;
+
+- e1000_vlan_mode(netdev, netdev->features);
++ e1000_vlan_filter_on_off(adapter, false);
+
+ /* print bus type/speed/width info */
+ e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
+@@ -4553,6 +4555,21 @@ static bool e1000_vlan_used(struct e1000_adapter *adapter)
+ return false;
+ }
+
++static void __e1000_vlan_mode(struct e1000_adapter *adapter, u32 features)
++{
++ struct e1000_hw *hw = &adapter->hw;
++ u32 ctrl;
++
++ ctrl = er32(CTRL);
++ if (features & NETIF_F_HW_VLAN_RX) {
++ /* enable VLAN tag insert/strip */
++ ctrl |= E1000_CTRL_VME;
++ } else {
++ /* disable VLAN tag insert/strip */
++ ctrl &= ~E1000_CTRL_VME;
++ }
++ ew32(CTRL, ctrl);
++}
+ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
+ bool filter_on)
+ {
+@@ -4562,6 +4579,7 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_disable(adapter);
+
++ __e1000_vlan_mode(adapter, adapter->netdev->features);
+ if (filter_on) {
+ /* enable VLAN receive filtering */
+ rctl = er32(RCTL);
+@@ -4584,21 +4602,11 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
+ static void e1000_vlan_mode(struct net_device *netdev, u32 features)
+ {
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+- struct e1000_hw *hw = &adapter->hw;
+- u32 ctrl;
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_disable(adapter);
+
+- ctrl = er32(CTRL);
+- if (features & NETIF_F_HW_VLAN_RX) {
+- /* enable VLAN tag insert/strip */
+- ctrl |= E1000_CTRL_VME;
+- } else {
+- /* disable VLAN tag insert/strip */
+- ctrl &= ~E1000_CTRL_VME;
+- }
+- ew32(CTRL, ctrl);
++ __e1000_vlan_mode(adapter, features);
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_enable(adapter);
+diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
+index dea0cb4..57be855 100644
+--- a/drivers/net/ethernet/marvell/skge.c
++++ b/drivers/net/ethernet/marvell/skge.c
+@@ -4143,6 +4143,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "nForce"),
+ },
+ },
++ {
++ .ident = "ASUS P5NSLI",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
++ },
++ },
+ {}
+ };
+
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+index 026f9de..cc54153 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
++++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+@@ -835,107 +835,107 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
+
+ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+- {0x0000a2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+- {0x0000a2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+- {0x0000a2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
++ {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
++ {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
++ {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+- {0x0000a510, 0x15000028, 0x15000028, 0x0f000202, 0x0f000202},
+- {0x0000a514, 0x1b00002b, 0x1b00002b, 0x12000400, 0x12000400},
+- {0x0000a518, 0x1f020028, 0x1f020028, 0x16000402, 0x16000402},
+- {0x0000a51c, 0x2502002b, 0x2502002b, 0x19000404, 0x19000404},
+- {0x0000a520, 0x2a04002a, 0x2a04002a, 0x1c000603, 0x1c000603},
+- {0x0000a524, 0x2e06002a, 0x2e06002a, 0x21000a02, 0x21000a02},
+- {0x0000a528, 0x3302202d, 0x3302202d, 0x25000a04, 0x25000a04},
+- {0x0000a52c, 0x3804202c, 0x3804202c, 0x28000a20, 0x28000a20},
+- {0x0000a530, 0x3c06202c, 0x3c06202c, 0x2c000e20, 0x2c000e20},
+- {0x0000a534, 0x4108202d, 0x4108202d, 0x30000e22, 0x30000e22},
+- {0x0000a538, 0x4506402d, 0x4506402d, 0x34000e24, 0x34000e24},
+- {0x0000a53c, 0x4906222d, 0x4906222d, 0x38001640, 0x38001640},
+- {0x0000a540, 0x4d062231, 0x4d062231, 0x3c001660, 0x3c001660},
+- {0x0000a544, 0x50082231, 0x50082231, 0x3f001861, 0x3f001861},
+- {0x0000a548, 0x5608422e, 0x5608422e, 0x43001a81, 0x43001a81},
+- {0x0000a54c, 0x5a08442e, 0x5a08442e, 0x47001a83, 0x47001a83},
+- {0x0000a550, 0x5e0a4431, 0x5e0a4431, 0x4a001c84, 0x4a001c84},
+- {0x0000a554, 0x640a4432, 0x640a4432, 0x4e001ce3, 0x4e001ce3},
+- {0x0000a558, 0x680a4434, 0x680a4434, 0x52001ce5, 0x52001ce5},
+- {0x0000a55c, 0x6c0a6434, 0x6c0a6434, 0x56001ce9, 0x56001ce9},
+- {0x0000a560, 0x6f0a6633, 0x6f0a6633, 0x5a001ceb, 0x5a001ceb},
+- {0x0000a564, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+- {0x0000a568, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+- {0x0000a56c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+- {0x0000a570, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+- {0x0000a574, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+- {0x0000a578, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
+- {0x0000a57c, 0x730c6634, 0x730c6634, 0x5d001eec, 0x5d001eec},
++ {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
++ {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
++ {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
++ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
++ {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
++ {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
++ {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
++ {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
++ {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
++ {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
++ {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
++ {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
++ {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
++ {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861},
++ {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81},
++ {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83},
++ {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84},
++ {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3},
++ {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5},
++ {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9},
++ {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb},
++ {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
++ {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
++ {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
++ {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
++ {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
++ {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
++ {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+ {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+ {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+ {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+ {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+- {0x0000a590, 0x15800028, 0x15800028, 0x0f800202, 0x0f800202},
+- {0x0000a594, 0x1b80002b, 0x1b80002b, 0x12800400, 0x12800400},
+- {0x0000a598, 0x1f820028, 0x1f820028, 0x16800402, 0x16800402},
+- {0x0000a59c, 0x2582002b, 0x2582002b, 0x19800404, 0x19800404},
+- {0x0000a5a0, 0x2a84002a, 0x2a84002a, 0x1c800603, 0x1c800603},
+- {0x0000a5a4, 0x2e86002a, 0x2e86002a, 0x21800a02, 0x21800a02},
+- {0x0000a5a8, 0x3382202d, 0x3382202d, 0x25800a04, 0x25800a04},
+- {0x0000a5ac, 0x3884202c, 0x3884202c, 0x28800a20, 0x28800a20},
+- {0x0000a5b0, 0x3c86202c, 0x3c86202c, 0x2c800e20, 0x2c800e20},
+- {0x0000a5b4, 0x4188202d, 0x4188202d, 0x30800e22, 0x30800e22},
+- {0x0000a5b8, 0x4586402d, 0x4586402d, 0x34800e24, 0x34800e24},
+- {0x0000a5bc, 0x4986222d, 0x4986222d, 0x38801640, 0x38801640},
+- {0x0000a5c0, 0x4d862231, 0x4d862231, 0x3c801660, 0x3c801660},
+- {0x0000a5c4, 0x50882231, 0x50882231, 0x3f801861, 0x3f801861},
+- {0x0000a5c8, 0x5688422e, 0x5688422e, 0x43801a81, 0x43801a81},
+- {0x0000a5cc, 0x5a88442e, 0x5a88442e, 0x47801a83, 0x47801a83},
+- {0x0000a5d0, 0x5e8a4431, 0x5e8a4431, 0x4a801c84, 0x4a801c84},
+- {0x0000a5d4, 0x648a4432, 0x648a4432, 0x4e801ce3, 0x4e801ce3},
+- {0x0000a5d8, 0x688a4434, 0x688a4434, 0x52801ce5, 0x52801ce5},
+- {0x0000a5dc, 0x6c8a6434, 0x6c8a6434, 0x56801ce9, 0x56801ce9},
+- {0x0000a5e0, 0x6f8a6633, 0x6f8a6633, 0x5a801ceb, 0x5a801ceb},
+- {0x0000a5e4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+- {0x0000a5e8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+- {0x0000a5ec, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+- {0x0000a5f0, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+- {0x0000a5f4, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+- {0x0000a5f8, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
+- {0x0000a5fc, 0x738c6634, 0x738c6634, 0x5d801eec, 0x5d801eec},
++ {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
++ {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
++ {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
++ {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
++ {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
++ {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
++ {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
++ {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
++ {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
++ {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
++ {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
++ {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
++ {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
++ {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861},
++ {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81},
++ {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83},
++ {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84},
++ {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3},
++ {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5},
++ {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9},
++ {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb},
++ {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
++ {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
++ {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
++ {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
++ {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
++ {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
++ {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+- {0x0000a608, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+- {0x0000a60c, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+- {0x0000a610, 0x01804601, 0x01804601, 0x00000000, 0x00000000},
+- {0x0000a614, 0x01804601, 0x01804601, 0x01404000, 0x01404000},
+- {0x0000a618, 0x01804601, 0x01804601, 0x01404501, 0x01404501},
+- {0x0000a61c, 0x01804601, 0x01804601, 0x02008501, 0x02008501},
+- {0x0000a620, 0x03408d02, 0x03408d02, 0x0280ca03, 0x0280ca03},
+- {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+- {0x0000a628, 0x03410d04, 0x03410d04, 0x04014c04, 0x04014c04},
+- {0x0000a62c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+- {0x0000a630, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+- {0x0000a634, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+- {0x0000a638, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+- {0x0000a63c, 0x03410d04, 0x03410d04, 0x04015005, 0x04015005},
+- {0x0000b2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+- {0x0000b2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+- {0x0000b2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
++ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
++ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
++ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
++ {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
++ {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
++ {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
++ {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
++ {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
++ {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
++ {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
++ {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
++ {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
++ {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
++ {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
++ {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
++ {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
++ {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+- {0x0000c2dc, 0x000cfff0, 0x000cfff0, 0x03aaa352, 0x03aaa352},
+- {0x0000c2e0, 0x000f0000, 0x000f0000, 0x03ccc584, 0x03ccc584},
+- {0x0000c2e4, 0x03f00000, 0x03f00000, 0x03f0f800, 0x03f0f800},
++ {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
++ {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
++ {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+- {0x00016048, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
++ {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+- {0x00016448, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
++ {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+- {0x00016848, 0x61200001, 0x61200001, 0x66480001, 0x66480001},
++ {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+ {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
+ };
+
+diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
+index 56bd370..da567f0 100644
+--- a/drivers/net/wireless/ipw2x00/ipw2200.c
++++ b/drivers/net/wireless/ipw2x00/ipw2200.c
+@@ -10463,7 +10463,7 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
+ } else
+ len = src->len;
+
+- dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC);
++ dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
+index 9b6b010..4ac4ef0 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
+@@ -193,7 +193,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
+ * See iwlagn_mac_channel_switch.
+ */
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+- struct iwl6000_channel_switch_cmd cmd;
++ struct iwl6000_channel_switch_cmd *cmd;
+ const struct iwl_channel_info *ch_info;
+ u32 switch_time_in_usec, ucode_switch_time;
+ u16 ch;
+@@ -203,18 +203,25 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
+ struct ieee80211_vif *vif = ctx->vif;
+ struct iwl_host_cmd hcmd = {
+ .id = REPLY_CHANNEL_SWITCH,
+- .len = { sizeof(cmd), },
++ .len = { sizeof(*cmd), },
+ .flags = CMD_SYNC,
+- .data = { &cmd, },
++ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
++ int err;
+
+- cmd.band = priv->band == IEEE80211_BAND_2GHZ;
++ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
++ if (!cmd)
++ return -ENOMEM;
++
++ hcmd.data[0] = cmd;
++
++ cmd->band = priv->band == IEEE80211_BAND_2GHZ;
+ ch = ch_switch->channel->hw_value;
+ IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
+ ctx->active.channel, ch);
+- cmd.channel = cpu_to_le16(ch);
+- cmd.rxon_flags = ctx->staging.flags;
+- cmd.rxon_filter_flags = ctx->staging.filter_flags;
++ cmd->channel = cpu_to_le16(ch);
++ cmd->rxon_flags = ctx->staging.flags;
++ cmd->rxon_filter_flags = ctx->staging.filter_flags;
+ switch_count = ch_switch->count;
+ tsf_low = ch_switch->timestamp & 0x0ffffffff;
+ /*
+@@ -230,30 +237,32 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
+ switch_count = 0;
+ }
+ if (switch_count <= 1)
+- cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
++ cmd->switch_time = cpu_to_le32(priv->ucode_beacon_time);
+ else {
+ switch_time_in_usec =
+ vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+ ucode_switch_time = iwl_usecs_to_beacons(priv,
+ switch_time_in_usec,
+ beacon_interval);
+- cmd.switch_time = iwl_add_beacon_time(priv,
+- priv->ucode_beacon_time,
+- ucode_switch_time,
+- beacon_interval);
++ cmd->switch_time = iwl_add_beacon_time(priv,
++ priv->ucode_beacon_time,
++ ucode_switch_time,
++ beacon_interval);
+ }
+ IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+- cmd.switch_time);
++ cmd->switch_time);
+ ch_info = iwl_get_channel_info(priv, priv->band, ch);
+ if (ch_info)
+- cmd.expect_beacon = is_channel_radar(ch_info);
++ cmd->expect_beacon = is_channel_radar(ch_info);
+ else {
+ IWL_ERR(priv, "invalid channel switch from %u to %u\n",
+ ctx->active.channel, ch);
+ return -EFAULT;
+ }
+
+- return iwl_trans_send_cmd(trans(priv), &hcmd);
++ err = iwl_trans_send_cmd(trans(priv), &hcmd);
++ kfree(cmd);
++ return err;
+ }
+
+ static struct iwl_lib_ops iwl6000_lib = {
+diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
+index 69ae2fd..b938163 100644
+--- a/drivers/pcmcia/pxa2xx_sharpsl.c
++++ b/drivers/pcmcia/pxa2xx_sharpsl.c
+@@ -219,7 +219,7 @@ static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
+ sharpsl_pcmcia_init_reset(skt);
+ }
+
+-static struct pcmcia_low_level sharpsl_pcmcia_ops __initdata = {
++static struct pcmcia_low_level sharpsl_pcmcia_ops = {
+ .owner = THIS_MODULE,
+ .hw_init = sharpsl_pcmcia_hw_init,
+ .hw_shutdown = sharpsl_pcmcia_hw_shutdown,
+diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
+index af1e296..21bc1a7 100644
+--- a/drivers/platform/x86/samsung-laptop.c
++++ b/drivers/platform/x86/samsung-laptop.c
+@@ -21,6 +21,7 @@
+ #include <linux/dmi.h>
+ #include <linux/platform_device.h>
+ #include <linux/rfkill.h>
++#include <linux/acpi.h>
+
+ /*
+ * This driver is needed because a number of Samsung laptops do not hook
+@@ -226,6 +227,7 @@ static struct backlight_device *backlight_device;
+ static struct mutex sabi_mutex;
+ static struct platform_device *sdev;
+ static struct rfkill *rfk;
++static bool handle_backlight;
+ static bool has_stepping_quirk;
+
+ static int force;
+@@ -602,6 +604,13 @@ static int __init samsung_init(void)
+ int retval;
+
+ mutex_init(&sabi_mutex);
++ handle_backlight = true;
++
++#ifdef CONFIG_ACPI
++ /* Don't handle backlight here if the acpi video already handle it */
++ if (acpi_video_backlight_support())
++ handle_backlight = false;
++#endif
+
+ if (!force && !dmi_check_system(samsung_dmi_table))
+ return -ENODEV;
+@@ -661,7 +670,8 @@ static int __init samsung_init(void)
+ printk(KERN_DEBUG "ifaceP = 0x%08x\n", ifaceP);
+ printk(KERN_DEBUG "sabi_iface = %p\n", sabi_iface);
+
+- test_backlight();
++ if (handle_backlight)
++ test_backlight();
+ test_wireless();
+
+ retval = sabi_get_command(sabi_config->commands.get_brightness,
+@@ -680,13 +690,23 @@ static int __init samsung_init(void)
+ }
+
+ /* Check for stepping quirk */
+- check_for_stepping_quirk();
++ if (handle_backlight)
++ check_for_stepping_quirk();
++
++#ifdef CONFIG_ACPI
++ /* Only log that if we are really on a sabi platform */
++ if (acpi_video_backlight_support())
++ pr_info("Backlight controlled by ACPI video driver\n");
++#endif
+
+ /* knock up a platform device to hang stuff off of */
+ sdev = platform_device_register_simple("samsung", -1, NULL, 0);
+ if (IS_ERR(sdev))
+ goto error_no_platform;
+
++ if (!handle_backlight)
++ goto skip_backlight;
++
+ /* create a backlight device to talk to this one */
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+@@ -702,6 +722,7 @@ static int __init samsung_init(void)
+ backlight_device->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(backlight_device);
+
++skip_backlight:
+ retval = init_wireless(sdev);
+ if (retval)
+ goto error_no_rfk;
+diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
+index d93a960..bc744b4 100644
+--- a/drivers/rtc/rtc-imxdi.c
++++ b/drivers/rtc/rtc-imxdi.c
+@@ -392,6 +392,8 @@ static int dryice_rtc_probe(struct platform_device *pdev)
+ if (imxdi->ioaddr == NULL)
+ return -ENOMEM;
+
++ spin_lock_init(&imxdi->irq_lock);
++
+ imxdi->irq = platform_get_irq(pdev, 0);
+ if (imxdi->irq < 0)
+ return imxdi->irq;
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 6888b2c..b3a729c 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -2045,8 +2045,7 @@ static void unmap_region(sector_t lba, unsigned int len)
+ block = lba + alignment;
+ rem = do_div(block, granularity);
+
+- if (rem == 0 && lba + granularity <= end &&
+- block < map_size)
++ if (rem == 0 && lba + granularity < end && block < map_size)
+ clear_bit(block, map_storep);
+
+ lba += granularity - rem;
+diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c
+index 48246cd..b4311bf 100644
+--- a/drivers/staging/comedi/drivers/amplc_pc236.c
++++ b/drivers/staging/comedi/drivers/amplc_pc236.c
+@@ -470,7 +470,7 @@ static int pc236_detach(struct comedi_device *dev)
+ {
+ printk(KERN_DEBUG "comedi%d: %s: detach\n", dev->minor,
+ PC236_DRIVER_NAME);
+- if (devpriv)
++ if (dev->iobase)
+ pc236_intr_disable(dev);
+
+ if (dev->irq)
+diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
+index ae8c33e..abc5ac5 100644
+--- a/drivers/staging/hv/storvsc_drv.c
++++ b/drivers/staging/hv/storvsc_drv.c
+@@ -1043,7 +1043,12 @@ static int storvsc_host_reset(struct hv_device *device)
+ /*
+ * At this point, all outstanding requests in the adapter
+ * should have been flushed out and return to us
++ * There is a potential race here where the host may be in
++ * the process of responding when we return from here.
++ * Just wait for all in-transit packets to be accounted for
++ * before we return from here.
+ */
++ storvsc_wait_to_drain(stor_device);
+
+ cleanup:
+ return ret;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index dbf7d20..df7f15d 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -760,10 +760,6 @@ static const __u32 acm_tty_speed[] = {
+ 2500000, 3000000, 3500000, 4000000
+ };
+
+-static const __u8 acm_tty_size[] = {
+- 5, 6, 7, 8
+-};
+-
+ static void acm_tty_set_termios(struct tty_struct *tty,
+ struct ktermios *termios_old)
+ {
+@@ -780,7 +776,21 @@ static void acm_tty_set_termios(struct tty_struct *tty,
+ newline.bParityType = termios->c_cflag & PARENB ?
+ (termios->c_cflag & PARODD ? 1 : 2) +
+ (termios->c_cflag & CMSPAR ? 2 : 0) : 0;
+- newline.bDataBits = acm_tty_size[(termios->c_cflag & CSIZE) >> 4];
++ switch (termios->c_cflag & CSIZE) {
++ case CS5:
++ newline.bDataBits = 5;
++ break;
++ case CS6:
++ newline.bDataBits = 6;
++ break;
++ case CS7:
++ newline.bDataBits = 7;
++ break;
++ case CS8:
++ default:
++ newline.bDataBits = 8;
++ break;
++ }
+ /* FIXME: Needs to clear unsupported bits in the termios */
+ acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
+
+@@ -1172,7 +1182,7 @@ made_compressed_probe:
+
+ if (usb_endpoint_xfer_int(epwrite))
+ usb_fill_int_urb(snd->urb, usb_dev,
+- usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
++ usb_sndintpipe(usb_dev, epwrite->bEndpointAddress),
+ NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval);
+ else
+ usb_fill_bulk_urb(snd->urb, usb_dev,
+@@ -1496,6 +1506,9 @@ static const struct usb_device_id acm_ids[] = {
+ Maybe we should define a new
+ quirk for this. */
+ },
++ { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
++ .driver_info = NO_UNION_NORMAL,
++ },
+ { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 52340cc..a9a74d2 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -482,13 +482,16 @@ static void hub_tt_work(struct work_struct *work)
+ int limit = 100;
+
+ spin_lock_irqsave (&hub->tt.lock, flags);
+- while (--limit && !list_empty (&hub->tt.clear_list)) {
++ while (!list_empty(&hub->tt.clear_list)) {
+ struct list_head *next;
+ struct usb_tt_clear *clear;
+ struct usb_device *hdev = hub->hdev;
+ const struct hc_driver *drv;
+ int status;
+
++ if (!hub->quiescing && --limit < 0)
++ break;
++
+ next = hub->tt.clear_list.next;
+ clear = list_entry (next, struct usb_tt_clear, clear_list);
+ list_del (&clear->clear_list);
+@@ -952,7 +955,7 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
+ if (hub->has_indicators)
+ cancel_delayed_work_sync(&hub->leds);
+ if (hub->tt.hub)
+- cancel_work_sync(&hub->tt.clear_work);
++ flush_work_sync(&hub->tt.clear_work);
+ }
+
+ /* caller has locked the hub device */
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index d0ec2f0..c2815a5 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -545,7 +545,14 @@ static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = {
+ /* Pegatron Lucid (Ordissimo AIRIS) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
+- DMI_MATCH(DMI_BIOS_VERSION, "Lucid-GE-133"),
++ DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
++ },
++ },
++ {
++ /* Pegatron Lucid (Ordissimo) */
++ .matches = {
++ DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"),
++ DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
+ },
+ },
+ { }
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 950aef8..0c6fb19 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1212,6 +1212,17 @@ static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
+ cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
+ xhci->cmd_ring->dequeue, &cycle_state);
+
++ if (!cur_seg) {
++ xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n",
++ xhci->cmd_ring->dequeue,
++ (unsigned long long)
++ xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
++ xhci->cmd_ring->dequeue));
++ xhci_debug_ring(xhci, xhci->cmd_ring);
++ xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
++ return;
++ }
++
+ /* find the command trb matched by cd from command ring */
+ for (cmd_trb = xhci->cmd_ring->dequeue;
+ cmd_trb != xhci->cmd_ring->enqueue;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index f5c0f38..5a23f4d 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -471,7 +471,8 @@ static bool compliance_mode_recovery_timer_quirk_check(void)
+
+ if (strstr(dmi_product_name, "Z420") ||
+ strstr(dmi_product_name, "Z620") ||
+- strstr(dmi_product_name, "Z820"))
++ strstr(dmi_product_name, "Z820") ||
++ strstr(dmi_product_name, "Z1"))
+ return true;
+
+ return false;
+diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
+index 42de17b..d3addb2 100644
+--- a/drivers/usb/serial/mct_u232.c
++++ b/drivers/usb/serial/mct_u232.c
+@@ -577,12 +577,14 @@ static void mct_u232_close(struct usb_serial_port *port)
+ {
+ dbg("%s port %d", __func__, port->number);
+
+- if (port->serial->dev) {
+- /* shutdown our urbs */
+- usb_kill_urb(port->write_urb);
+- usb_kill_urb(port->read_urb);
+- usb_kill_urb(port->interrupt_in_urb);
+- }
++ /*
++ * Must kill the read urb as it is actually an interrupt urb, which
++ * generic close thus fails to kill.
++ */
++ usb_kill_urb(port->read_urb);
++ usb_kill_urb(port->interrupt_in_urb);
++
++ usb_serial_generic_close(port);
+ } /* mct_u232_close */
+
+
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index b150ed9..d481f80 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -235,12 +235,10 @@ struct moschip_port {
+ int port_num; /*Actual port number in the device(1,2,etc) */
+ struct urb *write_urb; /* write URB for this port */
+ struct urb *read_urb; /* read URB for this port */
+- struct urb *int_urb;
+ __u8 shadowLCR; /* last LCR value received */
+ __u8 shadowMCR; /* last MCR value received */
+ char open;
+ char open_ports;
+- char zombie;
+ wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */
+ wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */
+ int delta_msr_cond;
+@@ -505,7 +503,6 @@ static void mos7840_control_callback(struct urb *urb)
+ unsigned char *data;
+ struct moschip_port *mos7840_port;
+ __u8 regval = 0x0;
+- int result = 0;
+ int status = urb->status;
+
+ mos7840_port = urb->context;
+@@ -524,7 +521,7 @@ static void mos7840_control_callback(struct urb *urb)
+ default:
+ dbg("%s - nonzero urb status received: %d", __func__,
+ status);
+- goto exit;
++ return;
+ }
+
+ dbg("%s urb buffer size is %d", __func__, urb->actual_length);
+@@ -537,17 +534,6 @@ static void mos7840_control_callback(struct urb *urb)
+ mos7840_handle_new_msr(mos7840_port, regval);
+ else if (mos7840_port->MsrLsr == 1)
+ mos7840_handle_new_lsr(mos7840_port, regval);
+-
+-exit:
+- spin_lock(&mos7840_port->pool_lock);
+- if (!mos7840_port->zombie)
+- result = usb_submit_urb(mos7840_port->int_urb, GFP_ATOMIC);
+- spin_unlock(&mos7840_port->pool_lock);
+- if (result) {
+- dev_err(&urb->dev->dev,
+- "%s - Error %d submitting interrupt urb\n",
+- __func__, result);
+- }
+ }
+
+ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
+@@ -655,14 +641,7 @@ static void mos7840_interrupt_callback(struct urb *urb)
+ wreg = MODEM_STATUS_REGISTER;
+ break;
+ }
+- spin_lock(&mos7840_port->pool_lock);
+- if (!mos7840_port->zombie) {
+- rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data);
+- } else {
+- spin_unlock(&mos7840_port->pool_lock);
+- return;
+- }
+- spin_unlock(&mos7840_port->pool_lock);
++ rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data);
+ }
+ }
+ }
+@@ -2594,7 +2573,6 @@ error:
+ kfree(mos7840_port->ctrl_buf);
+ usb_free_urb(mos7840_port->control_urb);
+ kfree(mos7840_port);
+- serial->port[i] = NULL;
+ }
+ return status;
+ }
+@@ -2625,9 +2603,6 @@ static void mos7840_disconnect(struct usb_serial *serial)
+ mos7840_port = mos7840_get_port_private(serial->port[i]);
+ dbg ("mos7840_port %d = %p", i, mos7840_port);
+ if (mos7840_port) {
+- spin_lock_irqsave(&mos7840_port->pool_lock, flags);
+- mos7840_port->zombie = 1;
+- spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
+ usb_kill_urb(mos7840_port->control_urb);
+ }
+ }
+@@ -2661,6 +2636,7 @@ static void mos7840_release(struct usb_serial *serial)
+ mos7840_port = mos7840_get_port_private(serial->port[i]);
+ dbg("mos7840_port %d = %p", i, mos7840_port);
+ if (mos7840_port) {
++ usb_free_urb(mos7840_port->control_urb);
+ kfree(mos7840_port->ctrl_buf);
+ kfree(mos7840_port->dr);
+ kfree(mos7840_port);
+diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
+index c248a91..d6c5ed6 100644
+--- a/drivers/usb/serial/opticon.c
++++ b/drivers/usb/serial/opticon.c
+@@ -160,7 +160,11 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype,
+ {
+ struct usb_serial *serial = port->serial;
+ int retval;
+- u8 buffer[2];
++ u8 *buffer;
++
++ buffer = kzalloc(1, GFP_KERNEL);
++ if (!buffer)
++ return -ENOMEM;
+
+ buffer[0] = val;
+ /* Send the message to the vendor control endpoint
+@@ -169,6 +173,7 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype,
+ requesttype,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ 0, 0, buffer, 1, 0);
++ kfree(buffer);
+
+ return retval;
+ }
+@@ -292,7 +297,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
+ if (!dr) {
+ dev_err(&port->dev, "out of memory\n");
+ count = -ENOMEM;
+- goto error;
++ goto error_no_dr;
+ }
+
+ dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT;
+@@ -322,6 +327,8 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
+
+ return count;
+ error:
++ kfree(dr);
++error_no_dr:
+ usb_free_urb(urb);
+ error_no_urb:
+ kfree(buffer);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 3fd4e6f..c334670 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -503,11 +503,19 @@ static const struct option_blacklist_info net_intf5_blacklist = {
+ .reserved = BIT(5),
+ };
+
++static const struct option_blacklist_info net_intf6_blacklist = {
++ .reserved = BIT(6),
++};
++
+ static const struct option_blacklist_info zte_mf626_blacklist = {
+ .sendsetup = BIT(0) | BIT(1),
+ .reserved = BIT(4),
+ };
+
++static const struct option_blacklist_info zte_1255_blacklist = {
++ .reserved = BIT(3) | BIT(4),
++};
++
+ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+ { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
+@@ -853,13 +861,19 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
+@@ -872,7 +886,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
+@@ -880,13 +895,22 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
++ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
+@@ -1002,18 +1026,24 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&zte_1255_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
+@@ -1058,8 +1088,16 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
+ 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+@@ -1071,15 +1109,21 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index 535d087..e1f1ebd 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -171,7 +171,6 @@ static int sierra_probe(struct usb_serial *serial,
+ {
+ int result = 0;
+ struct usb_device *udev;
+- struct sierra_intf_private *data;
+ u8 ifnum;
+
+ udev = serial->dev;
+@@ -199,11 +198,6 @@ static int sierra_probe(struct usb_serial *serial,
+ return -ENODEV;
+ }
+
+- data = serial->private = kzalloc(sizeof(struct sierra_intf_private), GFP_KERNEL);
+- if (!data)
+- return -ENOMEM;
+- spin_lock_init(&data->susp_lock);
+-
+ return result;
+ }
+
+@@ -915,6 +909,7 @@ static void sierra_dtr_rts(struct usb_serial_port *port, int on)
+ static int sierra_startup(struct usb_serial *serial)
+ {
+ struct usb_serial_port *port;
++ struct sierra_intf_private *intfdata;
+ struct sierra_port_private *portdata;
+ struct sierra_iface_info *himemoryp = NULL;
+ int i;
+@@ -922,6 +917,14 @@ static int sierra_startup(struct usb_serial *serial)
+
+ dev_dbg(&serial->dev->dev, "%s\n", __func__);
+
++ intfdata = kzalloc(sizeof(*intfdata), GFP_KERNEL);
++ if (!intfdata)
++ return -ENOMEM;
++
++ spin_lock_init(&intfdata->susp_lock);
++
++ usb_set_serial_data(serial, intfdata);
++
+ /* Set Device mode to D0 */
+ sierra_set_power_state(serial->dev, 0x0000);
+
+@@ -937,7 +940,7 @@ static int sierra_startup(struct usb_serial *serial)
+ dev_dbg(&port->dev, "%s: kmalloc for "
+ "sierra_port_private (%d) failed!\n",
+ __func__, i);
+- return -ENOMEM;
++ goto err;
+ }
+ spin_lock_init(&portdata->lock);
+ init_usb_anchor(&portdata->active);
+@@ -974,6 +977,14 @@ static int sierra_startup(struct usb_serial *serial)
+ }
+
+ return 0;
++err:
++ for (--i; i >= 0; --i) {
++ portdata = usb_get_serial_port_data(serial->port[i]);
++ kfree(portdata);
++ }
++ kfree(intfdata);
++
++ return -ENOMEM;
+ }
+
+ static void sierra_release(struct usb_serial *serial)
+@@ -993,6 +1004,7 @@ static void sierra_release(struct usb_serial *serial)
+ continue;
+ kfree(portdata);
+ }
++ kfree(serial->private);
+ }
+
+ #ifdef CONFIG_PM
+diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
+index 5b073bc..59d646d 100644
+--- a/drivers/usb/serial/whiteheat.c
++++ b/drivers/usb/serial/whiteheat.c
+@@ -576,6 +576,7 @@ no_firmware:
+ "%s: please contact support@connecttech.com\n",
+ serial->type->description);
+ kfree(result);
++ kfree(command);
+ return -ENODEV;
+
+ no_command_private:
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 591f57f..fa8a1b2 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1004,6 +1004,12 @@ UNUSUAL_DEV( 0x07cf, 0x1001, 0x1000, 0x9999,
+ USB_SC_8070, USB_PR_CB, NULL,
+ US_FL_NEED_OVERRIDE | US_FL_FIX_INQUIRY ),
+
++/* Submitted by Oleksandr Chumachenko <ledest@gmail.com> */
++UNUSUAL_DEV( 0x07cf, 0x1167, 0x0100, 0x0100,
++ "Casio",
++ "EX-N1 DigitalCamera",
++ USB_SC_8070, USB_PR_DEVICE, NULL, 0),
++
+ /* Submitted by Hartmut Wahl <hwahl@hwahl.de>*/
+ UNUSUAL_DEV( 0x0839, 0x000a, 0x0001, 0x0001,
+ "Samsung",
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 882a51f..b76071e 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -371,7 +371,8 @@ static void handle_rx(struct vhost_net *net)
+ .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
+ };
+ size_t total_len = 0;
+- int err, headcount, mergeable;
++ int err, mergeable;
++ s16 headcount;
+ size_t vhost_hlen, sock_hlen;
+ size_t vhost_len, sock_len;
+ /* TODO: check that we are running from vhost_worker? */
+diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
+index 41746bb..cb5988f 100644
+--- a/drivers/video/udlfb.c
++++ b/drivers/video/udlfb.c
+@@ -646,7 +646,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
+ result = fb_sys_write(info, buf, count, ppos);
+
+ if (result > 0) {
+- int start = max((int)(offset / info->fix.line_length) - 1, 0);
++ int start = max((int)(offset / info->fix.line_length), 0);
+ int lines = min((u32)((result / info->fix.line_length) + 1),
+ (u32)info->var.yres);
+
+diff --git a/drivers/video/via/via_clock.c b/drivers/video/via/via_clock.c
+index af8f26b..db1e392 100644
+--- a/drivers/video/via/via_clock.c
++++ b/drivers/video/via/via_clock.c
+@@ -25,6 +25,7 @@
+
+ #include <linux/kernel.h>
+ #include <linux/via-core.h>
++#include <asm/olpc.h>
+ #include "via_clock.h"
+ #include "global.h"
+ #include "debug.h"
+@@ -289,6 +290,10 @@ static void dummy_set_pll(struct via_pll_config config)
+ printk(KERN_INFO "Using undocumented set PLL.\n%s", via_slap);
+ }
+
++static void noop_set_clock_state(u8 state)
++{
++}
++
+ void via_clock_init(struct via_clock *clock, int gfx_chip)
+ {
+ switch (gfx_chip) {
+@@ -346,4 +351,18 @@ void via_clock_init(struct via_clock *clock, int gfx_chip)
+ break;
+
+ }
++
++ if (machine_is_olpc()) {
++ /* The OLPC XO-1.5 cannot suspend/resume reliably if the
++ * IGA1/IGA2 clocks are set as on or off (memory rot
++ * occasionally happens during suspend under such
++ * configurations).
++ *
++ * The only known stable scenario is to leave this bits as-is,
++ * which in their default states are documented to enable the
++ * clock only when it is needed.
++ */
++ clock->set_primary_clock_state = noop_set_clock_state;
++ clock->set_secondary_clock_state = noop_set_clock_state;
++ }
+ }
+diff --git a/fs/ceph/export.c b/fs/ceph/export.c
+index 9fbcdec..b001030 100644
+--- a/fs/ceph/export.c
++++ b/fs/ceph/export.c
+@@ -91,7 +91,7 @@ static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
+ * FIXME: we should try harder by querying the mds for the ino.
+ */
+ static struct dentry *__fh_to_dentry(struct super_block *sb,
+- struct ceph_nfs_fh *fh)
++ struct ceph_nfs_fh *fh, int fh_len)
+ {
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+ struct inode *inode;
+@@ -99,6 +99,9 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
+ struct ceph_vino vino;
+ int err;
+
++ if (fh_len < sizeof(*fh) / 4)
++ return ERR_PTR(-ESTALE);
++
+ dout("__fh_to_dentry %llx\n", fh->ino);
+ vino.ino = fh->ino;
+ vino.snap = CEPH_NOSNAP;
+@@ -142,7 +145,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
+ * convert connectable fh to dentry
+ */
+ static struct dentry *__cfh_to_dentry(struct super_block *sb,
+- struct ceph_nfs_confh *cfh)
++ struct ceph_nfs_confh *cfh, int fh_len)
+ {
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+ struct inode *inode;
+@@ -150,6 +153,9 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
+ struct ceph_vino vino;
+ int err;
+
++ if (fh_len < sizeof(*cfh) / 4)
++ return ERR_PTR(-ESTALE);
++
+ dout("__cfh_to_dentry %llx (%llx/%x)\n",
+ cfh->ino, cfh->parent_ino, cfh->parent_name_hash);
+
+@@ -199,9 +205,11 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+ {
+ if (fh_type == 1)
+- return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw);
++ return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw,
++ fh_len);
+ else
+- return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw);
++ return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw,
++ fh_len);
+ }
+
+ /*
+@@ -222,6 +230,8 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
+
+ if (fh_type == 1)
+ return ERR_PTR(-ESTALE);
++ if (fh_len < sizeof(*cfh) / 4)
++ return ERR_PTR(-ESTALE);
+
+ pr_debug("fh_to_parent %llx/%d\n", cfh->parent_ino,
+ cfh->parent_name_hash);
+diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
+index 51352de..f854cf9 100644
+--- a/fs/compat_ioctl.c
++++ b/fs/compat_ioctl.c
+@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
+
+ err = get_user(palp, &up->palette);
+ err |= get_user(length, &up->length);
++ if (err)
++ return -EFAULT;
+
+ up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
+ err = put_user(compat_ptr(palp), &up_native->palette);
+diff --git a/fs/exec.c b/fs/exec.c
+index 160cd2f..121ccae 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1095,7 +1095,7 @@ int flush_old_exec(struct linux_binprm * bprm)
+ bprm->mm = NULL; /* We're using it now */
+
+ set_fs(USER_DS);
+- current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD);
++ current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD | PF_NOFREEZE);
+ flush_thread();
+ current->personality &= ~bprm->per_clear;
+
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 54f2bdc..191580a 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2715,6 +2715,9 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
+ #define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */
+ #define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */
+
++#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
++#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
++
+ /*
+ * ext4_split_extent_at() splits an extent at given block.
+ *
+@@ -2750,6 +2753,9 @@ static int ext4_split_extent_at(handle_t *handle,
+ unsigned int ee_len, depth;
+ int err = 0;
+
++ BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
++ (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
++
+ ext_debug("ext4_split_extents_at: inode %lu, logical"
+ "block %llu\n", inode->i_ino, (unsigned long long)split);
+
+@@ -2808,7 +2814,14 @@ static int ext4_split_extent_at(handle_t *handle,
+
+ err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+ if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
+- err = ext4_ext_zeroout(inode, &orig_ex);
++ if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
++ if (split_flag & EXT4_EXT_DATA_VALID1)
++ err = ext4_ext_zeroout(inode, ex2);
++ else
++ err = ext4_ext_zeroout(inode, ex);
++ } else
++ err = ext4_ext_zeroout(inode, &orig_ex);
++
+ if (err)
+ goto fix_extent_len;
+ /* update the extent length and mark as initialized */
+@@ -2861,12 +2874,13 @@ static int ext4_split_extent(handle_t *handle,
+ uninitialized = ext4_ext_is_uninitialized(ex);
+
+ if (map->m_lblk + map->m_len < ee_block + ee_len) {
+- split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
+- EXT4_EXT_MAY_ZEROOUT : 0;
++ split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
+ flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
+ if (uninitialized)
+ split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
+ EXT4_EXT_MARK_UNINIT2;
++ if (split_flag & EXT4_EXT_DATA_VALID2)
++ split_flag1 |= EXT4_EXT_DATA_VALID1;
+ err = ext4_split_extent_at(handle, inode, path,
+ map->m_lblk + map->m_len, split_flag1, flags1);
+ if (err)
+@@ -2879,8 +2893,8 @@ static int ext4_split_extent(handle_t *handle,
+ return PTR_ERR(path);
+
+ if (map->m_lblk >= ee_block) {
+- split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
+- EXT4_EXT_MAY_ZEROOUT : 0;
++ split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT |
++ EXT4_EXT_DATA_VALID2);
+ if (uninitialized)
+ split_flag1 |= EXT4_EXT_MARK_UNINIT1;
+ if (split_flag & EXT4_EXT_MARK_UNINIT2)
+@@ -3158,26 +3172,47 @@ static int ext4_split_unwritten_extents(handle_t *handle,
+
+ split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
+ split_flag |= EXT4_EXT_MARK_UNINIT2;
+-
++ if (flags & EXT4_GET_BLOCKS_CONVERT)
++ split_flag |= EXT4_EXT_DATA_VALID2;
+ flags |= EXT4_GET_BLOCKS_PRE_IO;
+ return ext4_split_extent(handle, inode, path, map, split_flag, flags);
+ }
+
+ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
+- struct inode *inode,
+- struct ext4_ext_path *path)
++ struct inode *inode,
++ struct ext4_map_blocks *map,
++ struct ext4_ext_path *path)
+ {
+ struct ext4_extent *ex;
++ ext4_lblk_t ee_block;
++ unsigned int ee_len;
+ int depth;
+ int err = 0;
+
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
++ ee_block = le32_to_cpu(ex->ee_block);
++ ee_len = ext4_ext_get_actual_len(ex);
+
+ ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
+ "block %llu, max_blocks %u\n", inode->i_ino,
+- (unsigned long long)le32_to_cpu(ex->ee_block),
+- ext4_ext_get_actual_len(ex));
++ (unsigned long long)ee_block, ee_len);
++
++ /* If extent is larger than requested then split is required */
++ if (ee_block != map->m_lblk || ee_len > map->m_len) {
++ err = ext4_split_unwritten_extents(handle, inode, map, path,
++ EXT4_GET_BLOCKS_CONVERT);
++ if (err < 0)
++ goto out;
++ ext4_ext_drop_refs(path);
++ path = ext4_ext_find_extent(inode, map->m_lblk, path);
++ if (IS_ERR(path)) {
++ err = PTR_ERR(path);
++ goto out;
++ }
++ depth = ext_depth(inode);
++ ex = path[depth].p_ext;
++ }
+
+ err = ext4_ext_get_access(handle, inode, path + depth);
+ if (err)
+@@ -3479,7 +3514,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
+ }
+ /* IO end_io complete, convert the filled extent to written */
+ if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
+- ret = ext4_convert_unwritten_extents_endio(handle, inode,
++ ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
+ path);
+ if (ret >= 0) {
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
+index fe9945f..5235d6e 100644
+--- a/fs/gfs2/export.c
++++ b/fs/gfs2/export.c
+@@ -167,6 +167,8 @@ static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ case GFS2_SMALL_FH_SIZE:
+ case GFS2_LARGE_FH_SIZE:
+ case GFS2_OLD_FH_SIZE:
++ if (fh_len < GFS2_SMALL_FH_SIZE)
++ return NULL;
+ this.no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32;
+ this.no_formal_ino |= be32_to_cpu(fh[1]);
+ this.no_addr = ((u64)be32_to_cpu(fh[2])) << 32;
+@@ -186,6 +188,8 @@ static struct dentry *gfs2_fh_to_parent(struct super_block *sb, struct fid *fid,
+ switch (fh_type) {
+ case GFS2_LARGE_FH_SIZE:
+ case GFS2_OLD_FH_SIZE:
++ if (fh_len < GFS2_LARGE_FH_SIZE)
++ return NULL;
+ parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32;
+ parent.no_formal_ino |= be32_to_cpu(fh[5]);
+ parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32;
+diff --git a/fs/isofs/export.c b/fs/isofs/export.c
+index dd4687f..516eb21 100644
+--- a/fs/isofs/export.c
++++ b/fs/isofs/export.c
+@@ -179,7 +179,7 @@ static struct dentry *isofs_fh_to_parent(struct super_block *sb,
+ {
+ struct isofs_fid *ifid = (struct isofs_fid *)fid;
+
+- if (fh_type != 2)
++ if (fh_len < 2 || fh_type != 2)
+ return NULL;
+
+ return isofs_export_iget(sb,
+diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
+index 8799207..931bf95 100644
+--- a/fs/jbd/commit.c
++++ b/fs/jbd/commit.c
+@@ -86,7 +86,12 @@ nope:
+ static void release_data_buffer(struct buffer_head *bh)
+ {
+ if (buffer_freed(bh)) {
++ WARN_ON_ONCE(buffer_dirty(bh));
+ clear_buffer_freed(bh);
++ clear_buffer_mapped(bh);
++ clear_buffer_new(bh);
++ clear_buffer_req(bh);
++ bh->b_bdev = NULL;
+ release_buffer_page(bh);
+ } else
+ put_bh(bh);
+@@ -847,17 +852,35 @@ restart_loop:
+ * there's no point in keeping a checkpoint record for
+ * it. */
+
+- /* A buffer which has been freed while still being
+- * journaled by a previous transaction may end up still
+- * being dirty here, but we want to avoid writing back
+- * that buffer in the future after the "add to orphan"
+- * operation been committed, That's not only a performance
+- * gain, it also stops aliasing problems if the buffer is
+- * left behind for writeback and gets reallocated for another
+- * use in a different page. */
+- if (buffer_freed(bh) && !jh->b_next_transaction) {
+- clear_buffer_freed(bh);
+- clear_buffer_jbddirty(bh);
++ /*
++ * A buffer which has been freed while still being journaled by
++ * a previous transaction.
++ */
++ if (buffer_freed(bh)) {
++ /*
++ * If the running transaction is the one containing
++ * "add to orphan" operation (b_next_transaction !=
++ * NULL), we have to wait for that transaction to
++ * commit before we can really get rid of the buffer.
++ * So just clear b_modified to not confuse transaction
++ * credit accounting and refile the buffer to
++ * BJ_Forget of the running transaction. If the just
++ * committed transaction contains "add to orphan"
++ * operation, we can completely invalidate the buffer
++ * now. We are rather throughout in that since the
++ * buffer may be still accessible when blocksize <
++ * pagesize and it is attached to the last partial
++ * page.
++ */
++ jh->b_modified = 0;
++ if (!jh->b_next_transaction) {
++ clear_buffer_freed(bh);
++ clear_buffer_jbddirty(bh);
++ clear_buffer_mapped(bh);
++ clear_buffer_new(bh);
++ clear_buffer_req(bh);
++ bh->b_bdev = NULL;
++ }
+ }
+
+ if (buffer_jbddirty(bh)) {
+diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
+index 7e59c6e..edac004 100644
+--- a/fs/jbd/transaction.c
++++ b/fs/jbd/transaction.c
+@@ -1839,15 +1839,16 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
+ * We're outside-transaction here. Either or both of j_running_transaction
+ * and j_committing_transaction may be NULL.
+ */
+-static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
++static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
++ int partial_page)
+ {
+ transaction_t *transaction;
+ struct journal_head *jh;
+ int may_free = 1;
+- int ret;
+
+ BUFFER_TRACE(bh, "entry");
+
++retry:
+ /*
+ * It is safe to proceed here without the j_list_lock because the
+ * buffers cannot be stolen by try_to_free_buffers as long as we are
+@@ -1875,10 +1876,18 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ * clear the buffer dirty bit at latest at the moment when the
+ * transaction marking the buffer as freed in the filesystem
+ * structures is committed because from that moment on the
+- * buffer can be reallocated and used by a different page.
++ * block can be reallocated and used by a different page.
+ * Since the block hasn't been freed yet but the inode has
+ * already been added to orphan list, it is safe for us to add
+ * the buffer to BJ_Forget list of the newest transaction.
++ *
++ * Also we have to clear buffer_mapped flag of a truncated buffer
++ * because the buffer_head may be attached to the page straddling
++ * i_size (can happen only when blocksize < pagesize) and thus the
++ * buffer_head can be reused when the file is extended again. So we end
++ * up keeping around invalidated buffers attached to transactions'
++ * BJ_Forget list just to stop checkpointing code from cleaning up
++ * the transaction this buffer was modified in.
+ */
+ transaction = jh->b_transaction;
+ if (transaction == NULL) {
+@@ -1905,13 +1914,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ * committed, the buffer won't be needed any
+ * longer. */
+ JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
+- ret = __dispose_buffer(jh,
++ may_free = __dispose_buffer(jh,
+ journal->j_running_transaction);
+- journal_put_journal_head(jh);
+- spin_unlock(&journal->j_list_lock);
+- jbd_unlock_bh_state(bh);
+- spin_unlock(&journal->j_state_lock);
+- return ret;
++ goto zap_buffer;
+ } else {
+ /* There is no currently-running transaction. So the
+ * orphan record which we wrote for this file must have
+@@ -1919,13 +1924,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ * the committing transaction, if it exists. */
+ if (journal->j_committing_transaction) {
+ JBUFFER_TRACE(jh, "give to committing trans");
+- ret = __dispose_buffer(jh,
++ may_free = __dispose_buffer(jh,
+ journal->j_committing_transaction);
+- journal_put_journal_head(jh);
+- spin_unlock(&journal->j_list_lock);
+- jbd_unlock_bh_state(bh);
+- spin_unlock(&journal->j_state_lock);
+- return ret;
++ goto zap_buffer;
+ } else {
+ /* The orphan record's transaction has
+ * committed. We can cleanse this buffer */
+@@ -1946,10 +1947,24 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ }
+ /*
+ * The buffer is committing, we simply cannot touch
+- * it. So we just set j_next_transaction to the
+- * running transaction (if there is one) and mark
+- * buffer as freed so that commit code knows it should
+- * clear dirty bits when it is done with the buffer.
++ * it. If the page is straddling i_size we have to wait
++ * for commit and try again.
++ */
++ if (partial_page) {
++ tid_t tid = journal->j_committing_transaction->t_tid;
++
++ journal_put_journal_head(jh);
++ spin_unlock(&journal->j_list_lock);
++ jbd_unlock_bh_state(bh);
++ spin_unlock(&journal->j_state_lock);
++ log_wait_commit(journal, tid);
++ goto retry;
++ }
++ /*
++ * OK, buffer won't be reachable after truncate. We just set
++ * j_next_transaction to the running transaction (if there is
++ * one) and mark buffer as freed so that commit code knows it
++ * should clear dirty bits when it is done with the buffer.
+ */
+ set_buffer_freed(bh);
+ if (journal->j_running_transaction && buffer_jbddirty(bh))
+@@ -1972,6 +1987,14 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ }
+
+ zap_buffer:
++ /*
++ * This is tricky. Although the buffer is truncated, it may be reused
++ * if blocksize < pagesize and it is attached to the page straddling
++ * EOF. Since the buffer might have been added to BJ_Forget list of the
++ * running transaction, journal_get_write_access() won't clear
++ * b_modified and credit accounting gets confused. So clear b_modified
++ * here. */
++ jh->b_modified = 0;
+ journal_put_journal_head(jh);
+ zap_buffer_no_jh:
+ spin_unlock(&journal->j_list_lock);
+@@ -2020,7 +2043,8 @@ void journal_invalidatepage(journal_t *journal,
+ if (offset <= curr_off) {
+ /* This block is wholly outside the truncation point */
+ lock_buffer(bh);
+- may_free &= journal_unmap_buffer(journal, bh);
++ may_free &= journal_unmap_buffer(journal, bh,
++ offset > 0);
+ unlock_buffer(bh);
+ }
+ curr_off = next_off;
+diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
+index 36057ce..6e2a2d5 100644
+--- a/fs/lockd/clntxdr.c
++++ b/fs/lockd/clntxdr.c
+@@ -223,7 +223,7 @@ static void encode_nlm_stat(struct xdr_stream *xdr,
+ {
+ __be32 *p;
+
+- BUG_ON(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
++ WARN_ON_ONCE(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
+ p = xdr_reserve_space(xdr, 4);
+ *p = stat;
+ }
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index df753a1..23d7451 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -40,7 +40,6 @@ struct nsm_args {
+ u32 proc;
+
+ char *mon_name;
+- char *nodename;
+ };
+
+ struct nsm_res {
+@@ -94,7 +93,6 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res)
+ .vers = 3,
+ .proc = NLMPROC_NSM_NOTIFY,
+ .mon_name = nsm->sm_mon_name,
+- .nodename = utsname()->nodename,
+ };
+ struct rpc_message msg = {
+ .rpc_argp = &args,
+@@ -431,7 +429,7 @@ static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
+ {
+ __be32 *p;
+
+- encode_nsm_string(xdr, argp->nodename);
++ encode_nsm_string(xdr, utsname()->nodename);
+ p = xdr_reserve_space(xdr, 4 + 4 + 4);
+ *p++ = cpu_to_be32(argp->prog);
+ *p++ = cpu_to_be32(argp->vers);
+diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
+index d27aab1..d413af3 100644
+--- a/fs/lockd/svcproc.c
++++ b/fs/lockd/svcproc.c
+@@ -67,7 +67,8 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
+
+ /* Obtain file pointer. Not used by FREE_ALL call. */
+ if (filp != NULL) {
+- if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
++ error = cast_status(nlm_lookup_file(rqstp, &file, &lock->fh));
++ if (error != 0)
+ goto no_locks;
+ *filp = file;
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 4cfe260..d225b51 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -3673,6 +3673,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
+
+ nfsd4_close_open_stateid(stp);
++ release_last_closed_stateid(oo);
+ oo->oo_last_closed_stid = stp;
+
+ /* place unused nfs4_stateowners on so_close_lru list to be
+diff --git a/fs/proc/stat.c b/fs/proc/stat.c
+index 0855e6f..4c9a859 100644
+--- a/fs/proc/stat.c
++++ b/fs/proc/stat.c
+@@ -24,11 +24,14 @@
+
+ static cputime64_t get_idle_time(int cpu)
+ {
+- u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
++ u64 idle_time = -1ULL;
+ cputime64_t idle;
+
++ if (cpu_online(cpu))
++ idle_time = get_cpu_idle_time_us(cpu, NULL);
++
+ if (idle_time == -1ULL) {
+- /* !NO_HZ so we can rely on cpustat.idle */
++ /* !NO_HZ or cpu offline so we can rely on cpustat.idle */
+ idle = kstat_cpu(cpu).cpustat.idle;
+ idle = cputime64_add(idle, arch_idle_time(cpu));
+ } else
+@@ -39,11 +42,14 @@ static cputime64_t get_idle_time(int cpu)
+
+ static cputime64_t get_iowait_time(int cpu)
+ {
+- u64 iowait_time = get_cpu_iowait_time_us(cpu, NULL);
++ u64 iowait_time = -1ULL;
+ cputime64_t iowait;
+
++ if (cpu_online(cpu))
++ iowait_time = get_cpu_iowait_time_us(cpu, NULL);
++
+ if (iowait_time == -1ULL)
+- /* !NO_HZ so we can rely on cpustat.iowait */
++ /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
+ iowait = kstat_cpu(cpu).cpustat.iowait;
+ else
+ iowait = usecs_to_cputime64(iowait_time);
+diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
+index 950f13a..5809abb 100644
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -1573,8 +1573,10 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ reiserfs_warning(sb, "reiserfs-13077",
+ "nfsd/reiserfs, fhtype=%d, len=%d - odd",
+ fh_type, fh_len);
+- fh_type = 5;
++ fh_type = fh_len;
+ }
++ if (fh_len < 2)
++ return NULL;
+
+ return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1],
+ (fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0);
+@@ -1583,6 +1585,8 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+ {
++ if (fh_type > fh_len)
++ fh_type = fh_len;
+ if (fh_type < 4)
+ return NULL;
+
+diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
+index 7fdf6a7..fabbb81 100644
+--- a/fs/sysfs/dir.c
++++ b/fs/sysfs/dir.c
+@@ -430,20 +430,18 @@ int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
+ /**
+ * sysfs_pathname - return full path to sysfs dirent
+ * @sd: sysfs_dirent whose path we want
+- * @path: caller allocated buffer
++ * @path: caller allocated buffer of size PATH_MAX
+ *
+ * Gives the name "/" to the sysfs_root entry; any path returned
+ * is relative to wherever sysfs is mounted.
+- *
+- * XXX: does no error checking on @path size
+ */
+ static char *sysfs_pathname(struct sysfs_dirent *sd, char *path)
+ {
+ if (sd->s_parent) {
+ sysfs_pathname(sd->s_parent, path);
+- strcat(path, "/");
++ strlcat(path, "/", PATH_MAX);
+ }
+- strcat(path, sd->s_name);
++ strlcat(path, sd->s_name, PATH_MAX);
+ return path;
+ }
+
+@@ -476,9 +474,11 @@ int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
+ char *path = kzalloc(PATH_MAX, GFP_KERNEL);
+ WARN(1, KERN_WARNING
+ "sysfs: cannot create duplicate filename '%s'\n",
+- (path == NULL) ? sd->s_name :
+- strcat(strcat(sysfs_pathname(acxt->parent_sd, path), "/"),
+- sd->s_name));
++ (path == NULL) ? sd->s_name
++ : (sysfs_pathname(acxt->parent_sd, path),
++ strlcat(path, "/", PATH_MAX),
++ strlcat(path, sd->s_name, PATH_MAX),
++ path));
+ kfree(path);
+ }
+
+diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
+index 558910f..5703fb8 100644
+--- a/fs/xfs/xfs_export.c
++++ b/fs/xfs/xfs_export.c
+@@ -195,6 +195,9 @@ xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid,
+ struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fid;
+ struct inode *inode = NULL;
+
++ if (fh_len < xfs_fileid_length(fileid_type))
++ return NULL;
++
+ switch (fileid_type) {
+ case FILEID_INO32_GEN_PARENT:
+ inode = xfs_nfs_get_inode(sb, fid->i32.parent_ino,
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index 12d5543..c944c4f 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -97,6 +97,8 @@ static inline int is_vlan_dev(struct net_device *dev)
+ }
+
+ #define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
++#define vlan_tx_nonzero_tag_present(__skb) \
++ (vlan_tx_tag_present(__skb) && ((__skb)->vlan_tci & VLAN_VID_MASK))
+ #define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+
+ #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+@@ -106,7 +108,7 @@ extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
+ extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
+ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
+
+-extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler);
++extern bool vlan_do_receive(struct sk_buff **skb);
+ extern struct sk_buff *vlan_untag(struct sk_buff *skb);
+
+ #else
+@@ -128,10 +130,8 @@ static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
+ return 0;
+ }
+
+-static inline bool vlan_do_receive(struct sk_buff **skb, bool last_handler)
++static inline bool vlan_do_receive(struct sk_buff **skb)
+ {
+- if (((*skb)->vlan_tci & VLAN_VID_MASK) && last_handler)
+- (*skb)->pkt_type = PACKET_OTHERHOST;
+ return false;
+ }
+
+diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
+index 904131b..b25b09b 100644
+--- a/include/linux/mtd/nand.h
++++ b/include/linux/mtd/nand.h
+@@ -215,9 +215,6 @@ typedef enum {
+ #define NAND_SUBPAGE_READ(chip) ((chip->ecc.mode == NAND_ECC_SOFT) \
+ && (chip->page_shift > 9))
+
+-/* Mask to zero out the chip options, which come from the id table */
+-#define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR)
+-
+ /* Non chip related options */
+ /* This option skips the bbt scan during initialization. */
+ #define NAND_SKIP_BBTSCAN 0x00010000
+diff --git a/include/linux/netfilter/xt_set.h b/include/linux/netfilter/xt_set.h
+index c0405ac..e3a9978 100644
+--- a/include/linux/netfilter/xt_set.h
++++ b/include/linux/netfilter/xt_set.h
+@@ -58,8 +58,8 @@ struct xt_set_info_target_v1 {
+ struct xt_set_info_target_v2 {
+ struct xt_set_info add_set;
+ struct xt_set_info del_set;
+- u32 flags;
+- u32 timeout;
++ __u32 flags;
++ __u32 timeout;
+ };
+
+ #endif /*_XT_SET_H*/
+diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
+index a88fb69..ea6f8a4 100644
+--- a/include/net/netfilter/nf_conntrack_ecache.h
++++ b/include/net/netfilter/nf_conntrack_ecache.h
+@@ -18,6 +18,7 @@ struct nf_conntrack_ecache {
+ u16 ctmask; /* bitmask of ct events to be delivered */
+ u16 expmask; /* bitmask of expect events to be delivered */
+ u32 pid; /* netlink pid of destroyer */
++ struct timer_list timeout;
+ };
+
+ static inline struct nf_conntrack_ecache *
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index cdc0354..6337535 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -1803,9 +1803,8 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
+ * trading it for newcg is protected by cgroup_mutex, we're safe to drop
+ * it here; it will be freed under RCU.
+ */
+- put_css_set(oldcg);
+-
+ set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
++ put_css_set(oldcg);
+ return 0;
+ }
+
+diff --git a/kernel/module.c b/kernel/module.c
+index 6969ef0..6c8fa34 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2659,6 +2659,10 @@ static int check_module_license_and_versions(struct module *mod)
+ if (strcmp(mod->name, "driverloader") == 0)
+ add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
+
++ /* lve claims to be GPL but upstream won't provide source */
++ if (strcmp(mod->name, "lve") == 0)
++ add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
++
+ #ifdef CONFIG_MODVERSIONS
+ if ((mod->num_syms && !mod->crcs)
+ || (mod->num_gpl_syms && !mod->gpl_crcs)
+diff --git a/kernel/sys.c b/kernel/sys.c
+index c504302..d7c4ab0 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1171,15 +1171,16 @@ DECLARE_RWSEM(uts_sem);
+ * Work around broken programs that cannot handle "Linux 3.0".
+ * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
+ */
+-static int override_release(char __user *release, int len)
++static int override_release(char __user *release, size_t len)
+ {
+ int ret = 0;
+- char buf[65];
+
+ if (current->personality & UNAME26) {
+- char *rest = UTS_RELEASE;
++ const char *rest = UTS_RELEASE;
++ char buf[65] = { 0 };
+ int ndots = 0;
+ unsigned v;
++ size_t copy;
+
+ while (*rest) {
+ if (*rest == '.' && ++ndots >= 3)
+@@ -1189,8 +1190,9 @@ static int override_release(char __user *release, int len)
+ rest++;
+ }
+ v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
+- snprintf(buf, len, "2.6.%u%s", v, rest);
+- ret = copy_to_user(release, buf, len);
++ copy = min(sizeof(buf), max_t(size_t, 1, len));
++ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
++ ret = copy_to_user(release, buf, copy + 1);
+ }
+ return ret;
+ }
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 5ee1ac0..cb7f33e 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -992,7 +992,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
+ }
+
+ /* Accumulate raw time */
+- raw_nsecs = timekeeper.raw_interval << shift;
++ raw_nsecs = (u64)timekeeper.raw_interval << shift;
+ raw_nsecs += raw_time.tv_nsec;
+ if (raw_nsecs >= NSEC_PER_SEC) {
+ u64 raw_secs = raw_nsecs;
+diff --git a/kernel/timer.c b/kernel/timer.c
+index 9c3c62b..c219db6 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -63,6 +63,7 @@ EXPORT_SYMBOL(jiffies_64);
+ #define TVR_SIZE (1 << TVR_BITS)
+ #define TVN_MASK (TVN_SIZE - 1)
+ #define TVR_MASK (TVR_SIZE - 1)
++#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
+
+ struct tvec {
+ struct list_head vec[TVN_SIZE];
+@@ -356,11 +357,12 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+ vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
+ } else {
+ int i;
+- /* If the timeout is larger than 0xffffffff on 64-bit
+- * architectures then we use the maximum timeout:
++ /* If the timeout is larger than MAX_TVAL (on 64-bit
++ * architectures or with CONFIG_BASE_SMALL=1) then we
++ * use the maximum timeout.
+ */
+- if (idx > 0xffffffffUL) {
+- idx = 0xffffffffUL;
++ if (idx > MAX_TVAL) {
++ idx = MAX_TVAL;
+ expires = idx + base->timer_jiffies;
+ }
+ i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
+diff --git a/lib/genalloc.c b/lib/genalloc.c
+index f352cc4..716f947 100644
+--- a/lib/genalloc.c
++++ b/lib/genalloc.c
+@@ -176,7 +176,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
+ struct gen_pool_chunk *chunk;
+ int nbits = size >> pool->min_alloc_order;
+ int nbytes = sizeof(struct gen_pool_chunk) +
+- (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
++ BITS_TO_LONGS(nbits) * sizeof(long);
+
+ chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
+ if (unlikely(chunk == NULL))
+diff --git a/mm/rmap.c b/mm/rmap.c
+index a4fd368..8685697 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -56,6 +56,7 @@
+ #include <linux/mmu_notifier.h>
+ #include <linux/migrate.h>
+ #include <linux/hugetlb.h>
++#include <linux/backing-dev.h>
+
+ #include <asm/tlbflush.h>
+
+@@ -935,11 +936,8 @@ int page_mkclean(struct page *page)
+
+ if (page_mapped(page)) {
+ struct address_space *mapping = page_mapping(page);
+- if (mapping) {
++ if (mapping)
+ ret = page_mkclean_file(mapping, page);
+- if (page_test_and_clear_dirty(page_to_pfn(page), 1))
+- ret = 1;
+- }
+ }
+
+ return ret;
+@@ -1120,6 +1118,8 @@ void page_add_file_rmap(struct page *page)
+ */
+ void page_remove_rmap(struct page *page)
+ {
++ struct address_space *mapping = page_mapping(page);
++
+ /* page still mapped by someone else? */
+ if (!atomic_add_negative(-1, &page->_mapcount))
+ return;
+@@ -1130,8 +1130,19 @@ void page_remove_rmap(struct page *page)
+ * this if the page is anon, so about to be freed; but perhaps
+ * not if it's in swapcache - there might be another pte slot
+ * containing the swap entry, but page not yet written to swap.
++ *
++ * And we can skip it on file pages, so long as the filesystem
++ * participates in dirty tracking; but need to catch shm and tmpfs
++ * and ramfs pages which have been modified since creation by read
++ * fault.
++ *
++ * Note that mapping must be decided above, before decrementing
++ * mapcount (which luckily provides a barrier): once page is unmapped,
++ * it could be truncated and page->mapping reset to NULL at any moment.
++ * Note also that we are relying on page_mapping(page) to set mapping
++ * to &swapper_space when PageSwapCache(page).
+ */
+- if ((!PageAnon(page) || PageSwapCache(page)) &&
++ if (mapping && !mapping_cap_account_dirty(mapping) &&
+ page_test_and_clear_dirty(page_to_pfn(page), 1))
+ set_page_dirty(page);
+ /*
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 7a82174..126ca35 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1962,12 +1962,14 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
+ {
+ struct inode *inode;
+ struct dentry *dentry = NULL;
+- u64 inum = fid->raw[2];
+- inum = (inum << 32) | fid->raw[1];
++ u64 inum;
+
+ if (fh_len < 3)
+ return NULL;
+
++ inum = fid->raw[2];
++ inum = (inum << 32) | fid->raw[1];
++
+ inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
+ shmem_match, fid->raw);
+ if (inode) {
+diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
+index 9ddbd4e..e860a4f 100644
+--- a/net/8021q/vlan_core.c
++++ b/net/8021q/vlan_core.c
+@@ -5,7 +5,7 @@
+ #include <linux/export.h>
+ #include "vlan.h"
+
+-bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
++bool vlan_do_receive(struct sk_buff **skbp)
+ {
+ struct sk_buff *skb = *skbp;
+ u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
+@@ -13,14 +13,8 @@ bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
+ struct vlan_pcpu_stats *rx_stats;
+
+ vlan_dev = vlan_find_dev(skb->dev, vlan_id);
+- if (!vlan_dev) {
+- /* Only the last call to vlan_do_receive() should change
+- * pkt_type to PACKET_OTHERHOST
+- */
+- if (vlan_id && last_handler)
+- skb->pkt_type = PACKET_OTHERHOST;
++ if (!vlan_dev)
+ return false;
+- }
+
+ skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index c27b4e3..1849ee0 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -30,6 +30,8 @@
+
+ #define SMP_TIMEOUT 30000 /* 30 seconds */
+
++#define AUTH_REQ_MASK 0x07
++
+ static inline void swap128(u8 src[16], u8 dst[16])
+ {
+ int i;
+@@ -206,7 +208,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
+ req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
+ req->init_key_dist = dist_keys;
+ req->resp_key_dist = dist_keys;
+- req->auth_req = authreq;
++ req->auth_req = (authreq & AUTH_REQ_MASK);
+ return;
+ }
+
+@@ -215,7 +217,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
+ rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
+ rsp->init_key_dist = req->init_key_dist & dist_keys;
+ rsp->resp_key_dist = req->resp_key_dist & dist_keys;
+- rsp->auth_req = authreq;
++ rsp->auth_req = (authreq & AUTH_REQ_MASK);
+ }
+
+ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index abe1147..f500a69 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3278,18 +3278,18 @@ another_round:
+ ncls:
+ #endif
+
+- rx_handler = rcu_dereference(skb->dev->rx_handler);
+ if (vlan_tx_tag_present(skb)) {
+ if (pt_prev) {
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+ pt_prev = NULL;
+ }
+- if (vlan_do_receive(&skb, !rx_handler))
++ if (vlan_do_receive(&skb))
+ goto another_round;
+ else if (unlikely(!skb))
+ goto out;
+ }
+
++ rx_handler = rcu_dereference(skb->dev->rx_handler);
+ if (rx_handler) {
+ if (pt_prev) {
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+@@ -3309,6 +3309,9 @@ ncls:
+ }
+ }
+
++ if (vlan_tx_nonzero_tag_present(skb))
++ skb->pkt_type = PACKET_OTHERHOST;
++
+ /* deliver only exact match when indicated */
+ null_or_dev = deliver_exact ? skb->dev : NULL;
+
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 7aafaed..5b9709f 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -1254,8 +1254,6 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
+ if (!dst)
+ goto discard;
+
+- __skb_pull(skb, skb_network_offset(skb));
+-
+ if (!neigh_event_send(neigh, skb)) {
+ int err;
+ struct net_device *dev = neigh->dev;
+@@ -1265,6 +1263,7 @@ int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
+ neigh_hh_init(neigh, dst);
+
+ do {
++ __skb_pull(skb, skb_network_offset(skb));
+ seq = read_seqbegin(&neigh->ha_lock);
+ err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+ neigh->ha, NULL, skb->len);
+@@ -1295,9 +1294,8 @@ int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
+ unsigned int seq;
+ int err;
+
+- __skb_pull(skb, skb_network_offset(skb));
+-
+ do {
++ __skb_pull(skb, skb_network_offset(skb));
+ seq = read_seqbegin(&neigh->ha_lock);
+ err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+ neigh->ha, NULL, skb->len);
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index df878de..7bc9991 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -2935,7 +2935,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
+ sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
+ pkt_dev->pkt_overhead;
+
+- if (datalen < sizeof(struct pktgen_hdr)) {
++ if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) {
+ datalen = sizeof(struct pktgen_hdr);
+ if (net_ratelimit())
+ pr_info("increased datalen to %d\n", datalen);
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index de69cec..58c09a0 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -651,10 +651,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
+ arg.csumoffset = offsetof(struct tcphdr, check) / 2;
+ arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
+ /* When socket is gone, all binding information is lost.
+- * routing might fail in this case. using iif for oif to
+- * make sure we can deliver it
++ * routing might fail in this case. No choice here, if we choose to force
++ * input interface, we will misroute in case of asymmetric route.
+ */
+- arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
++ if (sk)
++ arg.bound_dev_if = sk->sk_bound_dev_if;
+
+ net = dev_net(skb_dst(skb)->dev);
+ arg.tos = ip_hdr(skb)->tos;
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 4a56574..ccab3c8 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1048,7 +1048,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
+ __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
+
+ fl6.flowi6_proto = IPPROTO_TCP;
+- fl6.flowi6_oif = inet6_iif(skb);
++ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
++ fl6.flowi6_oif = inet6_iif(skb);
+ fl6.fl6_dport = t1->dest;
+ fl6.fl6_sport = t1->source;
+ security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
+diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
+index 28a39bb..a582504 100644
+--- a/net/mac80211/wpa.c
++++ b/net/mac80211/wpa.c
+@@ -106,7 +106,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
+ if (status->flag & RX_FLAG_MMIC_ERROR)
+ goto mic_fail;
+
+- if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key)
++ if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key &&
++ rx->key->conf.cipher == WLAN_CIPHER_SUITE_TKIP)
+ goto update_iv;
+
+ return RX_CONTINUE;
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 1d15193..7489bd3 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -247,12 +247,15 @@ static void death_by_event(unsigned long ul_conntrack)
+ {
+ struct nf_conn *ct = (void *)ul_conntrack;
+ struct net *net = nf_ct_net(ct);
++ struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
++
++ BUG_ON(ecache == NULL);
+
+ if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
+ /* bad luck, let's retry again */
+- ct->timeout.expires = jiffies +
++ ecache->timeout.expires = jiffies +
+ (random32() % net->ct.sysctl_events_retry_timeout);
+- add_timer(&ct->timeout);
++ add_timer(&ecache->timeout);
+ return;
+ }
+ /* we've got the event delivered, now it's dying */
+@@ -266,6 +269,9 @@ static void death_by_event(unsigned long ul_conntrack)
+ void nf_ct_insert_dying_list(struct nf_conn *ct)
+ {
+ struct net *net = nf_ct_net(ct);
++ struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
++
++ BUG_ON(ecache == NULL);
+
+ /* add this conntrack to the dying list */
+ spin_lock_bh(&nf_conntrack_lock);
+@@ -273,10 +279,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
+ &net->ct.dying);
+ spin_unlock_bh(&nf_conntrack_lock);
+ /* set a new timer to retry event delivery */
+- setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
+- ct->timeout.expires = jiffies +
++ setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
++ ecache->timeout.expires = jiffies +
+ (random32() % net->ct.sysctl_events_retry_timeout);
+- add_timer(&ct->timeout);
++ add_timer(&ecache->timeout);
+ }
+ EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
+
+diff --git a/net/rds/send.c b/net/rds/send.c
+index 96531d4..88eace5 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -1122,7 +1122,7 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
+ rds_stats_inc(s_send_pong);
+
+ if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
+- rds_send_xmit(conn);
++ queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+
+ rds_message_put(rm);
+ return 0;
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 4530a91..237a2ee 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1404,11 +1404,11 @@ static ssize_t read_flush(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos,
+ struct cache_detail *cd)
+ {
+- char tbuf[20];
++ char tbuf[22];
+ unsigned long p = *ppos;
+ size_t len;
+
+- sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time));
++ snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time));
+ len = strlen(tbuf);
+ if (p >= len)
+ return 0;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 10a385b..65fe23b 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -254,7 +254,6 @@ struct sock_xprt {
+ void (*old_data_ready)(struct sock *, int);
+ void (*old_state_change)(struct sock *);
+ void (*old_write_space)(struct sock *);
+- void (*old_error_report)(struct sock *);
+ };
+
+ /*
+@@ -737,10 +736,10 @@ static int xs_tcp_send_request(struct rpc_task *task)
+ dprintk("RPC: sendmsg returned unrecognized error %d\n",
+ -status);
+ case -ECONNRESET:
+- case -EPIPE:
+ xs_tcp_shutdown(xprt);
+ case -ECONNREFUSED:
+ case -ENOTCONN:
++ case -EPIPE:
+ clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
+ }
+
+@@ -781,7 +780,6 @@ static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
+ transport->old_data_ready = sk->sk_data_ready;
+ transport->old_state_change = sk->sk_state_change;
+ transport->old_write_space = sk->sk_write_space;
+- transport->old_error_report = sk->sk_error_report;
+ }
+
+ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
+@@ -789,7 +787,6 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s
+ sk->sk_data_ready = transport->old_data_ready;
+ sk->sk_state_change = transport->old_state_change;
+ sk->sk_write_space = transport->old_write_space;
+- sk->sk_error_report = transport->old_error_report;
+ }
+
+ static void xs_reset_transport(struct sock_xprt *transport)
+@@ -1465,7 +1462,7 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
+ xprt_clear_connecting(xprt);
+ }
+
+-static void xs_sock_mark_closed(struct rpc_xprt *xprt)
++static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
+ {
+ smp_mb__before_clear_bit();
+ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
+@@ -1473,6 +1470,11 @@ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+ clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ clear_bit(XPRT_CLOSING, &xprt->state);
+ smp_mb__after_clear_bit();
++}
++
++static void xs_sock_mark_closed(struct rpc_xprt *xprt)
++{
++ xs_sock_reset_connection_flags(xprt);
+ /* Mark transport as closed and wake up all pending tasks */
+ xprt_disconnect_done(xprt);
+ }
+@@ -1528,6 +1530,7 @@ static void xs_tcp_state_change(struct sock *sk)
+ case TCP_CLOSE_WAIT:
+ /* The server initiated a shutdown of the socket */
+ xprt->connect_cookie++;
++ clear_bit(XPRT_CONNECTED, &xprt->state);
+ xs_tcp_force_close(xprt);
+ case TCP_CLOSING:
+ /*
+@@ -1552,25 +1555,6 @@ static void xs_tcp_state_change(struct sock *sk)
+ read_unlock_bh(&sk->sk_callback_lock);
+ }
+
+-/**
+- * xs_error_report - callback mainly for catching socket errors
+- * @sk: socket
+- */
+-static void xs_error_report(struct sock *sk)
+-{
+- struct rpc_xprt *xprt;
+-
+- read_lock_bh(&sk->sk_callback_lock);
+- if (!(xprt = xprt_from_sock(sk)))
+- goto out;
+- dprintk("RPC: %s client %p...\n"
+- "RPC: error %d\n",
+- __func__, xprt, sk->sk_err);
+- xprt_wake_pending_tasks(xprt, -EAGAIN);
+-out:
+- read_unlock_bh(&sk->sk_callback_lock);
+-}
+-
+ static void xs_write_space(struct sock *sk)
+ {
+ struct socket *sock;
+@@ -1870,7 +1854,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
+ sk->sk_user_data = xprt;
+ sk->sk_data_ready = xs_local_data_ready;
+ sk->sk_write_space = xs_udp_write_space;
+- sk->sk_error_report = xs_error_report;
+ sk->sk_allocation = GFP_ATOMIC;
+
+ xprt_clear_connected(xprt);
+@@ -1959,7 +1942,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
+ sk->sk_user_data = xprt;
+ sk->sk_data_ready = xs_udp_data_ready;
+ sk->sk_write_space = xs_udp_write_space;
+- sk->sk_error_report = xs_error_report;
+ sk->sk_no_check = UDP_CSUM_NORCV;
+ sk->sk_allocation = GFP_ATOMIC;
+
+@@ -2027,10 +2009,8 @@ static void xs_abort_connection(struct sock_xprt *transport)
+ any.sa_family = AF_UNSPEC;
+ result = kernel_connect(transport->sock, &any, sizeof(any), 0);
+ if (!result)
+- xs_sock_mark_closed(&transport->xprt);
+- else
+- dprintk("RPC: AF_UNSPEC connect return code %d\n",
+- result);
++ xs_sock_reset_connection_flags(&transport->xprt);
++ dprintk("RPC: AF_UNSPEC connect return code %d\n", result);
+ }
+
+ static void xs_tcp_reuse_connection(struct sock_xprt *transport)
+@@ -2075,7 +2055,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
+ sk->sk_data_ready = xs_tcp_data_ready;
+ sk->sk_state_change = xs_tcp_state_change;
+ sk->sk_write_space = xs_tcp_write_space;
+- sk->sk_error_report = xs_error_report;
+ sk->sk_allocation = GFP_ATOMIC;
+
+ /* socket options */
+@@ -2488,6 +2467,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
+ static struct rpc_xprt_ops bc_tcp_ops = {
+ .reserve_xprt = xprt_reserve_xprt,
+ .release_xprt = xprt_release_xprt,
++ .alloc_slot = xprt_alloc_slot,
+ .buf_alloc = bc_malloc,
+ .buf_free = bc_free,
+ .send_request = bc_send_request,
+diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
+index fac51ee..1e7cfba 100644
+--- a/sound/pci/ac97/ac97_codec.c
++++ b/sound/pci/ac97/ac97_codec.c
+@@ -1271,6 +1271,8 @@ static int snd_ac97_cvol_new(struct snd_card *card, char *name, int reg, unsigne
+ tmp.index = ac97->num;
+ kctl = snd_ctl_new1(&tmp, ac97);
+ }
++ if (!kctl)
++ return -ENOMEM;
+ if (reg >= AC97_PHONE && reg <= AC97_PCM)
+ set_tlv_db_scale(kctl, db_scale_5bit_12db_max);
+ else
+diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
+index 6a3e567..d37b946 100644
+--- a/sound/pci/emu10k1/emu10k1_main.c
++++ b/sound/pci/emu10k1/emu10k1_main.c
+@@ -1416,6 +1416,15 @@ static struct snd_emu_chip_details emu_chip_details[] = {
+ .ca0108_chip = 1,
+ .spk71 = 1,
+ .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 new revision */
++ /* Tested by Maxim Kachur <mcdebugger@duganet.ru> 17th Oct 2012. */
++ /* This is MAEM8986, 0202 is MAEM8980 */
++ {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40071102,
++ .driver = "Audigy2", .name = "E-mu 1010 PCIe [MAEM8986]",
++ .id = "EMU1010",
++ .emu10k2_chip = 1,
++ .ca0108_chip = 1,
++ .spk71 = 1,
++ .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 PCIe */
+ /* Tested by James@superbug.co.uk 8th July 2005. */
+ /* This is MAEM8810, 0202 is MAEM8820 */
+ {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x40011102,
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index ec0518e..e449278 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -1404,7 +1404,7 @@ static int patch_cs420x(struct hda_codec *codec)
+ return 0;
+
+ error:
+- kfree(codec->spec);
++ cs_free(codec);
+ codec->spec = NULL;
+ return err;
+ }
+@@ -1949,7 +1949,7 @@ static int patch_cs421x(struct hda_codec *codec)
+ return 0;
+
+ error:
+- kfree(codec->spec);
++ cs_free(codec);
+ codec->spec = NULL;
+ return err;
+ }
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 94f0c4a..58c287b 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -4463,7 +4463,9 @@ static void apply_fixup(struct hda_codec *codec,
+ struct conexant_spec *spec = codec->spec;
+
+ quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk);
+- if (quirk && table[quirk->value]) {
++ if (!quirk)
++ return;
++ if (table[quirk->value]) {
+ snd_printdd(KERN_INFO "hda_codec: applying pincfg for %s\n",
+ quirk->name);
+ apply_pincfg(codec, table[quirk->value]);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 32c8169..c2c7f90 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -620,6 +620,8 @@ static void alc_line_automute(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+
++ if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT)
++ return;
+ /* check LO jack only when it's different from HP */
+ if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0])
+ return;
+@@ -2663,8 +2665,10 @@ static const char *alc_get_line_out_pfx(struct alc_spec *spec, int ch,
+ return "PCM";
+ break;
+ }
+- if (snd_BUG_ON(ch >= ARRAY_SIZE(channel_name)))
++ if (ch >= ARRAY_SIZE(channel_name)) {
++ snd_BUG();
+ return "PCM";
++ }
+
+ return channel_name[ch];
+ }
+@@ -5080,6 +5084,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
+index af0f22f..aca6edc 100644
+--- a/usr/gen_init_cpio.c
++++ b/usr/gen_init_cpio.c
+@@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
+ int retval;
+ int rc = -1;
+ int namesize;
+- int i;
++ unsigned int i;
+
+ mode |= S_IFREG;
+
+@@ -381,25 +381,28 @@ error:
+
+ static char *cpio_replace_env(char *new_location)
+ {
+- char expanded[PATH_MAX + 1];
+- char env_var[PATH_MAX + 1];
+- char *start;
+- char *end;
+-
+- for (start = NULL; (start = strstr(new_location, "${")); ) {
+- end = strchr(start, '}');
+- if (start < end) {
+- *env_var = *expanded = '\0';
+- strncat(env_var, start + 2, end - start - 2);
+- strncat(expanded, new_location, start - new_location);
+- strncat(expanded, getenv(env_var), PATH_MAX);
+- strncat(expanded, end + 1, PATH_MAX);
+- strncpy(new_location, expanded, PATH_MAX);
+- } else
+- break;
+- }
+-
+- return new_location;
++ char expanded[PATH_MAX + 1];
++ char env_var[PATH_MAX + 1];
++ char *start;
++ char *end;
++
++ for (start = NULL; (start = strstr(new_location, "${")); ) {
++ end = strchr(start, '}');
++ if (start < end) {
++ *env_var = *expanded = '\0';
++ strncat(env_var, start + 2, end - start - 2);
++ strncat(expanded, new_location, start - new_location);
++ strncat(expanded, getenv(env_var),
++ PATH_MAX - strlen(expanded));
++ strncat(expanded, end + 1,
++ PATH_MAX - strlen(expanded));
++ strncpy(new_location, expanded, PATH_MAX);
++ new_location[PATH_MAX] = 0;
++ } else
++ break;
++ }
++
++ return new_location;
+ }
+
+
diff --git a/1033_linux-3.2.34.patch b/1033_linux-3.2.34.patch
new file mode 100644
index 00000000..d647b38a
--- /dev/null
+++ b/1033_linux-3.2.34.patch
@@ -0,0 +1,3678 @@
+diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
+index 3d84912..47c4ec2 100644
+--- a/Documentation/feature-removal-schedule.txt
++++ b/Documentation/feature-removal-schedule.txt
+@@ -6,14 +6,6 @@ be removed from this file.
+
+ ---------------------------
+
+-What: x86 floppy disable_hlt
+-When: 2012
+-Why: ancient workaround of dubious utility clutters the
+- code used by everybody else.
+-Who: Len Brown <len.brown@intel.com>
+-
+----------------------------
+-
+ What: CONFIG_APM_CPU_IDLE, and its ability to call APM BIOS in idle
+ When: 2012
+ Why: This optional sub-feature of APM is of dubious reliability,
+diff --git a/Makefile b/Makefile
+index 63ca1ea2..14ebacf 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 33
++SUBLEVEL = 34
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
+index 143eebb..929fd91 100644
+--- a/arch/arm/mach-at91/at91rm9200_devices.c
++++ b/arch/arm/mach-at91/at91rm9200_devices.c
+@@ -462,7 +462,7 @@ static struct i2c_gpio_platform_data pdata = {
+
+ static struct platform_device at91rm9200_twi_device = {
+ .name = "i2c-gpio",
+- .id = -1,
++ .id = 0,
+ .dev.platform_data = &pdata,
+ };
+
+diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
+index 2590988..465e026 100644
+--- a/arch/arm/mach-at91/at91sam9260_devices.c
++++ b/arch/arm/mach-at91/at91sam9260_devices.c
+@@ -467,7 +467,7 @@ static struct i2c_gpio_platform_data pdata = {
+
+ static struct platform_device at91sam9260_twi_device = {
+ .name = "i2c-gpio",
+- .id = -1,
++ .id = 0,
+ .dev.platform_data = &pdata,
+ };
+
+diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
+index daf3e66..d6d1e76 100644
+--- a/arch/arm/mach-at91/at91sam9261_devices.c
++++ b/arch/arm/mach-at91/at91sam9261_devices.c
+@@ -284,7 +284,7 @@ static struct i2c_gpio_platform_data pdata = {
+
+ static struct platform_device at91sam9261_twi_device = {
+ .name = "i2c-gpio",
+- .id = -1,
++ .id = 0,
+ .dev.platform_data = &pdata,
+ };
+
+diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
+index 32a7e43..e051376e 100644
+--- a/arch/arm/mach-at91/at91sam9263_devices.c
++++ b/arch/arm/mach-at91/at91sam9263_devices.c
+@@ -540,7 +540,7 @@ static struct i2c_gpio_platform_data pdata = {
+
+ static struct platform_device at91sam9263_twi_device = {
+ .name = "i2c-gpio",
+- .id = -1,
++ .id = 0,
+ .dev.platform_data = &pdata,
+ };
+
+diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
+index 628eb56..4862b23 100644
+--- a/arch/arm/mach-at91/at91sam9rl_devices.c
++++ b/arch/arm/mach-at91/at91sam9rl_devices.c
+@@ -319,7 +319,7 @@ static struct i2c_gpio_platform_data pdata = {
+
+ static struct platform_device at91sam9rl_twi_device = {
+ .name = "i2c-gpio",
+- .id = -1,
++ .id = 0,
+ .dev.platform_data = &pdata,
+ };
+
+diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
+index f5bbe0ef..0d264bf 100644
+--- a/arch/arm/mach-at91/setup.c
++++ b/arch/arm/mach-at91/setup.c
+@@ -163,7 +163,7 @@ static void __init soc_detect(u32 dbgu_base)
+ }
+
+ /* at91sam9g10 */
+- if ((cidr & ~AT91_CIDR_EXT) == ARCH_ID_AT91SAM9G10) {
++ if ((socid & ~AT91_CIDR_EXT) == ARCH_ID_AT91SAM9G10) {
+ at91_soc_initdata.type = AT91_SOC_SAM9G10;
+ at91_boot_soc = at91sam9261_soc;
+ }
+diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
+index 2d2f01c..d75adff 100644
+--- a/arch/x86/include/asm/system.h
++++ b/arch/x86/include/asm/system.h
+@@ -93,10 +93,6 @@ do { \
+ "memory"); \
+ } while (0)
+
+-/*
+- * disable hlt during certain critical i/o operations
+- */
+-#define HAVE_DISABLE_HLT
+ #else
+
+ /* frame pointer must be last for get_wchan */
+@@ -392,9 +388,6 @@ static inline void clflush(volatile void *__p)
+
+ #define nop() asm volatile ("nop")
+
+-void disable_hlt(void);
+-void enable_hlt(void);
+-
+ void cpu_idle_wait(void);
+
+ extern unsigned long arch_align_stack(unsigned long sp);
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index ee5d4fb..59b9b37 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -341,34 +341,10 @@ void (*pm_idle)(void);
+ EXPORT_SYMBOL(pm_idle);
+ #endif
+
+-#ifdef CONFIG_X86_32
+-/*
+- * This halt magic was a workaround for ancient floppy DMA
+- * wreckage. It should be safe to remove.
+- */
+-static int hlt_counter;
+-void disable_hlt(void)
+-{
+- hlt_counter++;
+-}
+-EXPORT_SYMBOL(disable_hlt);
+-
+-void enable_hlt(void)
+-{
+- hlt_counter--;
+-}
+-EXPORT_SYMBOL(enable_hlt);
+-
+-static inline int hlt_use_halt(void)
+-{
+- return (!hlt_counter && boot_cpu_data.hlt_works_ok);
+-}
+-#else
+ static inline int hlt_use_halt(void)
+ {
+ return 1;
+ }
+-#endif
+
+ /*
+ * We use this if we don't have any better
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index ec3d603..2b8b0de 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -1203,6 +1203,25 @@ unsigned long xen_read_cr2_direct(void)
+ return percpu_read(xen_vcpu_info.arch.cr2);
+ }
+
++void xen_flush_tlb_all(void)
++{
++ struct mmuext_op *op;
++ struct multicall_space mcs;
++
++ trace_xen_mmu_flush_tlb_all(0);
++
++ preempt_disable();
++
++ mcs = xen_mc_entry(sizeof(*op));
++
++ op = mcs.args;
++ op->cmd = MMUEXT_TLB_FLUSH_ALL;
++ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
++
++ xen_mc_issue(PARAVIRT_LAZY_MMU);
++
++ preempt_enable();
++}
+ static void xen_flush_tlb(void)
+ {
+ struct mmuext_op *op;
+@@ -2366,7 +2385,7 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+ err = 0;
+ out:
+
+- flush_tlb_all();
++ xen_flush_tlb_all();
+
+ return err;
+ }
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index 671d4d6..7bdd61b 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -137,13 +137,18 @@ static void cryptd_queue_worker(struct work_struct *work)
+ struct crypto_async_request *req, *backlog;
+
+ cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
+- /* Only handle one request at a time to avoid hogging crypto
+- * workqueue. preempt_disable/enable is used to prevent
+- * being preempted by cryptd_enqueue_request() */
++ /*
++ * Only handle one request at a time to avoid hogging crypto workqueue.
++ * preempt_disable/enable is used to prevent being preempted by
++ * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
++ * cryptd_enqueue_request() being accessed from software interrupts.
++ */
++ local_bh_disable();
+ preempt_disable();
+ backlog = crypto_get_backlog(&cpu_queue->queue);
+ req = crypto_dequeue_request(&cpu_queue->queue);
+ preempt_enable();
++ local_bh_enable();
+
+ if (!req)
+ return;
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index c864add..7a90d4a 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -1032,37 +1032,6 @@ static int fd_wait_for_completion(unsigned long delay, timeout_fn function)
+ return 0;
+ }
+
+-static DEFINE_SPINLOCK(floppy_hlt_lock);
+-static int hlt_disabled;
+-static void floppy_disable_hlt(void)
+-{
+- unsigned long flags;
+-
+- WARN_ONCE(1, "floppy_disable_hlt() scheduled for removal in 2012");
+- spin_lock_irqsave(&floppy_hlt_lock, flags);
+- if (!hlt_disabled) {
+- hlt_disabled = 1;
+-#ifdef HAVE_DISABLE_HLT
+- disable_hlt();
+-#endif
+- }
+- spin_unlock_irqrestore(&floppy_hlt_lock, flags);
+-}
+-
+-static void floppy_enable_hlt(void)
+-{
+- unsigned long flags;
+-
+- spin_lock_irqsave(&floppy_hlt_lock, flags);
+- if (hlt_disabled) {
+- hlt_disabled = 0;
+-#ifdef HAVE_DISABLE_HLT
+- enable_hlt();
+-#endif
+- }
+- spin_unlock_irqrestore(&floppy_hlt_lock, flags);
+-}
+-
+ static void setup_DMA(void)
+ {
+ unsigned long f;
+@@ -1107,7 +1076,6 @@ static void setup_DMA(void)
+ fd_enable_dma();
+ release_dma_lock(f);
+ #endif
+- floppy_disable_hlt();
+ }
+
+ static void show_floppy(void);
+@@ -1709,7 +1677,6 @@ irqreturn_t floppy_interrupt(int irq, void *dev_id)
+ fd_disable_dma();
+ release_dma_lock(f);
+
+- floppy_enable_hlt();
+ do_floppy = NULL;
+ if (fdc >= N_FDC || FDCS->address == -1) {
+ /* we don't even know which FDC is the culprit */
+@@ -1858,8 +1825,6 @@ static void floppy_shutdown(unsigned long data)
+ show_floppy();
+ cancel_activity();
+
+- floppy_enable_hlt();
+-
+ flags = claim_dma_lock();
+ fd_disable_dma();
+ release_dma_lock(flags);
+@@ -4198,6 +4163,7 @@ static int __init floppy_init(void)
+
+ disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock);
+ if (!disks[dr]->queue) {
++ put_disk(disks[dr]);
+ err = -ENOMEM;
+ goto out_put_disk;
+ }
+@@ -4339,7 +4305,7 @@ static int __init floppy_init(void)
+
+ err = platform_device_register(&floppy_device[drive]);
+ if (err)
+- goto out_flush_work;
++ goto out_remove_drives;
+
+ err = device_create_file(&floppy_device[drive].dev,
+ &dev_attr_cmos);
+@@ -4357,6 +4323,15 @@ static int __init floppy_init(void)
+
+ out_unreg_platform_dev:
+ platform_device_unregister(&floppy_device[drive]);
++out_remove_drives:
++ while (drive--) {
++ if ((allowed_drive_mask & (1 << drive)) &&
++ fdc_state[FDC(drive)].version != FDC_NONE) {
++ del_gendisk(disks[drive]);
++ device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
++ platform_device_unregister(&floppy_device[drive]);
++ }
++ }
+ out_flush_work:
+ flush_work_sync(&floppy_work);
+ if (atomic_read(&usage_count))
+@@ -4510,7 +4485,6 @@ static void floppy_release_irq_and_dma(void)
+ #if N_FDC > 1
+ set_dor(1, ~8, 0);
+ #endif
+- floppy_enable_hlt();
+
+ if (floppy_track_buffer && max_buffer_sectors) {
+ tmpsize = max_buffer_sectors * 1024;
+diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
+index c593bd4..edff410 100644
+--- a/drivers/gpio/gpio-timberdale.c
++++ b/drivers/gpio/gpio-timberdale.c
+@@ -116,7 +116,7 @@ static void timbgpio_irq_disable(struct irq_data *d)
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgpio->lock, flags);
+- tgpio->last_ier &= ~(1 << offset);
++ tgpio->last_ier &= ~(1UL << offset);
+ iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
+ spin_unlock_irqrestore(&tgpio->lock, flags);
+ }
+@@ -128,7 +128,7 @@ static void timbgpio_irq_enable(struct irq_data *d)
+ unsigned long flags;
+
+ spin_lock_irqsave(&tgpio->lock, flags);
+- tgpio->last_ier |= 1 << offset;
++ tgpio->last_ier |= 1UL << offset;
+ iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
+ spin_unlock_irqrestore(&tgpio->lock, flags);
+ }
+diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
+index 828bf65..020b103 100644
+--- a/drivers/gpu/drm/drm_fops.c
++++ b/drivers/gpu/drm/drm_fops.c
+@@ -136,8 +136,11 @@ int drm_open(struct inode *inode, struct file *filp)
+ retcode = drm_open_helper(inode, filp, dev);
+ if (!retcode) {
+ atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+- if (!dev->open_count++)
++ if (!dev->open_count++) {
+ retcode = drm_setup(dev);
++ if (retcode)
++ dev->open_count--;
++ }
+ }
+ if (!retcode) {
+ mutex_lock(&dev->struct_mutex);
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index 83e820e..bcadf74 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -227,12 +227,12 @@ struct dip_infoframe {
+ uint16_t bottom_bar_start;
+ uint16_t left_bar_end;
+ uint16_t right_bar_start;
+- } avi;
++ } __attribute__ ((packed)) avi;
+ struct {
+ uint8_t vn[8];
+ uint8_t pd[16];
+ uint8_t sdi;
+- } spd;
++ } __attribute__ ((packed)) spd;
+ uint8_t payload[27];
+ } __attribute__ ((packed)) body;
+ } __attribute__((packed));
+diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
+index cdf17d4..478b51f 100644
+--- a/drivers/gpu/drm/i915/intel_overlay.c
++++ b/drivers/gpu/drm/i915/intel_overlay.c
+@@ -428,9 +428,17 @@ static int intel_overlay_off(struct intel_overlay *overlay)
+ OUT_RING(flip_addr);
+ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ /* turn overlay off */
+- OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+- OUT_RING(flip_addr);
+- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
++ if (IS_I830(dev)) {
++ /* Workaround: Don't disable the overlay fully, since otherwise
++ * it dies on the next OVERLAY_ON cmd. */
++ OUT_RING(MI_NOOP);
++ OUT_RING(MI_NOOP);
++ OUT_RING(MI_NOOP);
++ } else {
++ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
++ OUT_RING(flip_addr);
++ OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
++ }
+ ADVANCE_LP_RING();
+
+ return intel_overlay_do_wait_request(overlay, request,
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index bbf247c..3f4afba 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -868,31 +868,38 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
+ }
+ #endif
+
+-static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
++static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
++ unsigned if_index, uint8_t tx_rate,
++ uint8_t *data, unsigned length)
+ {
+- struct dip_infoframe avi_if = {
+- .type = DIP_TYPE_AVI,
+- .ver = DIP_VERSION_AVI,
+- .len = DIP_LEN_AVI,
+- };
+- uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
+- uint8_t set_buf_index[2] = { 1, 0 };
+- uint64_t *data = (uint64_t *)&avi_if;
+- unsigned i;
+-
+- intel_dip_infoframe_csum(&avi_if);
++ uint8_t set_buf_index[2] = { if_index, 0 };
++ uint8_t hbuf_size, tmp[8];
++ int i;
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
+
+- for (i = 0; i < sizeof(avi_if); i += 8) {
++ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
++ &hbuf_size, 1))
++ return false;
++
++ /* Buffer size is 0 based, hooray! */
++ hbuf_size++;
++
++ DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
++ if_index, length, hbuf_size);
++
++ for (i = 0; i < hbuf_size; i += 8) {
++ memset(tmp, 0, 8);
++ if (i < length)
++ memcpy(tmp, data + i, min_t(unsigned, 8, length - i));
++
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_DATA,
+- data, 8))
++ tmp, 8))
+ return false;
+- data++;
+ }
+
+ return intel_sdvo_set_value(intel_sdvo,
+@@ -900,6 +907,28 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
+ &tx_rate, 1);
+ }
+
++static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
++{
++ struct dip_infoframe avi_if = {
++ .type = DIP_TYPE_AVI,
++ .ver = DIP_VERSION_AVI,
++ .len = DIP_LEN_AVI,
++ };
++ uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
++
++ intel_dip_infoframe_csum(&avi_if);
++
++ /* sdvo spec says that the ecc is handled by the hw, and it looks like
++ * we must not send the ecc field, either. */
++ memcpy(sdvo_data, &avi_if, 3);
++ sdvo_data[3] = avi_if.checksum;
++ memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
++
++ return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
++ SDVO_HBUF_TX_VSYNC,
++ sdvo_data, sizeof(sdvo_data));
++}
++
+ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
+ {
+ struct intel_sdvo_tv_format format;
+diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
+index 372f33b..4193c54 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
++++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
+@@ -708,6 +708,8 @@ struct intel_sdvo_enhancements_arg {
+ #define SDVO_CMD_SET_AUDIO_STAT 0x91
+ #define SDVO_CMD_GET_AUDIO_STAT 0x92
+ #define SDVO_CMD_SET_HBUF_INDEX 0x93
++ #define SDVO_HBUF_INDEX_ELD 0
++ #define SDVO_HBUF_INDEX_AVI_IF 1
+ #define SDVO_CMD_GET_HBUF_INDEX 0x94
+ #define SDVO_CMD_GET_HBUF_INFO 0x95
+ #define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
+index 9791d13..8c084c0 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
+@@ -178,8 +178,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+- NV_INFO(dev, "Disabling fbcon acceleration...\n");
+- nouveau_fbcon_save_disable_accel(dev);
++ if (dev->mode_config.num_crtc) {
++ NV_INFO(dev, "Disabling fbcon acceleration...\n");
++ nouveau_fbcon_save_disable_accel(dev);
++ }
+
+ NV_INFO(dev, "Unpinning framebuffer(s)...\n");
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+@@ -246,10 +248,12 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+- console_lock();
+- nouveau_fbcon_set_suspend(dev, 1);
+- console_unlock();
+- nouveau_fbcon_restore_accel(dev);
++ if (dev->mode_config.num_crtc) {
++ console_lock();
++ nouveau_fbcon_set_suspend(dev, 1);
++ console_unlock();
++ nouveau_fbcon_restore_accel(dev);
++ }
+ return 0;
+
+ out_abort:
+@@ -275,7 +279,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+- nouveau_fbcon_save_disable_accel(dev);
++ if (dev->mode_config.num_crtc)
++ nouveau_fbcon_save_disable_accel(dev);
+
+ NV_INFO(dev, "We're back, enabling device...\n");
+ pci_set_power_state(pdev, PCI_D0);
+@@ -376,15 +381,18 @@ nouveau_pci_resume(struct pci_dev *pdev)
+ nv_crtc->lut.depth = 0;
+ }
+
+- console_lock();
+- nouveau_fbcon_set_suspend(dev, 0);
+- console_unlock();
++ if (dev->mode_config.num_crtc) {
++ console_lock();
++ nouveau_fbcon_set_suspend(dev, 0);
++ console_unlock();
+
+- nouveau_fbcon_zfill_all(dev);
++ nouveau_fbcon_zfill_all(dev);
++ }
+
+ drm_helper_resume_force_mode(dev);
+
+- nouveau_fbcon_restore_accel(dev);
++ if (dev->mode_config.num_crtc)
++ nouveau_fbcon_restore_accel(dev);
+ return 0;
+ }
+
+@@ -466,9 +474,7 @@ static int __init nouveau_init(void)
+ #ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force())
+ nouveau_modeset = 0;
+- else
+ #endif
+- nouveau_modeset = 1;
+ }
+
+ if (!nouveau_modeset)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
+index d8831ab..01adcfb 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_state.c
++++ b/drivers/gpu/drm/nouveau/nouveau_state.c
+@@ -46,6 +46,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
+ {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
++ u32 pclass = dev->pdev->class >> 8;
+
+ switch (dev_priv->chipset & 0xf0) {
+ case 0x00:
+@@ -481,7 +482,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
+ }
+
+ /* headless mode */
+- if (nouveau_modeset == 2) {
++ if (nouveau_modeset == 2 ||
++ (nouveau_modeset < 0 && pclass != PCI_CLASS_DISPLAY_VGA)) {
+ engine->display.early_init = nouveau_stub_init;
+ engine->display.late_takedown = nouveau_stub_takedown;
+ engine->display.create = nouveau_stub_init;
+diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
+index e000455..2d6bfd0 100644
+--- a/drivers/gpu/drm/nouveau/nv04_dac.c
++++ b/drivers/gpu/drm/nouveau/nv04_dac.c
+@@ -209,7 +209,7 @@ out:
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
+
+ if (blue == 0x18) {
+- NV_INFO(dev, "Load detected on head A\n");
++ NV_DEBUG(dev, "Load detected on head A\n");
+ return connector_status_connected;
+ }
+
+@@ -323,7 +323,7 @@ nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+
+ if (nv17_dac_sample_load(encoder) &
+ NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
+- NV_INFO(dev, "Load detected on output %c\n",
++ NV_DEBUG(dev, "Load detected on output %c\n",
+ '@' + ffs(dcb->or));
+ return connector_status_connected;
+ } else {
+@@ -398,7 +398,7 @@ static void nv04_dac_commit(struct drm_encoder *encoder)
+
+ helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+- NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
++ NV_DEBUG(dev, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+ }
+@@ -447,7 +447,7 @@ static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
+ return;
+ nv_encoder->last_dpms = mode;
+
+- NV_INFO(dev, "Setting dpms mode %d on vga encoder (output %d)\n",
++ NV_DEBUG(dev, "Setting dpms mode %d on vga encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
+
+ nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
+diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
+index 12098bf..752440c 100644
+--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
++++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
+@@ -468,7 +468,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
+
+ helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+- NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
++ NV_DEBUG(dev, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+ }
+@@ -511,7 +511,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
+ return;
+ nv_encoder->last_dpms = mode;
+
+- NV_INFO(dev, "Setting dpms mode %d on lvds encoder (output %d)\n",
++ NV_DEBUG(dev, "Setting dpms mode %d on lvds encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
+
+ if (was_powersaving && is_powersaving_dpms(mode))
+@@ -556,7 +556,7 @@ static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
+ return;
+ nv_encoder->last_dpms = mode;
+
+- NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
++ NV_DEBUG(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
+
+ nv04_dfp_update_backlight(encoder, mode);
+diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
+index 3eb605d..4de1fbe 100644
+--- a/drivers/gpu/drm/nouveau/nv04_tv.c
++++ b/drivers/gpu/drm/nouveau/nv04_tv.c
+@@ -69,7 +69,7 @@ static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
+ struct nv04_mode_state *state = &dev_priv->mode_reg;
+ uint8_t crtc1A;
+
+- NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
++ NV_DEBUG(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
+
+ state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);
+@@ -162,7 +162,7 @@ static void nv04_tv_commit(struct drm_encoder *encoder)
+
+ helper->dpms(encoder, DRM_MODE_DPMS_ON);
+
+- NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
++ NV_DEBUG(dev, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index,
+ '@' + ffs(nv_encoder->dcb->or));
+ }
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index b61f490..ca94e23 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1164,7 +1164,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
+ WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+ for (i = 0; i < rdev->num_crtc; i++) {
+- if (save->crtc_enabled) {
++ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+index 3ad3cc6..8165953 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+@@ -650,6 +650,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
+ tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN;
+ WREG32(RADEON_DAC_CNTL, tmp);
+
++ tmp = dac_macro_cntl;
+ tmp &= ~(RADEON_DAC_PDWN_R |
+ RADEON_DAC_PDWN_G |
+ RADEON_DAC_PDWN_B);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+index 3fa884d..27151f7 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+@@ -306,7 +306,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
+
+ BUG_ON(!atomic_read(&bo->reserved));
+ BUG_ON(old_mem_type != TTM_PL_VRAM &&
+- old_mem_type != VMW_PL_FLAG_GMR);
++ old_mem_type != VMW_PL_GMR);
+
+ pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
+ if (pin)
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 033fc96..b639536 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -1048,6 +1048,11 @@ static void vmw_pm_complete(struct device *kdev)
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
++ mutex_lock(&dev_priv->hw_mutex);
++ vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
++ (void) vmw_read(dev_priv, SVGA_REG_ID);
++ mutex_unlock(&dev_priv->hw_mutex);
++
+ /**
+ * Reclaim 3d reference held by fbdev and potentially
+ * start fifo.
+diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
+index e5c699b..3899989 100644
+--- a/drivers/hid/hid-microsoft.c
++++ b/drivers/hid/hid-microsoft.c
+@@ -29,22 +29,30 @@
+ #define MS_RDESC 0x08
+ #define MS_NOGET 0x10
+ #define MS_DUPLICATE_USAGES 0x20
++#define MS_RDESC_3K 0x40
+
+-/*
+- * Microsoft Wireless Desktop Receiver (Model 1028) has
+- * 'Usage Min/Max' where it ought to have 'Physical Min/Max'
+- */
+ static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+ {
+ unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+
++ /*
++ * Microsoft Wireless Desktop Receiver (Model 1028) has
++ * 'Usage Min/Max' where it ought to have 'Physical Min/Max'
++ */
+ if ((quirks & MS_RDESC) && *rsize == 571 && rdesc[557] == 0x19 &&
+ rdesc[559] == 0x29) {
+ hid_info(hdev, "fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n");
+ rdesc[557] = 0x35;
+ rdesc[559] = 0x45;
+ }
++ /* the same as above (s/usage/physical/) */
++ if ((quirks & MS_RDESC_3K) && *rsize == 106 &&
++ !memcmp((char []){ 0x19, 0x00, 0x29, 0xff },
++ &rdesc[94], 4)) {
++ rdesc[94] = 0x35;
++ rdesc[96] = 0x45;
++ }
+ return rdesc;
+ }
+
+@@ -193,7 +201,7 @@ static const struct hid_device_id ms_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB),
+ .driver_data = MS_PRESENTER },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
+- .driver_data = MS_ERGONOMY },
++ .driver_data = MS_ERGONOMY | MS_RDESC_3K },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
+ .driver_data = MS_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
+diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
+index ceaec92..b6a3ce3 100644
+--- a/drivers/hwmon/w83627ehf.c
++++ b/drivers/hwmon/w83627ehf.c
+@@ -2015,6 +2015,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
+ mutex_init(&data->lock);
+ mutex_init(&data->update_lock);
+ data->name = w83627ehf_device_names[sio_data->kind];
++ data->bank = 0xff; /* Force initial bank selection */
+ platform_set_drvdata(pdev, data);
+
+ /* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */
+diff --git a/drivers/input/touchscreen/tsc40.c b/drivers/input/touchscreen/tsc40.c
+index 29d5ed4..80d4610 100644
+--- a/drivers/input/touchscreen/tsc40.c
++++ b/drivers/input/touchscreen/tsc40.c
+@@ -107,7 +107,6 @@ static int tsc_connect(struct serio *serio, struct serio_driver *drv)
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+ input_set_abs_params(ptsc->dev, ABS_X, 0, 0x3ff, 0, 0);
+ input_set_abs_params(ptsc->dev, ABS_Y, 0, 0x3ff, 0, 0);
+- input_set_abs_params(ptsc->dev, ABS_PRESSURE, 0, 0, 0, 0);
+
+ serio_set_drvdata(serio, ptsc);
+
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index 11ddd838..69fc888 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -3060,8 +3060,10 @@ static irqreturn_t sky2_intr(int irq, void *dev_id)
+
+ /* Reading this mask interrupts as side effect */
+ status = sky2_read32(hw, B0_Y2_SP_ISRC2);
+- if (status == 0 || status == ~0)
++ if (status == 0 || status == ~0) {
++ sky2_write32(hw, B0_Y2_SP_ICR, 2);
+ return IRQ_NONE;
++ }
+
+ prefetch(&hw->st_le[hw->st_idx]);
+
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 4b43bc5..b8db4cd 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -77,7 +77,7 @@ static const int multicast_filter_limit = 32;
+ #define MAC_ADDR_LEN 6
+
+ #define MAX_READ_REQUEST_SHIFT 12
+-#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
++#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
+ #define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
+ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
+
+@@ -3521,6 +3521,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ switch (tp->mac_version) {
++ case RTL_GIGA_MAC_VER_25:
++ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_29:
+ case RTL_GIGA_MAC_VER_30:
+ case RTL_GIGA_MAC_VER_32:
+@@ -6064,6 +6066,9 @@ static void rtl_set_rx_mode(struct net_device *dev)
+ mc_filter[1] = swab32(data);
+ }
+
++ if (tp->mac_version == RTL_GIGA_MAC_VER_35)
++ mc_filter[1] = mc_filter[0] = 0xffffffff;
++
+ RTL_W32(MAR0 + 4, mc_filter[1]);
+ RTL_W32(MAR0 + 0, mc_filter[0]);
+
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index b873b5d..dc53a8f 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -1156,6 +1156,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
+ usb_anchor_urb(urb, &dev->deferred);
+ /* no use to process more packets */
+ netif_stop_queue(net);
++ usb_put_urb(urb);
+ spin_unlock_irqrestore(&dev->txq.lock, flags);
+ netdev_dbg(dev->net, "Delaying transmission for resumption\n");
+ goto deferred;
+@@ -1297,6 +1298,8 @@ void usbnet_disconnect (struct usb_interface *intf)
+
+ cancel_work_sync(&dev->kevent);
+
++ usb_scuttle_anchored_urbs(&dev->deferred);
++
+ if (dev->driver_info->unbind)
+ dev->driver_info->unbind (dev, intf);
+
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index c59c592..18da100 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -288,6 +288,7 @@ static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
+ }
+
+ bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
++ bf->bf_next = NULL;
+ list_del(&bf->list);
+
+ spin_unlock_bh(&sc->tx.txbuflock);
+@@ -369,7 +370,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
+ u32 ba[WME_BA_BMP_SIZE >> 5];
+ int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
+- bool rc_update = true;
++ bool rc_update = true, isba;
+ struct ieee80211_tx_rate rates[4];
+ struct ath_frame_info *fi;
+ int nframes;
+@@ -407,13 +408,17 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ an = (struct ath_node *)sta->drv_priv;
+ tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
+ tid = ATH_AN_2_TID(an, tidno);
++ isba = ts->ts_flags & ATH9K_TX_BA;
+
+ /*
+ * The hardware occasionally sends a tx status for the wrong TID.
+ * In this case, the BA status cannot be considered valid and all
+ * subframes need to be retransmitted
++ *
++ * Only BlockAcks have a TID and therefore normal Acks cannot be
++ * checked
+ */
+- if (tidno != ts->tid)
++ if (isba && tidno != ts->tid)
+ txok = false;
+
+ isaggr = bf_isaggr(bf);
+@@ -1710,6 +1715,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
+ if (tid)
+ INCR(tid->seq_start, IEEE80211_SEQ_MAX);
+
++ bf->bf_next = NULL;
+ bf->bf_lastbf = bf;
+ ath_tx_fill_desc(sc, bf, txq, fi->framelen);
+ ath_tx_txqaddbuf(sc, txq, &bf_head, false);
+diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
+index 1ba079d..fb19447 100644
+--- a/drivers/net/wireless/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/rt2x00/rt2800lib.c
+@@ -2141,7 +2141,7 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
+ /*
+ * Check if temperature compensation is supported.
+ */
+- if (tssi_bounds[4] == 0xff)
++ if (tssi_bounds[4] == 0xff || step == 0xff)
+ return 0;
+
+ /*
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index f35cb10..6fa7222 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -3523,7 +3523,9 @@ restart:
+ */
+ iscsit_thread_check_cpumask(conn, current, 1);
+
+- schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
++ wait_event_interruptible(conn->queues_wq,
++ !iscsit_conn_all_queues_empty(conn) ||
++ ts->status == ISCSI_THREAD_SET_RESET);
+
+ if ((ts->status == ISCSI_THREAD_SET_RESET) ||
+ signal_pending(current))
+diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
+index dae283f..bd8ce01 100644
+--- a/drivers/target/iscsi/iscsi_target_core.h
++++ b/drivers/target/iscsi/iscsi_target_core.h
+@@ -491,6 +491,7 @@ struct iscsi_tmr_req {
+ };
+
+ struct iscsi_conn {
++ wait_queue_head_t queues_wq;
+ /* Authentication Successful for this connection */
+ u8 auth_complete;
+ /* State connection is currently in */
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index 2ec5339..eb0c9fe 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -44,6 +44,7 @@ extern spinlock_t sess_idr_lock;
+
+ static int iscsi_login_init_conn(struct iscsi_conn *conn)
+ {
++ init_waitqueue_head(&conn->queues_wq);
+ INIT_LIST_HEAD(&conn->conn_list);
+ INIT_LIST_HEAD(&conn->conn_cmd_list);
+ INIT_LIST_HEAD(&conn->immed_queue_list);
+diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
+index 99f2af3..e612722 100644
+--- a/drivers/target/iscsi/iscsi_target_util.c
++++ b/drivers/target/iscsi/iscsi_target_util.c
+@@ -659,7 +659,7 @@ void iscsit_add_cmd_to_immediate_queue(
+ atomic_set(&conn->check_immediate_queue, 1);
+ spin_unlock_bh(&conn->immed_queue_lock);
+
+- wake_up_process(conn->thread_set->tx_thread);
++ wake_up(&conn->queues_wq);
+ }
+
+ struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
+@@ -733,7 +733,7 @@ void iscsit_add_cmd_to_response_queue(
+ atomic_inc(&cmd->response_queue_count);
+ spin_unlock_bh(&conn->response_queue_lock);
+
+- wake_up_process(conn->thread_set->tx_thread);
++ wake_up(&conn->queues_wq);
+ }
+
+ struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
+@@ -787,6 +787,24 @@ static void iscsit_remove_cmd_from_response_queue(
+ }
+ }
+
++bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
++{
++ bool empty;
++
++ spin_lock_bh(&conn->immed_queue_lock);
++ empty = list_empty(&conn->immed_queue_list);
++ spin_unlock_bh(&conn->immed_queue_lock);
++
++ if (!empty)
++ return empty;
++
++ spin_lock_bh(&conn->response_queue_lock);
++ empty = list_empty(&conn->response_queue_list);
++ spin_unlock_bh(&conn->response_queue_lock);
++
++ return empty;
++}
++
+ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
+ {
+ struct iscsi_queue_req *qr, *qr_tmp;
+diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
+index 835bf7d..cfac698 100644
+--- a/drivers/target/iscsi/iscsi_target_util.h
++++ b/drivers/target/iscsi/iscsi_target_util.h
+@@ -28,6 +28,7 @@ extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_
+ extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+ extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
+ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
++extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
+ extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
+ extern void iscsit_release_cmd(struct iscsi_cmd *);
+ extern void iscsit_free_cmd(struct iscsi_cmd *);
+diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
+index 0b01bfc..013b133 100644
+--- a/drivers/target/target_core_configfs.c
++++ b/drivers/target/target_core_configfs.c
+@@ -3205,7 +3205,8 @@ static int __init target_core_init_configfs(void)
+ if (ret < 0)
+ goto out;
+
+- if (core_dev_setup_virtual_lun0() < 0)
++ ret = core_dev_setup_virtual_lun0();
++ if (ret < 0)
+ goto out;
+
+ return 0;
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index f8773ae..a0143a0 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -835,20 +835,20 @@ int se_dev_check_shutdown(struct se_device *dev)
+
+ u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
+ {
+- u32 tmp, aligned_max_sectors;
++ u32 aligned_max_sectors;
++ u32 alignment;
+ /*
+ * Limit max_sectors to a PAGE_SIZE aligned value for modern
+ * transport_allocate_data_tasks() operation.
+ */
+- tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
+- aligned_max_sectors = (tmp / block_size);
+- if (max_sectors != aligned_max_sectors) {
+- printk(KERN_INFO "Rounding down aligned max_sectors from %u"
+- " to %u\n", max_sectors, aligned_max_sectors);
+- return aligned_max_sectors;
+- }
++ alignment = max(1ul, PAGE_SIZE / block_size);
++ aligned_max_sectors = rounddown(max_sectors, alignment);
++
++ if (max_sectors != aligned_max_sectors)
++ pr_info("Rounding down aligned max_sectors from %u to %u\n",
++ max_sectors, aligned_max_sectors);
+
+- return max_sectors;
++ return aligned_max_sectors;
+ }
+
+ void se_dev_set_default_attribs(
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index d481f80..43a38aa 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -2585,7 +2585,6 @@ error:
+ static void mos7840_disconnect(struct usb_serial *serial)
+ {
+ int i;
+- unsigned long flags;
+ struct moschip_port *mos7840_port;
+ dbg("%s", " disconnect :entering..........");
+
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index 625890c..080b186 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -105,6 +105,21 @@ static void gntdev_print_maps(struct gntdev_priv *priv,
+ #endif
+ }
+
++static void gntdev_free_map(struct grant_map *map)
++{
++ if (map == NULL)
++ return;
++
++ if (map->pages)
++ free_xenballooned_pages(map->count, map->pages);
++ kfree(map->pages);
++ kfree(map->grants);
++ kfree(map->map_ops);
++ kfree(map->unmap_ops);
++ kfree(map->kmap_ops);
++ kfree(map);
++}
++
+ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
+ {
+ struct grant_map *add;
+@@ -142,12 +157,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
+ return add;
+
+ err:
+- kfree(add->pages);
+- kfree(add->grants);
+- kfree(add->map_ops);
+- kfree(add->unmap_ops);
+- kfree(add->kmap_ops);
+- kfree(add);
++ gntdev_free_map(add);
+ return NULL;
+ }
+
+@@ -196,17 +206,9 @@ static void gntdev_put_map(struct grant_map *map)
+ if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+ notify_remote_via_evtchn(map->notify.event);
+
+- if (map->pages) {
+- if (!use_ptemod)
+- unmap_grant_pages(map, 0, map->count);
+-
+- free_xenballooned_pages(map->count, map->pages);
+- }
+- kfree(map->pages);
+- kfree(map->grants);
+- kfree(map->map_ops);
+- kfree(map->unmap_ops);
+- kfree(map);
++ if (map->pages && !use_ptemod)
++ unmap_grant_pages(map, 0, map->count);
++ gntdev_free_map(map);
+ }
+
+ /* ------------------------------------------------------------------ */
+diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
+index 72ddf23..b3522af 100644
+--- a/fs/cifs/cifsacl.c
++++ b/fs/cifs/cifsacl.c
+@@ -225,6 +225,13 @@ sid_to_str(struct cifs_sid *sidptr, char *sidstr)
+ }
+
+ static void
++cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
++{
++ memcpy(dst, src, sizeof(*dst));
++ dst->num_subauth = min_t(u8, src->num_subauth, NUM_SUBAUTHS);
++}
++
++static void
+ id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
+ struct cifs_sid_id **psidid, char *typestr)
+ {
+@@ -248,7 +255,7 @@ id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
+ }
+ }
+
+- memcpy(&(*psidid)->sid, sidptr, sizeof(struct cifs_sid));
++ cifs_copy_sid(&(*psidid)->sid, sidptr);
+ (*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
+ (*psidid)->refcount = 0;
+
+@@ -354,7 +361,7 @@ id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
+ * any fields of the node after a reference is put .
+ */
+ if (test_bit(SID_ID_MAPPED, &psidid->state)) {
+- memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
++ cifs_copy_sid(ssid, &psidid->sid);
+ psidid->time = jiffies; /* update ts for accessing */
+ goto id_sid_out;
+ }
+@@ -370,14 +377,14 @@ id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
+ if (IS_ERR(sidkey)) {
+ rc = -EINVAL;
+ cFYI(1, "%s: Can't map and id to a SID", __func__);
++ } else if (sidkey->datalen < sizeof(struct cifs_sid)) {
++ rc = -EIO;
++ cFYI(1, "%s: Downcall contained malformed key "
++ "(datalen=%hu)", __func__, sidkey->datalen);
+ } else {
+ lsid = (struct cifs_sid *)sidkey->payload.data;
+- memcpy(&psidid->sid, lsid,
+- sidkey->datalen < sizeof(struct cifs_sid) ?
+- sidkey->datalen : sizeof(struct cifs_sid));
+- memcpy(ssid, &psidid->sid,
+- sidkey->datalen < sizeof(struct cifs_sid) ?
+- sidkey->datalen : sizeof(struct cifs_sid));
++ cifs_copy_sid(&psidid->sid, lsid);
++ cifs_copy_sid(ssid, &psidid->sid);
+ set_bit(SID_ID_MAPPED, &psidid->state);
+ key_put(sidkey);
+ kfree(psidid->sidstr);
+@@ -396,7 +403,7 @@ id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
+ return rc;
+ }
+ if (test_bit(SID_ID_MAPPED, &psidid->state))
+- memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
++ cifs_copy_sid(ssid, &psidid->sid);
+ else
+ rc = -EINVAL;
+ }
+@@ -674,8 +681,6 @@ int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
+ static void copy_sec_desc(const struct cifs_ntsd *pntsd,
+ struct cifs_ntsd *pnntsd, __u32 sidsoffset)
+ {
+- int i;
+-
+ struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
+ struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
+
+@@ -691,26 +696,14 @@ static void copy_sec_desc(const struct cifs_ntsd *pntsd,
+ owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+ le32_to_cpu(pntsd->osidoffset));
+ nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
+-
+- nowner_sid_ptr->revision = owner_sid_ptr->revision;
+- nowner_sid_ptr->num_subauth = owner_sid_ptr->num_subauth;
+- for (i = 0; i < 6; i++)
+- nowner_sid_ptr->authority[i] = owner_sid_ptr->authority[i];
+- for (i = 0; i < 5; i++)
+- nowner_sid_ptr->sub_auth[i] = owner_sid_ptr->sub_auth[i];
++ cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr);
+
+ /* copy group sid */
+ group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
+ le32_to_cpu(pntsd->gsidoffset));
+ ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
+ sizeof(struct cifs_sid));
+-
+- ngroup_sid_ptr->revision = group_sid_ptr->revision;
+- ngroup_sid_ptr->num_subauth = group_sid_ptr->num_subauth;
+- for (i = 0; i < 6; i++)
+- ngroup_sid_ptr->authority[i] = group_sid_ptr->authority[i];
+- for (i = 0; i < 5; i++)
+- ngroup_sid_ptr->sub_auth[i] = group_sid_ptr->sub_auth[i];
++ cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr);
+
+ return;
+ }
+@@ -1117,8 +1110,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
+ kfree(nowner_sid_ptr);
+ return rc;
+ }
+- memcpy(owner_sid_ptr, nowner_sid_ptr,
+- sizeof(struct cifs_sid));
++ cifs_copy_sid(owner_sid_ptr, nowner_sid_ptr);
+ kfree(nowner_sid_ptr);
+ *aclflag = CIFS_ACL_OWNER;
+ }
+@@ -1136,8 +1128,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
+ kfree(ngroup_sid_ptr);
+ return rc;
+ }
+- memcpy(group_sid_ptr, ngroup_sid_ptr,
+- sizeof(struct cifs_sid));
++ cifs_copy_sid(group_sid_ptr, ngroup_sid_ptr);
+ kfree(ngroup_sid_ptr);
+ *aclflag = CIFS_ACL_GROUP;
+ }
+diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
+index 1cfef9f..94afdfd 100644
+--- a/fs/ecryptfs/main.c
++++ b/fs/ecryptfs/main.c
+@@ -280,6 +280,7 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options,
+ char *fnek_src;
+ char *cipher_key_bytes_src;
+ char *fn_cipher_key_bytes_src;
++ u8 cipher_code;
+
+ *check_ruid = 0;
+
+@@ -421,6 +422,18 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options,
+ && !fn_cipher_key_bytes_set)
+ mount_crypt_stat->global_default_fn_cipher_key_bytes =
+ mount_crypt_stat->global_default_cipher_key_size;
++
++ cipher_code = ecryptfs_code_for_cipher_string(
++ mount_crypt_stat->global_default_cipher_name,
++ mount_crypt_stat->global_default_cipher_key_size);
++ if (!cipher_code) {
++ ecryptfs_printk(KERN_ERR,
++ "eCryptfs doesn't support cipher: %s",
++ mount_crypt_stat->global_default_cipher_name);
++ rc = -EINVAL;
++ goto out;
++ }
++
+ mutex_lock(&key_tfm_list_mutex);
+ if (!ecryptfs_tfm_exists(mount_crypt_stat->global_default_cipher_name,
+ NULL)) {
+@@ -506,7 +519,6 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
+ goto out;
+ }
+
+- s->s_flags = flags;
+ rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY);
+ if (rc)
+ goto out1;
+@@ -542,6 +554,15 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
+ }
+
+ ecryptfs_set_superblock_lower(s, path.dentry->d_sb);
++
++ /**
++ * Set the POSIX ACL flag based on whether they're enabled in the lower
++ * mount. Force a read-only eCryptfs mount if the lower mount is ro.
++ * Allow a ro eCryptfs mount even when the lower mount is rw.
++ */
++ s->s_flags = flags & ~MS_POSIXACL;
++ s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL);
++
+ s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
+ s->s_blocksize = path.dentry->d_sb->s_blocksize;
+ s->s_magic = ECRYPTFS_SUPER_MAGIC;
+diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
+index a6e711a..ee02db5 100644
+--- a/fs/nfs/dns_resolve.c
++++ b/fs/nfs/dns_resolve.c
+@@ -213,7 +213,7 @@ static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen)
+ {
+ char buf1[NFS_DNS_HOSTNAME_MAXLEN+1];
+ struct nfs_dns_ent key, *item;
+- unsigned long ttl;
++ unsigned int ttl;
+ ssize_t len;
+ int ret = -EINVAL;
+
+@@ -236,7 +236,8 @@ static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen)
+ key.namelen = len;
+ memset(&key.h, 0, sizeof(key.h));
+
+- ttl = get_expiry(&buf);
++ if (get_uint(&buf, &ttl) < 0)
++ goto out;
+ if (ttl == 0)
+ goto out;
+ key.h.expiry_time = ttl + seconds_since_boot();
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 68b3f20..c5af878 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -274,8 +274,9 @@ extern void nfs_sb_active(struct super_block *sb);
+ extern void nfs_sb_deactive(struct super_block *sb);
+
+ /* namespace.c */
++#define NFS_PATH_CANONICAL 1
+ extern char *nfs_path(char **p, struct dentry *dentry,
+- char *buffer, ssize_t buflen);
++ char *buffer, ssize_t buflen, unsigned flags);
+ extern struct vfsmount *nfs_d_automount(struct path *path);
+ #ifdef CONFIG_NFS_V4
+ rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
+@@ -364,7 +365,7 @@ static inline char *nfs_devname(struct dentry *dentry,
+ char *buffer, ssize_t buflen)
+ {
+ char *dummy;
+- return nfs_path(&dummy, dentry, buffer, buflen);
++ return nfs_path(&dummy, dentry, buffer, buflen, NFS_PATH_CANONICAL);
+ }
+
+ /*
+diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
+index d4c2d6b..3d93216 100644
+--- a/fs/nfs/mount_clnt.c
++++ b/fs/nfs/mount_clnt.c
+@@ -181,7 +181,7 @@ int nfs_mount(struct nfs_mount_request *info)
+ else
+ msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC_MNT];
+
+- status = rpc_call_sync(mnt_clnt, &msg, 0);
++ status = rpc_call_sync(mnt_clnt, &msg, RPC_TASK_SOFT|RPC_TASK_TIMEOUT);
+ rpc_shutdown_client(mnt_clnt);
+
+ if (status < 0)
+diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
+index 8102391..a86873e 100644
+--- a/fs/nfs/namespace.c
++++ b/fs/nfs/namespace.c
+@@ -37,6 +37,7 @@ static struct vfsmount *nfs_do_submount(struct dentry *dentry,
+ * @dentry - pointer to dentry
+ * @buffer - result buffer
+ * @buflen - length of buffer
++ * @flags - options (see below)
+ *
+ * Helper function for constructing the server pathname
+ * by arbitrary hashed dentry.
+@@ -44,8 +45,14 @@ static struct vfsmount *nfs_do_submount(struct dentry *dentry,
+ * This is mainly for use in figuring out the path on the
+ * server side when automounting on top of an existing partition
+ * and in generating /proc/mounts and friends.
++ *
++ * Supported flags:
++ * NFS_PATH_CANONICAL: ensure there is exactly one slash after
++ * the original device (export) name
++ * (if unset, the original name is returned verbatim)
+ */
+-char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen)
++char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen,
++ unsigned flags)
+ {
+ char *end;
+ int namelen;
+@@ -78,7 +85,7 @@ rename_retry:
+ rcu_read_unlock();
+ goto rename_retry;
+ }
+- if (*end != '/') {
++ if ((flags & NFS_PATH_CANONICAL) && *end != '/') {
+ if (--buflen < 0) {
+ spin_unlock(&dentry->d_lock);
+ rcu_read_unlock();
+@@ -95,9 +102,11 @@ rename_retry:
+ return end;
+ }
+ namelen = strlen(base);
+- /* Strip off excess slashes in base string */
+- while (namelen > 0 && base[namelen - 1] == '/')
+- namelen--;
++ if (flags & NFS_PATH_CANONICAL) {
++ /* Strip off excess slashes in base string */
++ while (namelen > 0 && base[namelen - 1] == '/')
++ namelen--;
++ }
+ buflen -= namelen;
+ if (buflen < 0) {
+ spin_unlock(&dentry->d_lock);
+diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
+index bb80c49..96f2b67 100644
+--- a/fs/nfs/nfs4namespace.c
++++ b/fs/nfs/nfs4namespace.c
+@@ -57,7 +57,8 @@ Elong:
+ static char *nfs4_path(struct dentry *dentry, char *buffer, ssize_t buflen)
+ {
+ char *limit;
+- char *path = nfs_path(&limit, dentry, buffer, buflen);
++ char *path = nfs_path(&limit, dentry, buffer, buflen,
++ NFS_PATH_CANONICAL);
+ if (!IS_ERR(path)) {
+ char *colon = strchr(path, ':');
+ if (colon && colon < limit)
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 61796a40..864b831 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -303,8 +303,7 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
+ dprintk("%s ERROR: %d Reset session\n", __func__,
+ errorcode);
+ nfs4_schedule_session_recovery(clp->cl_session);
+- exception->retry = 1;
+- break;
++ goto wait_on_recovery;
+ #endif /* defined(CONFIG_NFS_V4_1) */
+ case -NFS4ERR_FILE_OPEN:
+ if (exception->timeout > HZ) {
+@@ -1464,9 +1463,11 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
+ data->timestamp = jiffies;
+ if (nfs4_setup_sequence(data->o_arg.server,
+ &data->o_arg.seq_args,
+- &data->o_res.seq_res, 1, task))
+- return;
+- rpc_call_start(task);
++ &data->o_res.seq_res,
++ 1, task) != 0)
++ nfs_release_seqid(data->o_arg.seqid);
++ else
++ rpc_call_start(task);
+ return;
+ unlock_no_action:
+ rcu_read_unlock();
+@@ -2046,9 +2047,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
+ calldata->timestamp = jiffies;
+ if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
+ &calldata->arg.seq_args, &calldata->res.seq_res,
+- 1, task))
+- return;
+- rpc_call_start(task);
++ 1, task) != 0)
++ nfs_release_seqid(calldata->arg.seqid);
++ else
++ rpc_call_start(task);
+ }
+
+ static const struct rpc_call_ops nfs4_close_ops = {
+@@ -4148,6 +4150,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
+ if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
+ rpc_restart_call_prepare(task);
+ }
++ nfs_release_seqid(calldata->arg.seqid);
+ }
+
+ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
+@@ -4164,9 +4167,11 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
+ calldata->timestamp = jiffies;
+ if (nfs4_setup_sequence(calldata->server,
+ &calldata->arg.seq_args,
+- &calldata->res.seq_res, 1, task))
+- return;
+- rpc_call_start(task);
++ &calldata->res.seq_res,
++ 1, task) != 0)
++ nfs_release_seqid(calldata->arg.seqid);
++ else
++ rpc_call_start(task);
+ }
+
+ static const struct rpc_call_ops nfs4_locku_ops = {
+@@ -4310,7 +4315,7 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
+ /* Do we need to do an open_to_lock_owner? */
+ if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
+ if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
+- return;
++ goto out_release_lock_seqid;
+ data->arg.open_stateid = &state->stateid;
+ data->arg.new_lock_owner = 1;
+ data->res.open_seqid = data->arg.open_seqid;
+@@ -4319,10 +4324,15 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
+ data->timestamp = jiffies;
+ if (nfs4_setup_sequence(data->server,
+ &data->arg.seq_args,
+- &data->res.seq_res, 1, task))
++ &data->res.seq_res,
++ 1, task) == 0) {
++ rpc_call_start(task);
+ return;
+- rpc_call_start(task);
+- dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
++ }
++ nfs_release_seqid(data->arg.open_seqid);
++out_release_lock_seqid:
++ nfs_release_seqid(data->arg.lock_seqid);
++ dprintk("%s: done!, ret = %d\n", __func__, task->tk_status);
+ }
+
+ static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index e42d6f6..8150344 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -768,7 +768,7 @@ static int nfs_show_devname(struct seq_file *m, struct vfsmount *mnt)
+ int err = 0;
+ if (!page)
+ return -ENOMEM;
+- devname = nfs_path(&dummy, mnt->mnt_root, page, PAGE_SIZE);
++ devname = nfs_path(&dummy, mnt->mnt_root, page, PAGE_SIZE, 0);
+ if (IS_ERR(devname))
+ err = PTR_ERR(devname);
+ else
+diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
+index 5f312ab..a0205fc 100644
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -401,7 +401,7 @@ fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc)
+ int migrated, i, err;
+
+ /* listsize */
+- err = get_int(mesg, &fsloc->locations_count);
++ err = get_uint(mesg, &fsloc->locations_count);
+ if (err)
+ return err;
+ if (fsloc->locations_count > MAX_FS_LOCATIONS)
+@@ -459,7 +459,7 @@ static int secinfo_parse(char **mesg, char *buf, struct svc_export *exp)
+ return -EINVAL;
+
+ for (f = exp->ex_flavors; f < exp->ex_flavors + listsize; f++) {
+- err = get_int(mesg, &f->pseudoflavor);
++ err = get_uint(mesg, &f->pseudoflavor);
+ if (err)
+ return err;
+ /*
+@@ -468,7 +468,7 @@ static int secinfo_parse(char **mesg, char *buf, struct svc_export *exp)
+ * problem at export time instead of when a client fails
+ * to authenticate.
+ */
+- err = get_int(mesg, &f->flags);
++ err = get_uint(mesg, &f->flags);
+ if (err)
+ return err;
+ /* Only some flags are allowed to differ between flavors: */
+diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
+index f35794b..a506360 100644
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -21,6 +21,7 @@ static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
+ if ((old->path.mnt == new->path.mnt) &&
+ (old->path.dentry == new->path.dentry))
+ return true;
++ break;
+ case (FSNOTIFY_EVENT_NONE):
+ return true;
+ default:
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 4f5d0ce..86ca506 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -3514,7 +3514,7 @@ xlog_do_recovery_pass(
+ * - order is important.
+ */
+ error = xlog_bread_offset(log, 0,
+- bblks - split_bblks, hbp,
++ bblks - split_bblks, dbp,
+ offset + BBTOB(split_bblks));
+ if (error)
+ goto bread_err2;
+diff --git a/include/linux/if_link.h b/include/linux/if_link.h
+index c52d4b5..4b24ff4 100644
+--- a/include/linux/if_link.h
++++ b/include/linux/if_link.h
+@@ -137,6 +137,7 @@ enum {
+ IFLA_AF_SPEC,
+ IFLA_GROUP, /* Group the device belongs to */
+ IFLA_NET_NS_FD,
++ IFLA_EXT_MASK, /* Extended info mask, VFs, etc */
+ __IFLA_MAX
+ };
+
+diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
+index 8e872ea..577592e 100644
+--- a/include/linux/rtnetlink.h
++++ b/include/linux/rtnetlink.h
+@@ -602,6 +602,9 @@ struct tcamsg {
+ #define TCA_ACT_TAB 1 /* attr type must be >=1 */
+ #define TCAA_MAX 1
+
++/* New extended info filters for IFLA_EXT_MASK */
++#define RTEXT_FILTER_VF (1 << 0)
++
+ /* End of information exported to user level */
+
+ #ifdef __KERNEL__
+diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
+index 5efd8ce..f0c6ab5 100644
+--- a/include/linux/sunrpc/cache.h
++++ b/include/linux/sunrpc/cache.h
+@@ -224,6 +224,22 @@ static inline int get_int(char **bpp, int *anint)
+ return 0;
+ }
+
++static inline int get_uint(char **bpp, unsigned int *anint)
++{
++ char buf[50];
++ int len = qword_get(bpp, buf, sizeof(buf));
++
++ if (len < 0)
++ return -EINVAL;
++ if (len == 0)
++ return -ENOENT;
++
++ if (kstrtouint(buf, 0, anint))
++ return -EINVAL;
++
++ return 0;
++}
++
+ /*
+ * timestamps kept in the cache are expressed in seconds
+ * since boot. This is the best for measuring differences in
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index 95852e3..19d632d 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -2431,6 +2431,15 @@ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb);
+ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc);
+
+ /**
++ * ieee80211_get_mesh_hdrlen - get mesh extension header length
++ * @meshhdr: the mesh extension header, only the flags field
++ * (first byte) will be accessed
++ * Returns the length of the extension header, which is always at
++ * least 6 bytes and at most 18 if address 5 and 6 are present.
++ */
++unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
++
++/**
+ * DOC: Data path helpers
+ *
+ * In addition to generic utilities, cfg80211 also offers
+diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
+index 678f1ff..3702939 100644
+--- a/include/net/rtnetlink.h
++++ b/include/net/rtnetlink.h
+@@ -6,7 +6,7 @@
+
+ typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, void *);
+ typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
+-typedef u16 (*rtnl_calcit_func)(struct sk_buff *);
++typedef u16 (*rtnl_calcit_func)(struct sk_buff *, struct nlmsghdr *);
+
+ extern int __rtnl_register(int protocol, int msgtype,
+ rtnl_doit_func, rtnl_dumpit_func,
+diff --git a/include/sound/core.h b/include/sound/core.h
+index 3be5ab7..222f11e 100644
+--- a/include/sound/core.h
++++ b/include/sound/core.h
+@@ -132,6 +132,7 @@ struct snd_card {
+ int shutdown; /* this card is going down */
+ int free_on_last_close; /* free in context of file_release */
+ wait_queue_head_t shutdown_sleep;
++ atomic_t refcount; /* refcount for disconnection */
+ struct device *dev; /* device assigned to this card */
+ struct device *card_dev; /* cardX object for sysfs */
+
+@@ -189,6 +190,7 @@ struct snd_minor {
+ const struct file_operations *f_ops; /* file operations */
+ void *private_data; /* private data for f_ops->open */
+ struct device *dev; /* device for sysfs */
++ struct snd_card *card_ptr; /* assigned card instance */
+ };
+
+ /* return a device pointer linked to each sound device as a parent */
+@@ -295,6 +297,7 @@ int snd_card_info_done(void);
+ int snd_component_add(struct snd_card *card, const char *component);
+ int snd_card_file_add(struct snd_card *card, struct file *file);
+ int snd_card_file_remove(struct snd_card *card, struct file *file);
++void snd_card_unref(struct snd_card *card);
+
+ #define snd_card_set_dev(card, devptr) ((card)->dev = (devptr))
+
+diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
+index 92f1a79..348c4fe 100644
+--- a/include/trace/events/xen.h
++++ b/include/trace/events/xen.h
+@@ -377,6 +377,14 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd,
+ DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
+ DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
+
++TRACE_EVENT(xen_mmu_flush_tlb_all,
++ TP_PROTO(int x),
++ TP_ARGS(x),
++ TP_STRUCT__entry(__array(char, x, 0)),
++ TP_fast_assign((void)x),
++ TP_printk("%s", "")
++ );
++
+ TRACE_EVENT(xen_mmu_flush_tlb,
+ TP_PROTO(int x),
+ TP_ARGS(x),
+diff --git a/kernel/module.c b/kernel/module.c
+index 6c8fa34..65362d9 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2193,15 +2193,17 @@ static void layout_symtab(struct module *mod, struct load_info *info)
+
+ src = (void *)info->hdr + symsect->sh_offset;
+ nsrc = symsect->sh_size / sizeof(*src);
+- for (ndst = i = 1; i < nsrc; ++i, ++src)
+- if (is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) {
+- unsigned int j = src->st_name;
++ for (ndst = i = 0; i < nsrc; i++) {
++ if (i == 0 ||
++ is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
++ unsigned int j = src[i].st_name;
+
+ while (!__test_and_set_bit(j, info->strmap)
+ && info->strtab[j])
+ ++j;
+ ++ndst;
+ }
++ }
+
+ /* Append room for core symbols at end of core part. */
+ info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
+@@ -2238,14 +2240,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
+
+ mod->core_symtab = dst = mod->module_core + info->symoffs;
+ src = mod->symtab;
+- *dst = *src;
+- for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
+- if (!is_core_symbol(src, info->sechdrs, info->hdr->e_shnum))
+- continue;
+- dst[ndst] = *src;
+- dst[ndst].st_name = bitmap_weight(info->strmap,
+- dst[ndst].st_name);
+- ++ndst;
++ for (ndst = i = 0; i < mod->num_symtab; i++) {
++ if (i == 0 ||
++ is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
++ dst[ndst] = src[i];
++ dst[ndst].st_name = bitmap_weight(info->strmap,
++ dst[ndst].st_name);
++ ++ndst;
++ }
+ }
+ mod->core_num_syms = ndst;
+
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 86eb848..313381c 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -3015,6 +3015,8 @@ static int kswapd(void *p)
+ &balanced_classzone_idx);
+ }
+ }
++
++ current->reclaim_state = NULL;
+ return 0;
+ }
+
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 1fb1aec..aa12649 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -642,8 +642,10 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
+ {
+ BT_DBG("conn %p", conn);
+
++#ifdef CONFIG_BT_L2CAP
+ if (conn->type == LE_LINK)
+ return smp_conn_security(conn, sec_level);
++#endif
+
+ /* For sdp we don't need the link key. */
+ if (sec_level == BT_SECURITY_SDP)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index f500a69..480be72 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1633,7 +1633,7 @@ static inline int deliver_skb(struct sk_buff *skb,
+
+ static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
+ {
+- if (ptype->af_packet_priv == NULL)
++ if (!ptype->af_packet_priv || !skb->sk)
+ return false;
+
+ if (ptype->id_match)
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 0cf604b..5229c7f 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -60,7 +60,6 @@ struct rtnl_link {
+ };
+
+ static DEFINE_MUTEX(rtnl_mutex);
+-static u16 min_ifinfo_dump_size;
+
+ void rtnl_lock(void)
+ {
+@@ -727,10 +726,11 @@ static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
+ }
+
+ /* All VF info */
+-static inline int rtnl_vfinfo_size(const struct net_device *dev)
++static inline int rtnl_vfinfo_size(const struct net_device *dev,
++ u32 ext_filter_mask)
+ {
+- if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
+-
++ if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
++ (ext_filter_mask & RTEXT_FILTER_VF)) {
+ int num_vfs = dev_num_vf(dev->dev.parent);
+ size_t size = nla_total_size(sizeof(struct nlattr));
+ size += nla_total_size(num_vfs * sizeof(struct nlattr));
+@@ -769,7 +769,8 @@ static size_t rtnl_port_size(const struct net_device *dev)
+ return port_self_size;
+ }
+
+-static noinline size_t if_nlmsg_size(const struct net_device *dev)
++static noinline size_t if_nlmsg_size(const struct net_device *dev,
++ u32 ext_filter_mask)
+ {
+ return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+ + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+@@ -787,8 +788,9 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev)
+ + nla_total_size(4) /* IFLA_MASTER */
+ + nla_total_size(1) /* IFLA_OPERSTATE */
+ + nla_total_size(1) /* IFLA_LINKMODE */
+- + nla_total_size(4) /* IFLA_NUM_VF */
+- + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
++ + nla_total_size(ext_filter_mask
++ & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
++ + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
+ + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+ + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
+@@ -871,7 +873,7 @@ static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
+
+ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
+ int type, u32 pid, u32 seq, u32 change,
+- unsigned int flags)
++ unsigned int flags, u32 ext_filter_mask)
+ {
+ struct ifinfomsg *ifm;
+ struct nlmsghdr *nlh;
+@@ -944,10 +946,11 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
+ goto nla_put_failure;
+ copy_rtnl_link_stats64(nla_data(attr), stats);
+
+- if (dev->dev.parent)
++ if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF))
+ NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
+
+- if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
++ if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
++ && (ext_filter_mask & RTEXT_FILTER_VF)) {
+ int i;
+
+ struct nlattr *vfinfo, *vf;
+@@ -1051,6 +1054,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ struct net_device *dev;
+ struct hlist_head *head;
+ struct hlist_node *node;
++ struct nlattr *tb[IFLA_MAX+1];
++ u32 ext_filter_mask = 0;
+
+ s_h = cb->args[0];
+ s_idx = cb->args[1];
+@@ -1058,6 +1063,13 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ rcu_read_lock();
+ cb->seq = net->dev_base_seq;
+
++ if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
++ ifla_policy) >= 0) {
++
++ if (tb[IFLA_EXT_MASK])
++ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
++ }
++
+ for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+ idx = 0;
+ head = &net->dev_index_head[h];
+@@ -1067,7 +1079,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, 0,
+- NLM_F_MULTI) <= 0)
++ NLM_F_MULTI,
++ ext_filter_mask) <= 0)
+ goto out;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+@@ -1103,6 +1116,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
+ [IFLA_VF_PORTS] = { .type = NLA_NESTED },
+ [IFLA_PORT_SELF] = { .type = NLA_NESTED },
+ [IFLA_AF_SPEC] = { .type = NLA_NESTED },
++ [IFLA_EXT_MASK] = { .type = NLA_U32 },
+ };
+ EXPORT_SYMBOL(ifla_policy);
+
+@@ -1845,6 +1859,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+ struct net_device *dev = NULL;
+ struct sk_buff *nskb;
+ int err;
++ u32 ext_filter_mask = 0;
+
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
+ if (err < 0)
+@@ -1853,6 +1868,9 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+ if (tb[IFLA_IFNAME])
+ nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
+
++ if (tb[IFLA_EXT_MASK])
++ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
++
+ ifm = nlmsg_data(nlh);
+ if (ifm->ifi_index > 0)
+ dev = __dev_get_by_index(net, ifm->ifi_index);
+@@ -1864,12 +1882,12 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+ if (dev == NULL)
+ return -ENODEV;
+
+- nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
++ nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
+ if (nskb == NULL)
+ return -ENOBUFS;
+
+ err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid,
+- nlh->nlmsg_seq, 0, 0);
++ nlh->nlmsg_seq, 0, 0, ext_filter_mask);
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in if_nlmsg_size */
+ WARN_ON(err == -EMSGSIZE);
+@@ -1880,8 +1898,32 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+ return err;
+ }
+
+-static u16 rtnl_calcit(struct sk_buff *skb)
++static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
+ {
++ struct net *net = sock_net(skb->sk);
++ struct net_device *dev;
++ struct nlattr *tb[IFLA_MAX+1];
++ u32 ext_filter_mask = 0;
++ u16 min_ifinfo_dump_size = 0;
++
++ if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
++ ifla_policy) >= 0) {
++ if (tb[IFLA_EXT_MASK])
++ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
++ }
++
++ if (!ext_filter_mask)
++ return NLMSG_GOODSIZE;
++ /*
++ * traverse the list of net devices and compute the minimum
++ * buffer size based upon the filter mask.
++ */
++ list_for_each_entry(dev, &net->dev_base_head, dev_list) {
++ min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
++ if_nlmsg_size(dev,
++ ext_filter_mask));
++ }
++
+ return min_ifinfo_dump_size;
+ }
+
+@@ -1916,13 +1958,11 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
+ int err = -ENOBUFS;
+ size_t if_info_size;
+
+- skb = nlmsg_new((if_info_size = if_nlmsg_size(dev)), GFP_KERNEL);
++ skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL);
+ if (skb == NULL)
+ goto errout;
+
+- min_ifinfo_dump_size = max_t(u16, if_info_size, min_ifinfo_dump_size);
+-
+- err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0);
++ err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0);
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in if_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+@@ -1980,7 +2020,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ return -EOPNOTSUPP;
+ calcit = rtnl_get_calcit(family, type);
+ if (calcit)
+- min_dump_alloc = calcit(skb);
++ min_dump_alloc = calcit(skb, nlh);
+
+ __rtnl_unlock();
+ rtnl = net->rtnl;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 7397ad8..52edbb8 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -481,14 +481,12 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+ !tp->urg_data ||
+ before(tp->urg_seq, tp->copied_seq) ||
+ !before(tp->urg_seq, tp->rcv_nxt)) {
+- struct sk_buff *skb;
+
+ answ = tp->rcv_nxt - tp->copied_seq;
+
+- /* Subtract 1, if FIN is in queue. */
+- skb = skb_peek_tail(&sk->sk_receive_queue);
+- if (answ && skb)
+- answ -= tcp_hdr(skb)->fin;
++ /* Subtract 1, if FIN was received */
++ if (answ && sock_flag(sk, SOCK_DONE))
++ answ--;
+ } else
+ answ = tp->urg_seq - tp->copied_seq;
+ release_sock(sk);
+diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
+index 813b43a..834857f 100644
+--- a/net/ipv4/tcp_illinois.c
++++ b/net/ipv4/tcp_illinois.c
+@@ -313,11 +313,13 @@ static void tcp_illinois_info(struct sock *sk, u32 ext,
+ .tcpv_rttcnt = ca->cnt_rtt,
+ .tcpv_minrtt = ca->base_rtt,
+ };
+- u64 t = ca->sum_rtt;
+
+- do_div(t, ca->cnt_rtt);
+- info.tcpv_rtt = t;
++ if (info.tcpv_rttcnt > 0) {
++ u64 t = ca->sum_rtt;
+
++ do_div(t, info.tcpv_rttcnt);
++ info.tcpv_rtt = t;
++ }
+ nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
+ }
+ }
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 0cb78d7..9ffc37f 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -606,7 +606,7 @@ static void ndisc_send_unsol_na(struct net_device *dev)
+ {
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifa;
+- struct in6_addr mcaddr;
++ struct in6_addr mcaddr = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
+
+ idev = in6_dev_get(dev);
+ if (!idev)
+@@ -614,7 +614,6 @@ static void ndisc_send_unsol_na(struct net_device *dev)
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+- addrconf_addr_solict_mult(&ifa->addr, &mcaddr);
+ ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr,
+ /*router=*/ !!idev->cnf.forwarding,
+ /*solicited=*/ false, /*override=*/ true,
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 488a1b7..19724bd 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -185,7 +185,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
+ };
+
+ static const u32 ip6_template_metrics[RTAX_MAX] = {
+- [RTAX_HOPLIMIT - 1] = 255,
++ [RTAX_HOPLIMIT - 1] = 0,
+ };
+
+ static struct rt6_info ip6_null_entry_template = {
+@@ -1097,7 +1097,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
+ ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+ rt->rt6i_dst.plen = 128;
+ rt->rt6i_idev = idev;
+- dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
++ dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
+
+ spin_lock_bh(&icmp6_dst_lock);
+ rt->dst.next = icmp6_dst_gc_list;
+diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
+index 2cef50b..64164fb 100644
+--- a/net/l2tp/l2tp_eth.c
++++ b/net/l2tp/l2tp_eth.c
+@@ -269,6 +269,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
+
+ out_del_dev:
+ free_netdev(dev);
++ spriv->dev = NULL;
+ out_del_session:
+ l2tp_session_delete(session);
+ out:
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index 3ece106..8c7364b 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -940,7 +940,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
+ sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
+ sdata->u.ibss.ibss_join_req = jiffies;
+
+- memcpy(sdata->u.ibss.ssid, params->ssid, IEEE80211_MAX_SSID_LEN);
++ memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len);
+ sdata->u.ibss.ssid_len = params->ssid_len;
+
+ mutex_unlock(&sdata->u.ibss.mtx);
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index cda4875..cd6cbdb 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -515,6 +515,11 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
+
+ if (ieee80211_is_action(hdr->frame_control)) {
+ u8 category;
++
++ /* make sure category field is present */
++ if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
++ return RX_DROP_MONITOR;
++
+ mgmt = (struct ieee80211_mgmt *)hdr;
+ category = mgmt->u.action.category;
+ if (category != WLAN_CATEGORY_MESH_ACTION &&
+@@ -854,14 +859,16 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
+ (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
+ if (rx->sta && rx->sta->dummy &&
+ ieee80211_is_data_present(hdr->frame_control)) {
+- u16 ethertype;
+- u8 *payload;
+-
+- payload = rx->skb->data +
+- ieee80211_hdrlen(hdr->frame_control);
+- ethertype = (payload[6] << 8) | payload[7];
+- if (cpu_to_be16(ethertype) ==
+- rx->sdata->control_port_protocol)
++ unsigned int hdrlen;
++ __be16 ethertype;
++
++ hdrlen = ieee80211_hdrlen(hdr->frame_control);
++
++ if (rx->skb->len < hdrlen + 8)
++ return RX_DROP_MONITOR;
++
++ skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
++ if (ethertype == rx->sdata->control_port_protocol)
+ return RX_CONTINUE;
+ }
+ return RX_DROP_MONITOR;
+@@ -1449,11 +1456,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+
+ hdr = (struct ieee80211_hdr *)rx->skb->data;
+ fc = hdr->frame_control;
++
++ if (ieee80211_is_ctl(fc))
++ return RX_CONTINUE;
++
+ sc = le16_to_cpu(hdr->seq_ctrl);
+ frag = sc & IEEE80211_SCTL_FRAG;
+
+ if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
+- (rx->skb)->len < 24 ||
+ is_multicast_ether_addr(hdr->addr1))) {
+ /* not fragmented */
+ goto out;
+@@ -1887,6 +1897,20 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
++
++ /* make sure fixed part of mesh header is there, also checks skb len */
++ if (!pskb_may_pull(rx->skb, hdrlen + 6))
++ return RX_DROP_MONITOR;
++
++ mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
++
++ /* make sure full mesh header is there, also checks skb len */
++ if (!pskb_may_pull(rx->skb,
++ hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
++ return RX_DROP_MONITOR;
++
++ /* reload pointers */
++ hdr = (struct ieee80211_hdr *) skb->data;
+ mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+
+ /* frame is in RMC, don't forward */
+@@ -1895,7 +1919,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
+ mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata))
+ return RX_DROP_MONITOR;
+
+- if (!ieee80211_is_data(hdr->frame_control))
++ if (!ieee80211_is_data(hdr->frame_control) ||
++ !(status->rx_flags & IEEE80211_RX_RA_MATCH))
+ return RX_CONTINUE;
+
+ if (!mesh_hdr->ttl)
+@@ -1916,9 +1941,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ mpp_addr = hdr->addr3;
+ proxied_addr = mesh_hdr->eaddr1;
+- } else {
++ } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) {
++ /* has_a4 already checked in ieee80211_rx_mesh_check */
+ mpp_addr = hdr->addr4;
+ proxied_addr = mesh_hdr->eaddr2;
++ } else {
++ return RX_DROP_MONITOR;
+ }
+
+ rcu_read_lock();
+@@ -1941,7 +1969,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
+
+ mesh_hdr->ttl--;
+
+- if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
++ {
+ if (!mesh_hdr->ttl)
+ IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
+ dropped_frames_ttl);
+@@ -2295,6 +2323,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
+ }
+ break;
+ case WLAN_CATEGORY_SELF_PROTECTED:
++ if (len < (IEEE80211_MIN_ACTION_SIZE +
++ sizeof(mgmt->u.action.u.self_prot.action_code)))
++ break;
++
+ switch (mgmt->u.action.u.self_prot.action_code) {
+ case WLAN_SP_MESH_PEERING_OPEN:
+ case WLAN_SP_MESH_PEERING_CLOSE:
+@@ -2313,6 +2345,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
+ }
+ break;
+ case WLAN_CATEGORY_MESH_ACTION:
++ if (len < (IEEE80211_MIN_ACTION_SIZE +
++ sizeof(mgmt->u.action.u.mesh_action.action_code)))
++ break;
++
+ if (!ieee80211_vif_is_mesh(&sdata->vif))
+ break;
+ if (mesh_action_is_path_sel(mgmt) &&
+@@ -2870,10 +2906,15 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
+ status->rx_flags |= IEEE80211_RX_IN_SCAN;
+
+- if (ieee80211_is_mgmt(fc))
+- err = skb_linearize(skb);
+- else
++ if (ieee80211_is_mgmt(fc)) {
++ /* drop frame if too short for header */
++ if (skb->len < ieee80211_hdrlen(fc))
++ err = -ENOBUFS;
++ else
++ err = skb_linearize(skb);
++ } else {
+ err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
++ }
+
+ if (err) {
+ dev_kfree_skb(skb);
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 38b78b9..3d1d55d 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -137,6 +137,8 @@ static void netlink_destroy_callback(struct netlink_callback *cb);
+ static DEFINE_RWLOCK(nl_table_lock);
+ static atomic_t nl_table_users = ATOMIC_INIT(0);
+
++#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
++
+ static ATOMIC_NOTIFIER_HEAD(netlink_chain);
+
+ static u32 netlink_group_mask(u32 group)
+@@ -331,6 +333,11 @@ netlink_update_listeners(struct sock *sk)
+ struct hlist_node *node;
+ unsigned long mask;
+ unsigned int i;
++ struct listeners *listeners;
++
++ listeners = nl_deref_protected(tbl->listeners);
++ if (!listeners)
++ return;
+
+ for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
+ mask = 0;
+@@ -338,7 +345,7 @@ netlink_update_listeners(struct sock *sk)
+ if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
+ mask |= nlk_sk(sk)->groups[i];
+ }
+- tbl->listeners->masks[i] = mask;
++ listeners->masks[i] = mask;
+ }
+ /* this function is only called with the netlink table "grabbed", which
+ * makes sure updates are visible before bind or setsockopt return. */
+@@ -519,7 +526,11 @@ static int netlink_release(struct socket *sock)
+ if (netlink_is_kernel(sk)) {
+ BUG_ON(nl_table[sk->sk_protocol].registered == 0);
+ if (--nl_table[sk->sk_protocol].registered == 0) {
+- kfree(nl_table[sk->sk_protocol].listeners);
++ struct listeners *old;
++
++ old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
++ RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
++ kfree_rcu(old, rcu);
+ nl_table[sk->sk_protocol].module = NULL;
+ nl_table[sk->sk_protocol].registered = 0;
+ }
+@@ -950,7 +961,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group)
+ rcu_read_lock();
+ listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
+
+- if (group - 1 < nl_table[sk->sk_protocol].groups)
++ if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
+ res = test_bit(group - 1, listeners->masks);
+
+ rcu_read_unlock();
+@@ -1584,7 +1595,7 @@ int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
+ new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
+ if (!new)
+ return -ENOMEM;
+- old = rcu_dereference_protected(tbl->listeners, 1);
++ old = nl_deref_protected(tbl->listeners);
+ memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
+ rcu_assign_pointer(tbl->listeners, new);
+
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
+index 76388b0..9032d50 100644
+--- a/net/sctp/sm_sideeffect.c
++++ b/net/sctp/sm_sideeffect.c
+@@ -1604,8 +1604,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
+ asoc->outqueue.outstanding_bytes;
+ sackh.num_gap_ack_blocks = 0;
+ sackh.num_dup_tsns = 0;
++ chunk->subh.sack_hdr = &sackh;
+ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
+- SCTP_SACKH(&sackh));
++ SCTP_CHUNK(chunk));
+ break;
+
+ case SCTP_CMD_DISCARD_PACKET:
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 8f5042d..ea93f4b 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -548,8 +548,7 @@ int wiphy_register(struct wiphy *wiphy)
+ for (i = 0; i < sband->n_channels; i++) {
+ sband->channels[i].orig_flags =
+ sband->channels[i].flags;
+- sband->channels[i].orig_mag =
+- sband->channels[i].max_antenna_gain;
++ sband->channels[i].orig_mag = INT_MAX;
+ sband->channels[i].orig_mpwr =
+ sband->channels[i].max_power;
+ sband->channels[i].band = band;
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 22fb802..5fba039 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -301,23 +301,21 @@ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb);
+
+-static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
++unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
+ {
+ int ae = meshhdr->flags & MESH_FLAGS_AE;
+- /* 7.1.3.5a.2 */
++ /* 802.11-2012, 8.2.4.7.3 */
+ switch (ae) {
++ default:
+ case 0:
+ return 6;
+ case MESH_FLAGS_AE_A4:
+ return 12;
+ case MESH_FLAGS_AE_A5_A6:
+ return 18;
+- case (MESH_FLAGS_AE_A4 | MESH_FLAGS_AE_A5_A6):
+- return 24;
+- default:
+- return 6;
+ }
+ }
++EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
+
+ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
+ enum nl80211_iftype iftype)
+@@ -365,6 +363,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
+ /* make sure meshdr->flags is on the linear part */
+ if (!pskb_may_pull(skb, hdrlen + 1))
+ return -1;
++ if (meshdr->flags & MESH_FLAGS_AE_A4)
++ return -1;
+ if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
+ skb_copy_bits(skb, hdrlen +
+ offsetof(struct ieee80211s_hdr, eaddr1),
+@@ -389,6 +389,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
+ /* make sure meshdr->flags is on the linear part */
+ if (!pskb_may_pull(skb, hdrlen + 1))
+ return -1;
++ if (meshdr->flags & MESH_FLAGS_AE_A5_A6)
++ return -1;
+ if (meshdr->flags & MESH_FLAGS_AE_A4)
+ skb_copy_bits(skb, hdrlen +
+ offsetof(struct ieee80211s_hdr, eaddr1),
+diff --git a/sound/core/control.c b/sound/core/control.c
+index 819a5c5..5511307 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -86,6 +86,7 @@ static int snd_ctl_open(struct inode *inode, struct file *file)
+ write_lock_irqsave(&card->ctl_files_rwlock, flags);
+ list_add_tail(&ctl->list, &card->ctl_files);
+ write_unlock_irqrestore(&card->ctl_files_rwlock, flags);
++ snd_card_unref(card);
+ return 0;
+
+ __error:
+@@ -93,6 +94,8 @@ static int snd_ctl_open(struct inode *inode, struct file *file)
+ __error2:
+ snd_card_file_remove(card, file);
+ __error1:
++ if (card)
++ snd_card_unref(card);
+ return err;
+ }
+
+@@ -1433,6 +1436,8 @@ static ssize_t snd_ctl_read(struct file *file, char __user *buffer,
+ spin_unlock_irq(&ctl->read_lock);
+ schedule();
+ remove_wait_queue(&ctl->change_sleep, &wait);
++ if (ctl->card->shutdown)
++ return -ENODEV;
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ spin_lock_irq(&ctl->read_lock);
+diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c
+index 75ea16f..3f7f662 100644
+--- a/sound/core/hwdep.c
++++ b/sound/core/hwdep.c
+@@ -100,8 +100,10 @@ static int snd_hwdep_open(struct inode *inode, struct file * file)
+ if (hw == NULL)
+ return -ENODEV;
+
+- if (!try_module_get(hw->card->module))
++ if (!try_module_get(hw->card->module)) {
++ snd_card_unref(hw->card);
+ return -EFAULT;
++ }
+
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&hw->open_wait, &wait);
+@@ -129,6 +131,10 @@ static int snd_hwdep_open(struct inode *inode, struct file * file)
+ mutex_unlock(&hw->open_mutex);
+ schedule();
+ mutex_lock(&hw->open_mutex);
++ if (hw->card->shutdown) {
++ err = -ENODEV;
++ break;
++ }
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
+ break;
+@@ -148,6 +154,7 @@ static int snd_hwdep_open(struct inode *inode, struct file * file)
+ mutex_unlock(&hw->open_mutex);
+ if (err < 0)
+ module_put(hw->card->module);
++ snd_card_unref(hw->card);
+ return err;
+ }
+
+@@ -459,12 +466,15 @@ static int snd_hwdep_dev_disconnect(struct snd_device *device)
+ mutex_unlock(&register_mutex);
+ return -EINVAL;
+ }
++ mutex_lock(&hwdep->open_mutex);
++ wake_up(&hwdep->open_wait);
+ #ifdef CONFIG_SND_OSSEMUL
+ if (hwdep->ossreg)
+ snd_unregister_oss_device(hwdep->oss_type, hwdep->card, hwdep->device);
+ #endif
+ snd_unregister_device(SNDRV_DEVICE_TYPE_HWDEP, hwdep->card, hwdep->device);
+ list_del_init(&hwdep->list);
++ mutex_unlock(&hwdep->open_mutex);
+ mutex_unlock(&register_mutex);
+ return 0;
+ }
+diff --git a/sound/core/init.c b/sound/core/init.c
+index 3ac49b1..fa0f35b 100644
+--- a/sound/core/init.c
++++ b/sound/core/init.c
+@@ -212,6 +212,7 @@ int snd_card_create(int idx, const char *xid,
+ spin_lock_init(&card->files_lock);
+ INIT_LIST_HEAD(&card->files_list);
+ init_waitqueue_head(&card->shutdown_sleep);
++ atomic_set(&card->refcount, 0);
+ #ifdef CONFIG_PM
+ mutex_init(&card->power_lock);
+ init_waitqueue_head(&card->power_sleep);
+@@ -445,21 +446,36 @@ static int snd_card_do_free(struct snd_card *card)
+ return 0;
+ }
+
++/**
++ * snd_card_unref - release the reference counter
++ * @card: the card instance
++ *
++ * Decrements the reference counter. When it reaches to zero, wake up
++ * the sleeper and call the destructor if needed.
++ */
++void snd_card_unref(struct snd_card *card)
++{
++ if (atomic_dec_and_test(&card->refcount)) {
++ wake_up(&card->shutdown_sleep);
++ if (card->free_on_last_close)
++ snd_card_do_free(card);
++ }
++}
++EXPORT_SYMBOL(snd_card_unref);
++
+ int snd_card_free_when_closed(struct snd_card *card)
+ {
+- int free_now = 0;
+- int ret = snd_card_disconnect(card);
+- if (ret)
+- return ret;
++ int ret;
+
+- spin_lock(&card->files_lock);
+- if (list_empty(&card->files_list))
+- free_now = 1;
+- else
+- card->free_on_last_close = 1;
+- spin_unlock(&card->files_lock);
++ atomic_inc(&card->refcount);
++ ret = snd_card_disconnect(card);
++ if (ret) {
++ atomic_dec(&card->refcount);
++ return ret;
++ }
+
+- if (free_now)
++ card->free_on_last_close = 1;
++ if (atomic_dec_and_test(&card->refcount))
+ snd_card_do_free(card);
+ return 0;
+ }
+@@ -473,7 +489,7 @@ int snd_card_free(struct snd_card *card)
+ return ret;
+
+ /* wait, until all devices are ready for the free operation */
+- wait_event(card->shutdown_sleep, list_empty(&card->files_list));
++ wait_event(card->shutdown_sleep, !atomic_read(&card->refcount));
+ snd_card_do_free(card);
+ return 0;
+ }
+@@ -854,6 +870,7 @@ int snd_card_file_add(struct snd_card *card, struct file *file)
+ return -ENODEV;
+ }
+ list_add(&mfile->list, &card->files_list);
++ atomic_inc(&card->refcount);
+ spin_unlock(&card->files_lock);
+ return 0;
+ }
+@@ -876,7 +893,6 @@ EXPORT_SYMBOL(snd_card_file_add);
+ int snd_card_file_remove(struct snd_card *card, struct file *file)
+ {
+ struct snd_monitor_file *mfile, *found = NULL;
+- int last_close = 0;
+
+ spin_lock(&card->files_lock);
+ list_for_each_entry(mfile, &card->files_list, list) {
+@@ -891,19 +907,13 @@ int snd_card_file_remove(struct snd_card *card, struct file *file)
+ break;
+ }
+ }
+- if (list_empty(&card->files_list))
+- last_close = 1;
+ spin_unlock(&card->files_lock);
+- if (last_close) {
+- wake_up(&card->shutdown_sleep);
+- if (card->free_on_last_close)
+- snd_card_do_free(card);
+- }
+ if (!found) {
+ snd_printk(KERN_ERR "ALSA card file remove problem (%p)\n", file);
+ return -ENOENT;
+ }
+ kfree(found);
++ snd_card_unref(card);
+ return 0;
+ }
+
+diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c
+index 18297f7..c353768 100644
+--- a/sound/core/oss/mixer_oss.c
++++ b/sound/core/oss/mixer_oss.c
+@@ -52,14 +52,19 @@ static int snd_mixer_oss_open(struct inode *inode, struct file *file)
+ SNDRV_OSS_DEVICE_TYPE_MIXER);
+ if (card == NULL)
+ return -ENODEV;
+- if (card->mixer_oss == NULL)
++ if (card->mixer_oss == NULL) {
++ snd_card_unref(card);
+ return -ENODEV;
++ }
+ err = snd_card_file_add(card, file);
+- if (err < 0)
++ if (err < 0) {
++ snd_card_unref(card);
+ return err;
++ }
+ fmixer = kzalloc(sizeof(*fmixer), GFP_KERNEL);
+ if (fmixer == NULL) {
+ snd_card_file_remove(card, file);
++ snd_card_unref(card);
+ return -ENOMEM;
+ }
+ fmixer->card = card;
+@@ -68,8 +73,10 @@ static int snd_mixer_oss_open(struct inode *inode, struct file *file)
+ if (!try_module_get(card->module)) {
+ kfree(fmixer);
+ snd_card_file_remove(card, file);
++ snd_card_unref(card);
+ return -EFAULT;
+ }
++ snd_card_unref(card);
+ return 0;
+ }
+
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
+index 3cc4b86..542f69e 100644
+--- a/sound/core/oss/pcm_oss.c
++++ b/sound/core/oss/pcm_oss.c
+@@ -2441,6 +2441,10 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file)
+ mutex_unlock(&pcm->open_mutex);
+ schedule();
+ mutex_lock(&pcm->open_mutex);
++ if (pcm->card->shutdown) {
++ err = -ENODEV;
++ break;
++ }
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
+ break;
+@@ -2450,6 +2454,7 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file)
+ mutex_unlock(&pcm->open_mutex);
+ if (err < 0)
+ goto __error;
++ snd_card_unref(pcm->card);
+ return err;
+
+ __error:
+@@ -2457,6 +2462,8 @@ static int snd_pcm_oss_open(struct inode *inode, struct file *file)
+ __error2:
+ snd_card_file_remove(pcm->card, file);
+ __error1:
++ if (pcm)
++ snd_card_unref(pcm->card);
+ return err;
+ }
+
+diff --git a/sound/core/pcm.c b/sound/core/pcm.c
+index 8928ca87..13eaeb3 100644
+--- a/sound/core/pcm.c
++++ b/sound/core/pcm.c
+@@ -1046,11 +1046,19 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
+ if (list_empty(&pcm->list))
+ goto unlock;
+
++ mutex_lock(&pcm->open_mutex);
++ wake_up(&pcm->open_wait);
+ list_del_init(&pcm->list);
+ for (cidx = 0; cidx < 2; cidx++)
+- for (substream = pcm->streams[cidx].substream; substream; substream = substream->next)
+- if (substream->runtime)
++ for (substream = pcm->streams[cidx].substream; substream; substream = substream->next) {
++ snd_pcm_stream_lock_irq(substream);
++ if (substream->runtime) {
+ substream->runtime->status->state = SNDRV_PCM_STATE_DISCONNECTED;
++ wake_up(&substream->runtime->sleep);
++ wake_up(&substream->runtime->tsleep);
++ }
++ snd_pcm_stream_unlock_irq(substream);
++ }
+ list_for_each_entry(notify, &snd_pcm_notify_list, list) {
+ notify->n_disconnect(pcm);
+ }
+@@ -1066,6 +1074,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
+ }
+ snd_unregister_device(devtype, pcm->card, pcm->device);
+ }
++ mutex_unlock(&pcm->open_mutex);
+ unlock:
+ mutex_unlock(&register_mutex);
+ return 0;
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 25ed9fe..7ada40e 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -369,6 +369,14 @@ static int period_to_usecs(struct snd_pcm_runtime *runtime)
+ return usecs;
+ }
+
++static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
++{
++ snd_pcm_stream_lock_irq(substream);
++ if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
++ substream->runtime->status->state = state;
++ snd_pcm_stream_unlock_irq(substream);
++}
++
+ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+ {
+@@ -452,7 +460,7 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
+ runtime->boundary *= 2;
+
+ snd_pcm_timer_resolution_change(substream);
+- runtime->status->state = SNDRV_PCM_STATE_SETUP;
++ snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
+
+ if (pm_qos_request_active(&substream->latency_pm_qos_req))
+ pm_qos_remove_request(&substream->latency_pm_qos_req);
+@@ -464,7 +472,7 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
+ /* hardware might be unusable from this time,
+ so we force application to retry to set
+ the correct hardware parameter settings */
+- runtime->status->state = SNDRV_PCM_STATE_OPEN;
++ snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
+ if (substream->ops->hw_free != NULL)
+ substream->ops->hw_free(substream);
+ return err;
+@@ -512,7 +520,7 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
+ return -EBADFD;
+ if (substream->ops->hw_free)
+ result = substream->ops->hw_free(substream);
+- runtime->status->state = SNDRV_PCM_STATE_OPEN;
++ snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
+ pm_qos_remove_request(&substream->latency_pm_qos_req);
+ return result;
+ }
+@@ -1320,7 +1328,7 @@ static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state)
+ {
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ runtime->control->appl_ptr = runtime->status->hw_ptr;
+- runtime->status->state = SNDRV_PCM_STATE_PREPARED;
++ snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
+ }
+
+ static struct action_ops snd_pcm_action_prepare = {
+@@ -1500,6 +1508,10 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
+ down_read(&snd_pcm_link_rwsem);
+ snd_pcm_stream_lock_irq(substream);
+ remove_wait_queue(&to_check->sleep, &wait);
++ if (card->shutdown) {
++ result = -ENODEV;
++ break;
++ }
+ if (tout == 0) {
+ if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
+ result = -ESTRPIPE;
+@@ -1620,6 +1632,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
+ _end:
+ write_unlock_irq(&snd_pcm_link_rwlock);
+ up_write(&snd_pcm_link_rwsem);
++ snd_card_unref(substream1->pcm->card);
+ fput(file);
+ return res;
+ }
+@@ -2092,7 +2105,10 @@ static int snd_pcm_playback_open(struct inode *inode, struct file *file)
+ return err;
+ pcm = snd_lookup_minor_data(iminor(inode),
+ SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
+- return snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
++ err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
++ if (pcm)
++ snd_card_unref(pcm->card);
++ return err;
+ }
+
+ static int snd_pcm_capture_open(struct inode *inode, struct file *file)
+@@ -2103,7 +2119,10 @@ static int snd_pcm_capture_open(struct inode *inode, struct file *file)
+ return err;
+ pcm = snd_lookup_minor_data(iminor(inode),
+ SNDRV_DEVICE_TYPE_PCM_CAPTURE);
+- return snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
++ err = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
++ if (pcm)
++ snd_card_unref(pcm->card);
++ return err;
+ }
+
+ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
+@@ -2140,6 +2159,10 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
+ mutex_unlock(&pcm->open_mutex);
+ schedule();
+ mutex_lock(&pcm->open_mutex);
++ if (pcm->card->shutdown) {
++ err = -ENODEV;
++ break;
++ }
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
+ break;
+diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
+index ebf6e49..1bb95ae 100644
+--- a/sound/core/rawmidi.c
++++ b/sound/core/rawmidi.c
+@@ -379,8 +379,10 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
+ if (rmidi == NULL)
+ return -ENODEV;
+
+- if (!try_module_get(rmidi->card->module))
++ if (!try_module_get(rmidi->card->module)) {
++ snd_card_unref(rmidi->card);
+ return -ENXIO;
++ }
+
+ mutex_lock(&rmidi->open_mutex);
+ card = rmidi->card;
+@@ -422,6 +424,10 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
+ mutex_unlock(&rmidi->open_mutex);
+ schedule();
+ mutex_lock(&rmidi->open_mutex);
++ if (rmidi->card->shutdown) {
++ err = -ENODEV;
++ break;
++ }
+ if (signal_pending(current)) {
+ err = -ERESTARTSYS;
+ break;
+@@ -440,6 +446,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
+ #endif
+ file->private_data = rawmidi_file;
+ mutex_unlock(&rmidi->open_mutex);
++ snd_card_unref(rmidi->card);
+ return 0;
+
+ __error:
+@@ -447,6 +454,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
+ __error_card:
+ mutex_unlock(&rmidi->open_mutex);
+ module_put(rmidi->card->module);
++ snd_card_unref(rmidi->card);
+ return err;
+ }
+
+@@ -991,6 +999,8 @@ static ssize_t snd_rawmidi_read(struct file *file, char __user *buf, size_t coun
+ spin_unlock_irq(&runtime->lock);
+ schedule();
+ remove_wait_queue(&runtime->sleep, &wait);
++ if (rfile->rmidi->card->shutdown)
++ return -ENODEV;
+ if (signal_pending(current))
+ return result > 0 ? result : -ERESTARTSYS;
+ if (!runtime->avail)
+@@ -1234,6 +1244,8 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
+ spin_unlock_irq(&runtime->lock);
+ timeout = schedule_timeout(30 * HZ);
+ remove_wait_queue(&runtime->sleep, &wait);
++ if (rfile->rmidi->card->shutdown)
++ return -ENODEV;
+ if (signal_pending(current))
+ return result > 0 ? result : -ERESTARTSYS;
+ if (!runtime->avail && !timeout)
+@@ -1609,9 +1621,20 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
+ static int snd_rawmidi_dev_disconnect(struct snd_device *device)
+ {
+ struct snd_rawmidi *rmidi = device->device_data;
++ int dir;
+
+ mutex_lock(&register_mutex);
++ mutex_lock(&rmidi->open_mutex);
++ wake_up(&rmidi->open_wait);
+ list_del_init(&rmidi->list);
++ for (dir = 0; dir < 2; dir++) {
++ struct snd_rawmidi_substream *s;
++ list_for_each_entry(s, &rmidi->streams[dir].substreams, list) {
++ if (s->runtime)
++ wake_up(&s->runtime->sleep);
++ }
++ }
++
+ #ifdef CONFIG_SND_OSSEMUL
+ if (rmidi->ossreg) {
+ if ((int)rmidi->device == midi_map[rmidi->card->number]) {
+@@ -1626,6 +1649,7 @@ static int snd_rawmidi_dev_disconnect(struct snd_device *device)
+ }
+ #endif /* CONFIG_SND_OSSEMUL */
+ snd_unregister_device(SNDRV_DEVICE_TYPE_RAWMIDI, rmidi->card, rmidi->device);
++ mutex_unlock(&rmidi->open_mutex);
+ mutex_unlock(&register_mutex);
+ return 0;
+ }
+diff --git a/sound/core/sound.c b/sound/core/sound.c
+index 828af35..8e17b4d 100644
+--- a/sound/core/sound.c
++++ b/sound/core/sound.c
+@@ -99,6 +99,10 @@ static void snd_request_other(int minor)
+ *
+ * Checks that a minor device with the specified type is registered, and returns
+ * its user data pointer.
++ *
++ * This function increments the reference counter of the card instance
++ * if an associated instance with the given minor number and type is found.
++ * The caller must call snd_card_unref() appropriately later.
+ */
+ void *snd_lookup_minor_data(unsigned int minor, int type)
+ {
+@@ -109,9 +113,11 @@ void *snd_lookup_minor_data(unsigned int minor, int type)
+ return NULL;
+ mutex_lock(&sound_mutex);
+ mreg = snd_minors[minor];
+- if (mreg && mreg->type == type)
++ if (mreg && mreg->type == type) {
+ private_data = mreg->private_data;
+- else
++ if (private_data && mreg->card_ptr)
++ atomic_inc(&mreg->card_ptr->refcount);
++ } else
+ private_data = NULL;
+ mutex_unlock(&sound_mutex);
+ return private_data;
+@@ -275,6 +281,7 @@ int snd_register_device_for_dev(int type, struct snd_card *card, int dev,
+ preg->device = dev;
+ preg->f_ops = f_ops;
+ preg->private_data = private_data;
++ preg->card_ptr = card;
+ mutex_lock(&sound_mutex);
+ #ifdef CONFIG_SND_DYNAMIC_MINORS
+ minor = snd_find_free_minor(type);
+diff --git a/sound/core/sound_oss.c b/sound/core/sound_oss.c
+index c700920..ec86009 100644
+--- a/sound/core/sound_oss.c
++++ b/sound/core/sound_oss.c
+@@ -40,6 +40,9 @@
+ static struct snd_minor *snd_oss_minors[SNDRV_OSS_MINORS];
+ static DEFINE_MUTEX(sound_oss_mutex);
+
++/* NOTE: This function increments the refcount of the associated card like
++ * snd_lookup_minor_data(); the caller must call snd_card_unref() appropriately
++ */
+ void *snd_lookup_oss_minor_data(unsigned int minor, int type)
+ {
+ struct snd_minor *mreg;
+@@ -49,9 +52,11 @@ void *snd_lookup_oss_minor_data(unsigned int minor, int type)
+ return NULL;
+ mutex_lock(&sound_oss_mutex);
+ mreg = snd_oss_minors[minor];
+- if (mreg && mreg->type == type)
++ if (mreg && mreg->type == type) {
+ private_data = mreg->private_data;
+- else
++ if (private_data && mreg->card_ptr)
++ atomic_inc(&mreg->card_ptr->refcount);
++ } else
+ private_data = NULL;
+ mutex_unlock(&sound_oss_mutex);
+ return private_data;
+@@ -123,6 +128,7 @@ int snd_register_oss_device(int type, struct snd_card *card, int dev,
+ preg->device = dev;
+ preg->f_ops = f_ops;
+ preg->private_data = private_data;
++ preg->card_ptr = card;
+ mutex_lock(&sound_oss_mutex);
+ snd_oss_minors[minor] = preg;
+ minor_unit = SNDRV_MINOR_OSS_DEVICE(minor);
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
+index bcb3310..b4890f9 100644
+--- a/sound/pci/hda/patch_analog.c
++++ b/sound/pci/hda/patch_analog.c
+@@ -573,6 +573,7 @@ static int ad198x_build_pcms(struct hda_codec *codec)
+ if (spec->multiout.dig_out_nid) {
+ info++;
+ codec->num_pcms++;
++ codec->spdif_status_reset = 1;
+ info->name = "AD198x Digital";
+ info->pcm_type = HDA_PCM_TYPE_SPDIF;
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ad198x_pcm_digital_playback;
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index e449278..0ed6867 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -93,8 +93,8 @@ enum {
+ #define CS420X_VENDOR_NID 0x11
+ #define CS_DIG_OUT1_PIN_NID 0x10
+ #define CS_DIG_OUT2_PIN_NID 0x15
+-#define CS_DMIC1_PIN_NID 0x12
+-#define CS_DMIC2_PIN_NID 0x0e
++#define CS_DMIC1_PIN_NID 0x0e
++#define CS_DMIC2_PIN_NID 0x12
+
+ /* coef indices */
+ #define IDX_SPDIF_STAT 0x0000
+@@ -1088,14 +1088,18 @@ static void init_input(struct hda_codec *codec)
+ cs_automic(codec);
+
+ coef = 0x000a; /* ADC1/2 - Digital and Analog Soft Ramp */
++ cs_vendor_coef_set(codec, IDX_ADC_CFG, coef);
++
++ coef = cs_vendor_coef_get(codec, IDX_BEEP_CFG);
+ if (is_active_pin(codec, CS_DMIC2_PIN_NID))
+- coef |= 0x0500; /* DMIC2 2 chan on, GPIO1 off */
++ coef |= 1 << 4; /* DMIC2 2 chan on, GPIO1 off */
+ if (is_active_pin(codec, CS_DMIC1_PIN_NID))
+- coef |= 0x1800; /* DMIC1 2 chan on, GPIO0 off
++ coef |= 1 << 3; /* DMIC1 2 chan on, GPIO0 off
+ * No effect if SPDIF_OUT2 is
+ * selected in IDX_SPDIF_CTL.
+ */
+- cs_vendor_coef_set(codec, IDX_ADC_CFG, coef);
++
++ cs_vendor_coef_set(codec, IDX_BEEP_CFG, coef);
+ }
+ }
+
+@@ -1109,7 +1113,7 @@ static const struct hda_verb cs_coef_init_verbs[] = {
+ | 0x0400 /* Disable Coefficient Auto increment */
+ )},
+ /* Beep */
+- {0x11, AC_VERB_SET_COEF_INDEX, IDX_DAC_CFG},
++ {0x11, AC_VERB_SET_COEF_INDEX, IDX_BEEP_CFG},
+ {0x11, AC_VERB_SET_PROC_COEF, 0x0007}, /* Enable Beep thru DAC1/2/3 */
+
+ {} /* terminator */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index c2c7f90..3ce2da2 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6039,6 +6039,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ .patch = patch_alc662 },
+ { .id = 0x10ec0663, .name = "ALC663", .patch = patch_alc662 },
+ { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 },
++ { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 },
+ { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 },
+ { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 },
+ { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
+@@ -6056,6 +6057,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ { .id = 0x10ec0889, .name = "ALC889", .patch = patch_alc882 },
+ { .id = 0x10ec0892, .name = "ALC892", .patch = patch_alc662 },
+ { .id = 0x10ec0899, .name = "ALC898", .patch = patch_alc882 },
++ { .id = 0x10ec0900, .name = "ALC1150", .patch = patch_alc882 },
+ {} /* terminator */
+ };
+
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index 7160ff2..9e0c889 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -1856,11 +1856,11 @@ static int via_auto_fill_dac_nids(struct hda_codec *codec)
+ {
+ struct via_spec *spec = codec->spec;
+ const struct auto_pin_cfg *cfg = &spec->autocfg;
+- int i, dac_num;
++ int i;
+ hda_nid_t nid;
+
++ spec->multiout.num_dacs = 0;
+ spec->multiout.dac_nids = spec->private_dac_nids;
+- dac_num = 0;
+ for (i = 0; i < cfg->line_outs; i++) {
+ hda_nid_t dac = 0;
+ nid = cfg->line_out_pins[i];
+@@ -1871,16 +1871,13 @@ static int via_auto_fill_dac_nids(struct hda_codec *codec)
+ if (!i && parse_output_path(codec, nid, dac, 1,
+ &spec->out_mix_path))
+ dac = spec->out_mix_path.path[0];
+- if (dac) {
+- spec->private_dac_nids[i] = dac;
+- dac_num++;
+- }
++ if (dac)
++ spec->private_dac_nids[spec->multiout.num_dacs++] = dac;
+ }
+ if (!spec->out_path[0].depth && spec->out_mix_path.depth) {
+ spec->out_path[0] = spec->out_mix_path;
+ spec->out_mix_path.depth = 0;
+ }
+- spec->multiout.num_dacs = dac_num;
+ return 0;
+ }
+
+@@ -3689,6 +3686,18 @@ static void set_widgets_power_state_vt2002P(struct hda_codec *codec)
+ AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+ }
+
++/* NIDs 0x24 and 0x33 on VT1802 have connections to non-existing NID 0x3e
++ * Replace this with mixer NID 0x1c
++ */
++static void fix_vt1802_connections(struct hda_codec *codec)
++{
++ static hda_nid_t conn_24[] = { 0x14, 0x1c };
++ static hda_nid_t conn_33[] = { 0x1c };
++
++ snd_hda_override_conn_list(codec, 0x24, ARRAY_SIZE(conn_24), conn_24);
++ snd_hda_override_conn_list(codec, 0x33, ARRAY_SIZE(conn_33), conn_33);
++}
++
+ /* patch for vt2002P */
+ static int patch_vt2002P(struct hda_codec *codec)
+ {
+@@ -3703,6 +3712,8 @@ static int patch_vt2002P(struct hda_codec *codec)
+ spec->aa_mix_nid = 0x21;
+ override_mic_boost(codec, 0x2b, 0, 3, 40);
+ override_mic_boost(codec, 0x29, 0, 3, 40);
++ if (spec->codec_type == VT1802)
++ fix_vt1802_connections(codec);
+ add_secret_dac_path(codec);
+
+ /* automatic parse from the BIOS config */
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 0f6dc0d..566acb3 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -336,7 +336,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx,
+ return -ENOMEM;
+ }
+
+- mutex_init(&chip->shutdown_mutex);
++ init_rwsem(&chip->shutdown_rwsem);
+ chip->index = idx;
+ chip->dev = dev;
+ chip->card = card;
+@@ -555,9 +555,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
+ return;
+
+ card = chip->card;
+- mutex_lock(&register_mutex);
+- mutex_lock(&chip->shutdown_mutex);
++ down_write(&chip->shutdown_rwsem);
+ chip->shutdown = 1;
++ up_write(&chip->shutdown_rwsem);
++
++ mutex_lock(&register_mutex);
+ chip->num_interfaces--;
+ if (chip->num_interfaces <= 0) {
+ snd_card_disconnect(card);
+@@ -574,11 +576,9 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
+ snd_usb_mixer_disconnect(p);
+ }
+ usb_chip[chip->index] = NULL;
+- mutex_unlock(&chip->shutdown_mutex);
+ mutex_unlock(&register_mutex);
+ snd_card_free_when_closed(card);
+ } else {
+- mutex_unlock(&chip->shutdown_mutex);
+ mutex_unlock(&register_mutex);
+ }
+ }
+@@ -610,16 +610,20 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
+ {
+ int err = -ENODEV;
+
++ down_read(&chip->shutdown_rwsem);
+ if (!chip->shutdown && !chip->probing)
+ err = usb_autopm_get_interface(chip->pm_intf);
++ up_read(&chip->shutdown_rwsem);
+
+ return err;
+ }
+
+ void snd_usb_autosuspend(struct snd_usb_audio *chip)
+ {
++ down_read(&chip->shutdown_rwsem);
+ if (!chip->shutdown && !chip->probing)
+ usb_autopm_put_interface(chip->pm_intf);
++ up_read(&chip->shutdown_rwsem);
+ }
+
+ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
+diff --git a/sound/usb/card.h b/sound/usb/card.h
+index a39edcc..665e297 100644
+--- a/sound/usb/card.h
++++ b/sound/usb/card.h
+@@ -86,6 +86,7 @@ struct snd_usb_substream {
+ struct snd_urb_ctx syncurb[SYNC_URBS]; /* sync urb table */
+ char *syncbuf; /* sync buffer for all sync URBs */
+ dma_addr_t sync_dma; /* DMA address of syncbuf */
++ unsigned int speed; /* USB_SPEED_XXX */
+
+ u64 formats; /* format bitmasks (all or'ed) */
+ unsigned int num_formats; /* number of supported audio formats (list) */
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 08dcce5..24c5114 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -148,8 +148,10 @@ void snd_usb_release_substream_urbs(struct snd_usb_substream *subs, int force)
+ int i;
+
+ /* stop urbs (to be sure) */
+- deactivate_urbs(subs, force, 1);
+- wait_clear_urbs(subs);
++ if (!subs->stream->chip->shutdown) {
++ deactivate_urbs(subs, force, 1);
++ wait_clear_urbs(subs);
++ }
+
+ for (i = 0; i < MAX_URBS; i++)
+ release_urb_ctx(&subs->dataurb[i]);
+@@ -895,7 +897,8 @@ void snd_usb_init_substream(struct snd_usb_stream *as,
+ subs->dev = as->chip->dev;
+ subs->txfr_quirk = as->chip->txfr_quirk;
+ subs->ops = audio_urb_ops[stream];
+- if (snd_usb_get_speed(subs->dev) >= USB_SPEED_HIGH)
++ subs->speed = snd_usb_get_speed(subs->dev);
++ if (subs->speed >= USB_SPEED_HIGH)
+ subs->ops.prepare_sync = prepare_capture_sync_urb_hs;
+
+ snd_usb_set_pcm_ops(as->pcm, stream);
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index ab23869..6730a33 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -287,25 +287,32 @@ static int get_ctl_value_v1(struct usb_mixer_elem_info *cval, int request, int v
+ unsigned char buf[2];
+ int val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
+ int timeout = 10;
+- int err;
++ int idx = 0, err;
+
+ err = snd_usb_autoresume(cval->mixer->chip);
+ if (err < 0)
+ return -EIO;
++ down_read(&chip->shutdown_rwsem);
+ while (timeout-- > 0) {
++ if (chip->shutdown)
++ break;
++ idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
+ if (snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), request,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+- validx, snd_usb_ctrl_intf(chip) | (cval->id << 8),
+- buf, val_len) >= val_len) {
++ validx, idx, buf, val_len) >= val_len) {
+ *value_ret = convert_signed_value(cval, snd_usb_combine_bytes(buf, val_len));
+- snd_usb_autosuspend(cval->mixer->chip);
+- return 0;
++ err = 0;
++ goto out;
+ }
+ }
+- snd_usb_autosuspend(cval->mixer->chip);
+ snd_printdd(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
+- request, validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), cval->val_type);
+- return -EINVAL;
++ request, validx, idx, cval->val_type);
++ err = -EINVAL;
++
++ out:
++ up_read(&chip->shutdown_rwsem);
++ snd_usb_autosuspend(cval->mixer->chip);
++ return err;
+ }
+
+ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret)
+@@ -313,7 +320,7 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int v
+ struct snd_usb_audio *chip = cval->mixer->chip;
+ unsigned char buf[2 + 3*sizeof(__u16)]; /* enough space for one range */
+ unsigned char *val;
+- int ret, size;
++ int idx = 0, ret, size;
+ __u8 bRequest;
+
+ if (request == UAC_GET_CUR) {
+@@ -330,16 +337,22 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int v
+ if (ret)
+ goto error;
+
+- ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
++ down_read(&chip->shutdown_rwsem);
++ if (chip->shutdown)
++ ret = -ENODEV;
++ else {
++ idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
++ ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+- validx, snd_usb_ctrl_intf(chip) | (cval->id << 8),
+- buf, size);
++ validx, idx, buf, size);
++ }
++ up_read(&chip->shutdown_rwsem);
+ snd_usb_autosuspend(chip);
+
+ if (ret < 0) {
+ error:
+ snd_printk(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
+- request, validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), cval->val_type);
++ request, validx, idx, cval->val_type);
+ return ret;
+ }
+
+@@ -417,7 +430,7 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
+ {
+ struct snd_usb_audio *chip = cval->mixer->chip;
+ unsigned char buf[2];
+- int val_len, err, timeout = 10;
++ int idx = 0, val_len, err, timeout = 10;
+
+ if (cval->mixer->protocol == UAC_VERSION_1) {
+ val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
+@@ -440,19 +453,27 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
+ err = snd_usb_autoresume(chip);
+ if (err < 0)
+ return -EIO;
+- while (timeout-- > 0)
++ down_read(&chip->shutdown_rwsem);
++ while (timeout-- > 0) {
++ if (chip->shutdown)
++ break;
++ idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
+ if (snd_usb_ctl_msg(chip->dev,
+ usb_sndctrlpipe(chip->dev, 0), request,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
+- validx, snd_usb_ctrl_intf(chip) | (cval->id << 8),
+- buf, val_len) >= 0) {
+- snd_usb_autosuspend(chip);
+- return 0;
++ validx, idx, buf, val_len) >= 0) {
++ err = 0;
++ goto out;
+ }
+- snd_usb_autosuspend(chip);
++ }
+ snd_printdd(KERN_ERR "cannot set ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d, data = %#x/%#x\n",
+- request, validx, snd_usb_ctrl_intf(chip) | (cval->id << 8), cval->val_type, buf[0], buf[1]);
+- return -EINVAL;
++ request, validx, idx, cval->val_type, buf[0], buf[1]);
++ err = -EINVAL;
++
++ out:
++ up_read(&chip->shutdown_rwsem);
++ snd_usb_autosuspend(chip);
++ return err;
+ }
+
+ static int set_cur_ctl_value(struct usb_mixer_elem_info *cval, int validx, int value)
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index ab125ee..38a607a 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -186,6 +186,11 @@ static int snd_audigy2nx_led_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e
+ if (value > 1)
+ return -EINVAL;
+ changed = value != mixer->audigy2nx_leds[index];
++ down_read(&mixer->chip->shutdown_rwsem);
++ if (mixer->chip->shutdown) {
++ err = -ENODEV;
++ goto out;
++ }
+ if (mixer->chip->usb_id == USB_ID(0x041e, 0x3042))
+ err = snd_usb_ctl_msg(mixer->chip->dev,
+ usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
+@@ -202,6 +207,8 @@ static int snd_audigy2nx_led_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e
+ usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+ value, index + 2, NULL, 0);
++ out:
++ up_read(&mixer->chip->shutdown_rwsem);
+ if (err < 0)
+ return err;
+ mixer->audigy2nx_leds[index] = value;
+@@ -295,11 +302,16 @@ static void snd_audigy2nx_proc_read(struct snd_info_entry *entry,
+
+ for (i = 0; jacks[i].name; ++i) {
+ snd_iprintf(buffer, "%s: ", jacks[i].name);
+- err = snd_usb_ctl_msg(mixer->chip->dev,
++ down_read(&mixer->chip->shutdown_rwsem);
++ if (mixer->chip->shutdown)
++ err = 0;
++ else
++ err = snd_usb_ctl_msg(mixer->chip->dev,
+ usb_rcvctrlpipe(mixer->chip->dev, 0),
+ UAC_GET_MEM, USB_DIR_IN | USB_TYPE_CLASS |
+ USB_RECIP_INTERFACE, 0,
+ jacks[i].unitid << 8, buf, 3);
++ up_read(&mixer->chip->shutdown_rwsem);
+ if (err == 3 && (buf[0] == 3 || buf[0] == 6))
+ snd_iprintf(buffer, "%02x %02x\n", buf[1], buf[2]);
+ else
+@@ -329,10 +341,15 @@ static int snd_xonar_u1_switch_put(struct snd_kcontrol *kcontrol,
+ else
+ new_status = old_status & ~0x02;
+ changed = new_status != old_status;
+- err = snd_usb_ctl_msg(mixer->chip->dev,
++ down_read(&mixer->chip->shutdown_rwsem);
++ if (mixer->chip->shutdown)
++ err = -ENODEV;
++ else
++ err = snd_usb_ctl_msg(mixer->chip->dev,
+ usb_sndctrlpipe(mixer->chip->dev, 0), 0x08,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+ 50, 0, &new_status, 1);
++ up_read(&mixer->chip->shutdown_rwsem);
+ if (err < 0)
+ return err;
+ mixer->xonar_u1_status = new_status;
+@@ -371,11 +388,17 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
+ u8 bRequest = (kcontrol->private_value >> 16) & 0xff;
+ u16 wIndex = kcontrol->private_value & 0xffff;
+ u8 tmp;
++ int ret;
+
+- int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
++ down_read(&mixer->chip->shutdown_rwsem);
++ if (mixer->chip->shutdown)
++ ret = -ENODEV;
++ else
++ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ 0, cpu_to_le16(wIndex),
+ &tmp, sizeof(tmp), 1000);
++ up_read(&mixer->chip->shutdown_rwsem);
+
+ if (ret < 0) {
+ snd_printk(KERN_ERR
+@@ -396,11 +419,17 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
+ u8 bRequest = (kcontrol->private_value >> 16) & 0xff;
+ u16 wIndex = kcontrol->private_value & 0xffff;
+ u16 wValue = ucontrol->value.integer.value[0];
++ int ret;
+
+- int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
++ down_read(&mixer->chip->shutdown_rwsem);
++ if (mixer->chip->shutdown)
++ ret = -ENODEV;
++ else
++ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ cpu_to_le16(wValue), cpu_to_le16(wIndex),
+ NULL, 0, 1000);
++ up_read(&mixer->chip->shutdown_rwsem);
+
+ if (ret < 0) {
+ snd_printk(KERN_ERR
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 839165f..983e071 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -67,6 +67,8 @@ static snd_pcm_uframes_t snd_usb_pcm_pointer(struct snd_pcm_substream *substream
+ unsigned int hwptr_done;
+
+ subs = (struct snd_usb_substream *)substream->runtime->private_data;
++ if (subs->stream->chip->shutdown)
++ return SNDRV_PCM_POS_XRUN;
+ spin_lock(&subs->lock);
+ hwptr_done = subs->hwptr_done;
+ substream->runtime->delay = snd_usb_pcm_delay(subs,
+@@ -373,8 +375,14 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
+ changed = subs->cur_audiofmt != fmt ||
+ subs->period_bytes != params_period_bytes(hw_params) ||
+ subs->cur_rate != rate;
++
++ down_read(&subs->stream->chip->shutdown_rwsem);
++ if (subs->stream->chip->shutdown) {
++ ret = -ENODEV;
++ goto unlock;
++ }
+ if ((ret = set_format(subs, fmt)) < 0)
+- return ret;
++ goto unlock;
+
+ if (subs->cur_rate != rate) {
+ struct usb_host_interface *alts;
+@@ -383,12 +391,11 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
+ alts = &iface->altsetting[fmt->altset_idx];
+ ret = snd_usb_init_sample_rate(subs->stream->chip, subs->interface, alts, fmt, rate);
+ if (ret < 0)
+- return ret;
++ goto unlock;
+ subs->cur_rate = rate;
+ }
+
+ if (changed) {
+- mutex_lock(&subs->stream->chip->shutdown_mutex);
+ /* format changed */
+ snd_usb_release_substream_urbs(subs, 0);
+ /* influenced: period_bytes, channels, rate, format, */
+@@ -396,9 +403,10 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
+ params_rate(hw_params),
+ snd_pcm_format_physical_width(params_format(hw_params)) *
+ params_channels(hw_params));
+- mutex_unlock(&subs->stream->chip->shutdown_mutex);
+ }
+
++unlock:
++ up_read(&subs->stream->chip->shutdown_rwsem);
+ return ret;
+ }
+
+@@ -414,9 +422,9 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
+ subs->cur_audiofmt = NULL;
+ subs->cur_rate = 0;
+ subs->period_bytes = 0;
+- mutex_lock(&subs->stream->chip->shutdown_mutex);
++ down_read(&subs->stream->chip->shutdown_rwsem);
+ snd_usb_release_substream_urbs(subs, 0);
+- mutex_unlock(&subs->stream->chip->shutdown_mutex);
++ up_read(&subs->stream->chip->shutdown_rwsem);
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
+ }
+
+@@ -429,12 +437,18 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
+ {
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_usb_substream *subs = runtime->private_data;
++ int ret = 0;
+
+ if (! subs->cur_audiofmt) {
+ snd_printk(KERN_ERR "usbaudio: no format is specified!\n");
+ return -ENXIO;
+ }
+
++ down_read(&subs->stream->chip->shutdown_rwsem);
++ if (subs->stream->chip->shutdown) {
++ ret = -ENODEV;
++ goto unlock;
++ }
+ /* some unit conversions in runtime */
+ subs->maxframesize = bytes_to_frames(runtime, subs->maxpacksize);
+ subs->curframesize = bytes_to_frames(runtime, subs->curpacksize);
+@@ -447,7 +461,10 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
+ subs->last_frame_number = 0;
+ runtime->delay = 0;
+
+- return snd_usb_substream_prepare(subs, runtime);
++ ret = snd_usb_substream_prepare(subs, runtime);
++ unlock:
++ up_read(&subs->stream->chip->shutdown_rwsem);
++ return ret;
+ }
+
+ static struct snd_pcm_hardware snd_usb_hardware =
+@@ -500,7 +517,7 @@ static int hw_check_valid_format(struct snd_usb_substream *subs,
+ return 0;
+ }
+ /* check whether the period time is >= the data packet interval */
+- if (snd_usb_get_speed(subs->dev) != USB_SPEED_FULL) {
++ if (subs->speed != USB_SPEED_FULL) {
+ ptime = 125 * (1 << fp->datainterval);
+ if (ptime > pt->max || (ptime == pt->max && pt->openmax)) {
+ hwc_debug(" > check: ptime %u > max %u\n", ptime, pt->max);
+@@ -776,7 +793,7 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
+ return err;
+
+ param_period_time_if_needed = SNDRV_PCM_HW_PARAM_PERIOD_TIME;
+- if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
++ if (subs->speed == USB_SPEED_FULL)
+ /* full speed devices have fixed data packet interval */
+ ptmin = 1000;
+ if (ptmin == 1000)
+diff --git a/sound/usb/proc.c b/sound/usb/proc.c
+index 961c9a2..aef03db 100644
+--- a/sound/usb/proc.c
++++ b/sound/usb/proc.c
+@@ -107,7 +107,7 @@ static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct s
+ }
+ snd_iprintf(buffer, "\n");
+ }
+- if (snd_usb_get_speed(subs->dev) != USB_SPEED_FULL)
++ if (subs->speed != USB_SPEED_FULL)
+ snd_iprintf(buffer, " Data packet interval: %d us\n",
+ 125 * (1 << fp->datainterval));
+ // snd_iprintf(buffer, " Max Packet Size = %d\n", fp->maxpacksize);
+@@ -128,7 +128,7 @@ static void proc_dump_substream_status(struct snd_usb_substream *subs, struct sn
+ snd_iprintf(buffer, "]\n");
+ snd_iprintf(buffer, " Packet Size = %d\n", subs->curpacksize);
+ snd_iprintf(buffer, " Momentary freq = %u Hz (%#x.%04x)\n",
+- snd_usb_get_speed(subs->dev) == USB_SPEED_FULL
++ subs->speed == USB_SPEED_FULL
+ ? get_full_speed_hz(subs->freqm)
+ : get_high_speed_hz(subs->freqm),
+ subs->freqm >> 16, subs->freqm & 0xffff);
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index 3e2b035..6c805a5 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -36,7 +36,7 @@ struct snd_usb_audio {
+ struct snd_card *card;
+ struct usb_interface *pm_intf;
+ u32 usb_id;
+- struct mutex shutdown_mutex;
++ struct rw_semaphore shutdown_rwsem;
+ unsigned int shutdown:1;
+ unsigned int probing:1;
+ unsigned int autosuspended:1;
diff --git a/1034_linux-3.2.35.patch b/1034_linux-3.2.35.patch
new file mode 100644
index 00000000..76a9c19d
--- /dev/null
+++ b/1034_linux-3.2.35.patch
@@ -0,0 +1,3014 @@
+diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
+index cc0ebc5..fd129f6 100644
+--- a/Documentation/cgroups/memory.txt
++++ b/Documentation/cgroups/memory.txt
+@@ -440,6 +440,10 @@ Note:
+ 5.3 swappiness
+
+ Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
++Please note that unlike the global swappiness, memcg knob set to 0
++really prevents from any swapping even if there is a swap storage
++available. This might lead to memcg OOM killer if there are no file
++pages to reclaim.
+
+ Following cgroups' swappiness can't be changed.
+ - root cgroup (uses /proc/sys/vm/swappiness).
+diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware
+index e67be7a..48d25ea 100755
+--- a/Documentation/dvb/get_dvb_firmware
++++ b/Documentation/dvb/get_dvb_firmware
+@@ -115,7 +115,7 @@ sub tda10045 {
+
+ sub tda10046 {
+ my $sourcefile = "TT_PCI_2.19h_28_11_2006.zip";
+- my $url = "http://www.tt-download.com/download/updates/219/$sourcefile";
++ my $url = "http://technotrend.com.ua/download/software/219/$sourcefile";
+ my $hash = "6a7e1e2f2644b162ff0502367553c72d";
+ my $outfile = "dvb-fe-tda10046.fw";
+ my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1);
+diff --git a/Makefile b/Makefile
+index 14ebacf..d985af0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 34
++SUBLEVEL = 35
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 9fdc151..27bcd12 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -542,6 +542,7 @@ config ARCH_KIRKWOOD
+ bool "Marvell Kirkwood"
+ select CPU_FEROCEON
+ select PCI
++ select PCI_QUIRKS
+ select ARCH_REQUIRE_GPIOLIB
+ select GENERIC_CLOCKEVENTS
+ select PLAT_ORION
+diff --git a/arch/arm/mach-dove/include/mach/pm.h b/arch/arm/mach-dove/include/mach/pm.h
+index 3ad9f94..11799c3 100644
+--- a/arch/arm/mach-dove/include/mach/pm.h
++++ b/arch/arm/mach-dove/include/mach/pm.h
+@@ -45,7 +45,7 @@ static inline int pmu_to_irq(int pin)
+
+ static inline int irq_to_pmu(int irq)
+ {
+- if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS)
++ if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS)
+ return irq - IRQ_DOVE_PMU_START;
+
+ return -EINVAL;
+diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c
+index f07fd16..9f2fd10 100644
+--- a/arch/arm/mach-dove/irq.c
++++ b/arch/arm/mach-dove/irq.c
+@@ -61,8 +61,20 @@ static void pmu_irq_ack(struct irq_data *d)
+ int pin = irq_to_pmu(d->irq);
+ u32 u;
+
++ /*
++ * The PMU mask register is not RW0C: it is RW. This means that
++ * the bits take whatever value is written to them; if you write
++ * a '1', you will set the interrupt.
++ *
++ * Unfortunately this means there is NO race free way to clear
++ * these interrupts.
++ *
++ * So, let's structure the code so that the window is as small as
++ * possible.
++ */
+ u = ~(1 << (pin & 31));
+- writel(u, PMU_INTERRUPT_CAUSE);
++ u &= readl_relaxed(PMU_INTERRUPT_CAUSE);
++ writel_relaxed(u, PMU_INTERRUPT_CAUSE);
+ }
+
+ static struct irq_chip pmu_irq_chip = {
+diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
+index 74b992d..a881c54 100644
+--- a/arch/arm/mach-kirkwood/pcie.c
++++ b/arch/arm/mach-kirkwood/pcie.c
+@@ -213,14 +213,19 @@ static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys)
+ return 1;
+ }
+
++/*
++ * The root complex has a hardwired class of PCI_CLASS_MEMORY_OTHER, when it
++ * is operating as a root complex this needs to be switched to
++ * PCI_CLASS_BRIDGE_HOST or Linux will errantly try to process the BAR's on
++ * the device. Decoding setup is handled by the orion code.
++ */
+ static void __devinit rc_pci_fixup(struct pci_dev *dev)
+ {
+- /*
+- * Prevent enumeration of root complex.
+- */
+ if (dev->bus->parent == NULL && dev->devfn == 0) {
+ int i;
+
++ dev->class &= 0xff;
++ dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ dev->resource[i].start = 0;
+ dev->resource[i].end = 0;
+diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h
+index 60e8866..93fe83e 100644
+--- a/arch/m68k/include/asm/signal.h
++++ b/arch/m68k/include/asm/signal.h
+@@ -156,7 +156,7 @@ typedef struct sigaltstack {
+ static inline void sigaddset(sigset_t *set, int _sig)
+ {
+ asm ("bfset %0{%1,#1}"
+- : "+od" (*set)
++ : "+o" (*set)
+ : "id" ((_sig - 1) ^ 31)
+ : "cc");
+ }
+@@ -164,7 +164,7 @@ static inline void sigaddset(sigset_t *set, int _sig)
+ static inline void sigdelset(sigset_t *set, int _sig)
+ {
+ asm ("bfclr %0{%1,#1}"
+- : "+od" (*set)
++ : "+o" (*set)
+ : "id" ((_sig - 1) ^ 31)
+ : "cc");
+ }
+@@ -180,7 +180,7 @@ static inline int __gen_sigismember(sigset_t *set, int _sig)
+ int ret;
+ asm ("bfextu %1{%2,#1},%0"
+ : "=d" (ret)
+- : "od" (*set), "id" ((_sig-1) ^ 31)
++ : "o" (*set), "id" ((_sig-1) ^ 31)
+ : "cc");
+ return ret;
+ }
+diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
+index e141324..d0ea054 100644
+--- a/arch/parisc/kernel/signal32.c
++++ b/arch/parisc/kernel/signal32.c
+@@ -67,7 +67,8 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
+ {
+ compat_sigset_t s;
+
+- if (sz != sizeof *set) panic("put_sigset32()");
++ if (sz != sizeof *set)
++ return -EINVAL;
+ sigset_64to32(&s, set);
+
+ return copy_to_user(up, &s, sizeof s);
+@@ -79,7 +80,8 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
+ compat_sigset_t s;
+ int r;
+
+- if (sz != sizeof *set) panic("put_sigset32()");
++ if (sz != sizeof *set)
++ return -EINVAL;
+
+ if ((r = copy_from_user(&s, up, sz)) == 0) {
+ sigset_32to64(set, &s);
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
+index c9b9322..7ea75d1 100644
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -73,6 +73,8 @@ static unsigned long get_shared_area(struct address_space *mapping,
+ struct vm_area_struct *vma;
+ int offset = mapping ? get_offset(mapping) : 0;
+
++ offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
++
+ addr = DCACHE_ALIGN(addr - offset) + offset;
+
+ for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
+index 234f1d8..2e0a15b 100644
+--- a/arch/s390/include/asm/compat.h
++++ b/arch/s390/include/asm/compat.h
+@@ -20,7 +20,7 @@
+ #define PSW32_MASK_CC 0x00003000UL
+ #define PSW32_MASK_PM 0x00000f00UL
+
+-#define PSW32_MASK_USER 0x00003F00UL
++#define PSW32_MASK_USER 0x0000FF00UL
+
+ #define PSW32_ADDR_AMODE 0x80000000UL
+ #define PSW32_ADDR_INSN 0x7FFFFFFFUL
+diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
+index a658463..a5b4c48 100644
+--- a/arch/s390/include/asm/ptrace.h
++++ b/arch/s390/include/asm/ptrace.h
+@@ -240,7 +240,7 @@ typedef struct
+ #define PSW_MASK_EA 0x00000000UL
+ #define PSW_MASK_BA 0x00000000UL
+
+-#define PSW_MASK_USER 0x00003F00UL
++#define PSW_MASK_USER 0x0000FF00UL
+
+ #define PSW_ADDR_AMODE 0x80000000UL
+ #define PSW_ADDR_INSN 0x7FFFFFFFUL
+@@ -269,7 +269,7 @@ typedef struct
+ #define PSW_MASK_EA 0x0000000100000000UL
+ #define PSW_MASK_BA 0x0000000080000000UL
+
+-#define PSW_MASK_USER 0x00003F0180000000UL
++#define PSW_MASK_USER 0x0000FF0180000000UL
+
+ #define PSW_ADDR_AMODE 0x0000000000000000UL
+ #define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
+diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
+index 4f68c81..9fdd05d 100644
+--- a/arch/s390/kernel/compat_signal.c
++++ b/arch/s390/kernel/compat_signal.c
+@@ -312,6 +312,10 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
+ regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
+ (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 |
+ (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);
++ /* Check for invalid user address space control. */
++ if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
++ regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
++ (regs->psw.mask & ~PSW_MASK_ASC);
+ regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
+ for (i = 0; i < NUM_GPRS; i++)
+ regs->gprs[i] = (__u64) regs32.gprs[i];
+@@ -493,7 +497,10 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
+
+ /* Set up registers for signal handler */
+ regs->gprs[15] = (__force __u64) frame;
+- regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */
++ /* Force 31 bit amode and default user address space control. */
++ regs->psw.mask = PSW_MASK_BA |
++ (psw_user_bits & PSW_MASK_ASC) |
++ (regs->psw.mask & ~PSW_MASK_ASC);
+ regs->psw.addr = (__force __u64) ka->sa.sa_handler;
+
+ regs->gprs[2] = map_signal(sig);
+@@ -557,7 +564,10 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
+
+ /* Set up registers for signal handler */
+ regs->gprs[15] = (__force __u64) frame;
+- regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */
++ /* Force 31 bit amode and default user address space control. */
++ regs->psw.mask = PSW_MASK_BA |
++ (psw_user_bits & PSW_MASK_ASC) |
++ (regs->psw.mask & ~PSW_MASK_ASC);
+ regs->psw.addr = (__u64) ka->sa.sa_handler;
+
+ regs->gprs[2] = map_signal(sig);
+diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
+index 5086553..d54d475 100644
+--- a/arch/s390/kernel/signal.c
++++ b/arch/s390/kernel/signal.c
+@@ -147,6 +147,10 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
+ /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */
+ regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
+ (user_sregs.regs.psw.mask & PSW_MASK_USER);
++ /* Check for invalid user address space control. */
++ if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
++ regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
++ (regs->psw.mask & ~PSW_MASK_ASC);
+ /* Check for invalid amode */
+ if (regs->psw.mask & PSW_MASK_EA)
+ regs->psw.mask |= PSW_MASK_BA;
+@@ -293,7 +297,10 @@ static int setup_frame(int sig, struct k_sigaction *ka,
+
+ /* Set up registers for signal handler */
+ regs->gprs[15] = (unsigned long) frame;
+- regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */
++ /* Force default amode and default user address space control. */
++ regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
++ (psw_user_bits & PSW_MASK_ASC) |
++ (regs->psw.mask & ~PSW_MASK_ASC);
+ regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
+
+ regs->gprs[2] = map_signal(sig);
+@@ -362,7 +369,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+
+ /* Set up registers for signal handler */
+ regs->gprs[15] = (unsigned long) frame;
+- regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */
++ /* Force default amode and default user address space control. */
++ regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
++ (psw_user_bits & PSW_MASK_ASC) |
++ (regs->psw.mask & ~PSW_MASK_ASC);
+ regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
+
+ regs->gprs[2] = map_signal(sig);
+diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
+index 65cb06e..4ccf9f5 100644
+--- a/arch/s390/mm/gup.c
++++ b/arch/s390/mm/gup.c
+@@ -183,7 +183,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+- if (end < start)
++ if ((end < start) || (end > TASK_SIZE))
+ goto slow_irqon;
+
+ /*
+diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
+index f0836cd..d58d3ed 100644
+--- a/arch/sparc/kernel/signal_64.c
++++ b/arch/sparc/kernel/signal_64.c
+@@ -307,9 +307,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
+ err |= restore_fpu_state(regs, fpu_save);
+
+ err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
+- err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
+-
+- if (err)
++ if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT)
+ goto segv;
+
+ err |= __get_user(rwin_save, &sf->rwin_save);
+diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
+index 3566454..3b96fd4 100644
+--- a/arch/x86/include/asm/ptrace.h
++++ b/arch/x86/include/asm/ptrace.h
+@@ -206,21 +206,14 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
+ }
+ #endif
+
+-/*
+- * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
+- * when it traps. The previous stack will be directly underneath the saved
+- * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
+- *
+- * This is valid only for kernel mode traps.
+- */
+-static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+-{
+ #ifdef CONFIG_X86_32
+- return (unsigned long)(&regs->sp);
++extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
+ #else
++static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
++{
+ return regs->sp;
+-#endif
+ }
++#endif
+
+ #define GET_IP(regs) ((regs)->ip)
+ #define GET_FP(regs) ((regs)->bp)
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index ff8557e..f07becc 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -587,6 +587,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+ }
+ }
+
++ /*
++ * The way access filter has a performance penalty on some workloads.
++ * Disable it on the affected CPUs.
++ */
++ if ((c->x86 == 0x15) &&
++ (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
++ u64 val;
++
++ if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) {
++ val |= 0x1E;
++ checking_wrmsrl(0xc0011021, val);
++ }
++ }
++
+ cpu_detect_cache_sizes(c);
+
+ /* Multi core CPU? */
+diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+index 787e06c..ce04b58 100644
+--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+@@ -323,17 +323,6 @@ device_initcall(thermal_throttle_init_device);
+
+ #endif /* CONFIG_SYSFS */
+
+-/*
+- * Set up the most two significant bit to notify mce log that this thermal
+- * event type.
+- * This is a temp solution. May be changed in the future with mce log
+- * infrasture.
+- */
+-#define CORE_THROTTLED (0)
+-#define CORE_POWER_LIMIT ((__u64)1 << 62)
+-#define PACKAGE_THROTTLED ((__u64)2 << 62)
+-#define PACKAGE_POWER_LIMIT ((__u64)3 << 62)
+-
+ static void notify_thresholds(__u64 msr_val)
+ {
+ /* check whether the interrupt handler is defined;
+@@ -363,27 +352,23 @@ static void intel_thermal_interrupt(void)
+ if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
+ THERMAL_THROTTLING_EVENT,
+ CORE_LEVEL) != 0)
+- mce_log_therm_throt_event(CORE_THROTTLED | msr_val);
++ mce_log_therm_throt_event(msr_val);
+
+ if (this_cpu_has(X86_FEATURE_PLN))
+- if (therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
++ therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
+ POWER_LIMIT_EVENT,
+- CORE_LEVEL) != 0)
+- mce_log_therm_throt_event(CORE_POWER_LIMIT | msr_val);
++ CORE_LEVEL);
+
+ if (this_cpu_has(X86_FEATURE_PTS)) {
+ rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
+- if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
++ therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
+ THERMAL_THROTTLING_EVENT,
+- PACKAGE_LEVEL) != 0)
+- mce_log_therm_throt_event(PACKAGE_THROTTLED | msr_val);
++ PACKAGE_LEVEL);
+ if (this_cpu_has(X86_FEATURE_PLN))
+- if (therm_throt_process(msr_val &
++ therm_throt_process(msr_val &
+ PACKAGE_THERM_STATUS_POWER_LIMIT,
+ POWER_LIMIT_EVENT,
+- PACKAGE_LEVEL) != 0)
+- mce_log_therm_throt_event(PACKAGE_POWER_LIMIT
+- | msr_val);
++ PACKAGE_LEVEL);
+ }
+ }
+
+diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
+index ac52c15..1ef962b 100644
+--- a/arch/x86/kernel/microcode_amd.c
++++ b/arch/x86/kernel/microcode_amd.c
+@@ -163,6 +163,7 @@ static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size)
+ #define F1XH_MPB_MAX_SIZE 2048
+ #define F14H_MPB_MAX_SIZE 1824
+ #define F15H_MPB_MAX_SIZE 4096
++#define F16H_MPB_MAX_SIZE 3458
+
+ switch (c->x86) {
+ case 0x14:
+@@ -171,6 +172,9 @@ static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size)
+ case 0x15:
+ max_size = F15H_MPB_MAX_SIZE;
+ break;
++ case 0x16:
++ max_size = F16H_MPB_MAX_SIZE;
++ break;
+ default:
+ max_size = F1XH_MPB_MAX_SIZE;
+ break;
+diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
+index 8252879..2dc4121 100644
+--- a/arch/x86/kernel/ptrace.c
++++ b/arch/x86/kernel/ptrace.c
+@@ -21,6 +21,7 @@
+ #include <linux/signal.h>
+ #include <linux/perf_event.h>
+ #include <linux/hw_breakpoint.h>
++#include <linux/module.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -164,6 +165,35 @@ static inline bool invalid_selector(u16 value)
+
+ #define FLAG_MASK FLAG_MASK_32
+
++/*
++ * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
++ * when it traps. The previous stack will be directly underneath the saved
++ * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
++ *
++ * Now, if the stack is empty, '&regs->sp' is out of range. In this
++ * case we try to take the previous stack. To always return a non-null
++ * stack pointer we fall back to regs as stack if no previous stack
++ * exists.
++ *
++ * This is valid only for kernel mode traps.
++ */
++unsigned long kernel_stack_pointer(struct pt_regs *regs)
++{
++ unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
++ unsigned long sp = (unsigned long)&regs->sp;
++ struct thread_info *tinfo;
++
++ if (context == (sp & ~(THREAD_SIZE - 1)))
++ return sp;
++
++ tinfo = (struct thread_info *)context;
++ if (tinfo->previous_esp)
++ return tinfo->previous_esp;
++
++ return (unsigned long)regs;
++}
++EXPORT_SYMBOL_GPL(kernel_stack_pointer);
++
+ static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
+ {
+ BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index cf0ef98..0d403aa 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -937,8 +937,21 @@ void __init setup_arch(char **cmdline_p)
+
+ #ifdef CONFIG_X86_64
+ if (max_pfn > max_low_pfn) {
+- max_pfn_mapped = init_memory_mapping(1UL<<32,
+- max_pfn<<PAGE_SHIFT);
++ int i;
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++
++ if (ei->addr + ei->size <= 1UL << 32)
++ continue;
++
++ if (ei->type == E820_RESERVED)
++ continue;
++
++ max_pfn_mapped = init_memory_mapping(
++ ei->addr < 1UL << 32 ? 1UL << 32 : ei->addr,
++ ei->addr + ei->size);
++ }
++
+ /* can we preseve max_low_pfn ?*/
+ max_low_pfn = max_pfn;
+ }
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 87488b9..34a7f40 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -28,36 +28,50 @@ int direct_gbpages
+ #endif
+ ;
+
+-static void __init find_early_table_space(unsigned long end, int use_pse,
+- int use_gbpages)
++struct map_range {
++ unsigned long start;
++ unsigned long end;
++ unsigned page_size_mask;
++};
++
++/*
++ * First calculate space needed for kernel direct mapping page tables to cover
++ * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
++ * pages. Then find enough contiguous space for those page tables.
++ */
++static void __init find_early_table_space(struct map_range *mr, int nr_range)
+ {
+- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
++ int i;
++ unsigned long puds = 0, pmds = 0, ptes = 0, tables;
++ unsigned long start = 0, good_end;
+ phys_addr_t base;
+
+- puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
+- tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
++ for (i = 0; i < nr_range; i++) {
++ unsigned long range, extra;
+
+- if (use_gbpages) {
+- unsigned long extra;
++ range = mr[i].end - mr[i].start;
++ puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
+
+- extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
+- pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+- } else
+- pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+-
+- tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
++ if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
++ extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
++ pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
++ } else {
++ pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
++ }
+
+- if (use_pse) {
+- unsigned long extra;
+-
+- extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
++ if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
++ extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
+ #ifdef CONFIG_X86_32
+- extra += PMD_SIZE;
++ extra += PMD_SIZE;
+ #endif
+- ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+- } else
+- ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ } else {
++ ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ }
++ }
+
++ tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
++ tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+ tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
+
+ #ifdef CONFIG_X86_32
+@@ -75,7 +89,8 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
+ pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
+
+ printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
+- end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
++ mr[nr_range - 1].end, pgt_buf_start << PAGE_SHIFT,
++ pgt_buf_top << PAGE_SHIFT);
+ }
+
+ void __init native_pagetable_reserve(u64 start, u64 end)
+@@ -83,12 +98,6 @@ void __init native_pagetable_reserve(u64 start, u64 end)
+ memblock_x86_reserve_range(start, end, "PGTABLE");
+ }
+
+-struct map_range {
+- unsigned long start;
+- unsigned long end;
+- unsigned page_size_mask;
+-};
+-
+ #ifdef CONFIG_X86_32
+ #define NR_RANGE_MR 3
+ #else /* CONFIG_X86_64 */
+@@ -260,7 +269,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
+ * nodes are discovered.
+ */
+ if (!after_bootmem)
+- find_early_table_space(end, use_pse, use_gbpages);
++ find_early_table_space(mr, nr_range);
+
+ for (i = 0; i < nr_range; i++)
+ ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
+diff --git a/block/blk-exec.c b/block/blk-exec.c
+index 6053285..ac2c6e7 100644
+--- a/block/blk-exec.c
++++ b/block/blk-exec.c
+@@ -49,6 +49,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
+ rq_end_io_fn *done)
+ {
+ int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
++ bool is_pm_resume;
+
+ if (unlikely(blk_queue_dead(q))) {
+ rq->errors = -ENXIO;
+@@ -59,12 +60,18 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
+
+ rq->rq_disk = bd_disk;
+ rq->end_io = done;
++ /*
++ * need to check this before __blk_run_queue(), because rq can
++ * be freed before that returns.
++ */
++ is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;
++
+ WARN_ON(irqs_disabled());
+ spin_lock_irq(q->queue_lock);
+ __elv_add_request(q, rq, where);
+ __blk_run_queue(q);
+ /* the queue is stopped so it won't be run */
+- if (rq->cmd_type == REQ_TYPE_PM_RESUME)
++ if (is_pm_resume)
+ q->request_fn(q);
+ spin_unlock_irq(q->queue_lock);
+ }
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index c364358..791df46 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -374,6 +374,8 @@ typedef struct drm_i915_private {
+ unsigned int lvds_use_ssc:1;
+ unsigned int display_clock_mode:1;
+ int lvds_ssc_freq;
++ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
++ unsigned int lvds_val; /* used for checking LVDS channel mode */
+ struct {
+ int rate;
+ int lanes;
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 22efb08..87bb87b 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -174,6 +174,28 @@ get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
+ return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
+ }
+
++/* get lvds_fp_timing entry
++ * this function may return NULL if the corresponding entry is invalid
++ */
++static const struct lvds_fp_timing *
++get_lvds_fp_timing(const struct bdb_header *bdb,
++ const struct bdb_lvds_lfp_data *data,
++ const struct bdb_lvds_lfp_data_ptrs *ptrs,
++ int index)
++{
++ size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
++ u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
++ size_t ofs;
++
++ if (index >= ARRAY_SIZE(ptrs->ptr))
++ return NULL;
++ ofs = ptrs->ptr[index].fp_timing_offset;
++ if (ofs < data_ofs ||
++ ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
++ return NULL;
++ return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
++}
++
+ /* Try to find integrated panel data */
+ static void
+ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+@@ -183,6 +205,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+ const struct bdb_lvds_lfp_data *lvds_lfp_data;
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
+ const struct lvds_dvo_timing *panel_dvo_timing;
++ const struct lvds_fp_timing *fp_timing;
+ struct drm_display_mode *panel_fixed_mode;
+ int i, downclock;
+
+@@ -244,6 +267,19 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+ "Normal Clock %dKHz, downclock %dKHz\n",
+ panel_fixed_mode->clock, 10*downclock);
+ }
++
++ fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
++ lvds_lfp_data_ptrs,
++ lvds_options->panel_type);
++ if (fp_timing) {
++ /* check the resolution, just to be sure */
++ if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
++ fp_timing->y_res == panel_fixed_mode->vdisplay) {
++ dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
++ DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
++ dev_priv->bios_lvds_val);
++ }
++ }
+ }
+
+ /* Try to find sdvo panel data */
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index adac0dd..fdae61f 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -356,6 +356,27 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
+ .find_pll = intel_find_pll_ironlake_dp,
+ };
+
++static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
++ unsigned int reg)
++{
++ unsigned int val;
++
++ if (dev_priv->lvds_val)
++ val = dev_priv->lvds_val;
++ else {
++ /* BIOS should set the proper LVDS register value at boot, but
++ * in reality, it doesn't set the value when the lid is closed;
++ * we need to check "the value to be set" in VBT when LVDS
++ * register is uninitialized.
++ */
++ val = I915_READ(reg);
++ if (!(val & ~LVDS_DETECTED))
++ val = dev_priv->bios_lvds_val;
++ dev_priv->lvds_val = val;
++ }
++ return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
++}
++
+ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+ int refclk)
+ {
+@@ -364,8 +385,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+ const intel_limit_t *limit;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+- if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
+- LVDS_CLKB_POWER_UP) {
++ if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
+ /* LVDS dual channel */
+ if (refclk == 100000)
+ limit = &intel_limits_ironlake_dual_lvds_100m;
+@@ -393,8 +413,7 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
+ const intel_limit_t *limit;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+- LVDS_CLKB_POWER_UP)
++ if (is_dual_link_lvds(dev_priv, LVDS))
+ /* LVDS with dual channel */
+ limit = &intel_limits_g4x_dual_channel_lvds;
+ else
+@@ -531,8 +550,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ * reliably set up different single/dual channel state, if we
+ * even can.
+ */
+- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+- LVDS_CLKB_POWER_UP)
++ if (is_dual_link_lvds(dev_priv, LVDS))
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index 3f4afba..9e24670 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -2264,6 +2264,18 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
+ return true;
+ }
+
++static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
++{
++ struct drm_device *dev = intel_sdvo->base.base.dev;
++ struct drm_connector *connector, *tmp;
++
++ list_for_each_entry_safe(connector, tmp,
++ &dev->mode_config.connector_list, head) {
++ if (intel_attached_encoder(connector) == &intel_sdvo->base)
++ intel_sdvo_destroy(connector);
++ }
++}
++
+ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ struct intel_sdvo_connector *intel_sdvo_connector,
+ int type)
+@@ -2596,7 +2608,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ intel_sdvo->caps.output_flags) != true) {
+ DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+- goto err;
++ /* Output_setup can leave behind connectors! */
++ goto err_output;
+ }
+
+ /* Only enable the hotplug irq if we need it, to work around noisy
+@@ -2609,12 +2622,12 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+
+ /* Set the input timing to the screen. Assume always input 0. */
+ if (!intel_sdvo_set_target_input(intel_sdvo))
+- goto err;
++ goto err_output;
+
+ if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
+ &intel_sdvo->pixel_clock_min,
+ &intel_sdvo->pixel_clock_max))
+- goto err;
++ goto err_output;
+
+ DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+ "clock range %dMHz - %dMHz, "
+@@ -2634,6 +2647,9 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+ return true;
+
++err_output:
++ intel_sdvo_output_cleanup(intel_sdvo);
++
+ err:
+ drm_encoder_cleanup(&intel_encoder->base);
+ i2c_del_adapter(&intel_sdvo->ddc);
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 382e141..aca4755 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1386,7 +1386,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ /* some early dce3.2 boards have a bug in their transmitter control table */
+- if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730))
++ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ }
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index ca94e23..b919b11 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1122,6 +1122,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
+ break;
+ udelay(1);
+ }
++ } else {
++ save->crtc_enabled[i] = false;
+ }
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
+index bd2f33e..bc6b64f 100644
+--- a/drivers/gpu/drm/radeon/radeon_agp.c
++++ b/drivers/gpu/drm/radeon/radeon_agp.c
+@@ -70,9 +70,12 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
+ /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/
+ { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
+ PCI_VENDOR_ID_DELL, 0x00e3, 2},
+- /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */
++ /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */
+ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
+ PCI_VENDOR_ID_DELL, 0x0149, 1},
++ /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */
++ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
++ PCI_VENDOR_ID_IBM, 0x0531, 1},
+ /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
+ { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
+ 0x1025, 0x0061, 1},
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+index 727e93d..9e4313e 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -708,7 +708,10 @@ int ttm_get_pages(struct list_head *pages, int flags,
+ /* clear the pages coming from the pool if requested */
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+ list_for_each_entry(p, pages, lru) {
+- clear_page(page_address(p));
++ if (PageHighMem(p))
++ clear_highpage(p);
++ else
++ clear_page(page_address(p));
+ }
+ }
+
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index ab75a4e..652f230 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -277,6 +277,9 @@
+ #define USB_VENDOR_ID_EZKEY 0x0518
+ #define USB_DEVICE_ID_BTC_8193 0x0002
+
++#define USB_VENDOR_ID_FREESCALE 0x15A2
++#define USB_DEVICE_ID_FREESCALE_MX28 0x004F
++
+ #define USB_VENDOR_ID_GAMERON 0x0810
+ #define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR 0x0001
+ #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index afb73af..aec3fa3 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -68,6 +68,7 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index 5b39216..3f28290 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -163,6 +163,38 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
+ .enter = &intel_idle },
+ };
+
++static struct cpuidle_state ivb_cstates[MWAIT_MAX_NUM_CSTATES] = {
++ { /* MWAIT C0 */ },
++ { /* MWAIT C1 */
++ .name = "C1-IVB",
++ .desc = "MWAIT 0x00",
++ .flags = CPUIDLE_FLAG_TIME_VALID,
++ .exit_latency = 1,
++ .target_residency = 1,
++ .enter = &intel_idle },
++ { /* MWAIT C2 */
++ .name = "C3-IVB",
++ .desc = "MWAIT 0x10",
++ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
++ .exit_latency = 59,
++ .target_residency = 156,
++ .enter = &intel_idle },
++ { /* MWAIT C3 */
++ .name = "C6-IVB",
++ .desc = "MWAIT 0x20",
++ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
++ .exit_latency = 80,
++ .target_residency = 300,
++ .enter = &intel_idle },
++ { /* MWAIT C4 */
++ .name = "C7-IVB",
++ .desc = "MWAIT 0x30",
++ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
++ .exit_latency = 87,
++ .target_residency = 300,
++ .enter = &intel_idle },
++};
++
+ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
+ { /* MWAIT C0 */ },
+ { /* MWAIT C1 */
+@@ -386,6 +418,11 @@ static int intel_idle_probe(void)
+ cpuidle_state_table = snb_cstates;
+ break;
+
++ case 0x3A: /* IVB */
++ case 0x3E: /* IVB Xeon */
++ cpuidle_state_table = ivb_cstates;
++ break;
++
+ default:
+ pr_debug(PREFIX "does not run on family %d model %d\n",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
+index ec58f48..1512bd8 100644
+--- a/drivers/input/mouse/bcm5974.c
++++ b/drivers/input/mouse/bcm5974.c
+@@ -453,6 +453,9 @@ static void setup_events_to_report(struct input_dev *input_dev,
+ __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit);
+ __set_bit(BTN_LEFT, input_dev->keybit);
+
++ if (cfg->caps & HAS_INTEGRATED_BUTTON)
++ __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit);
++
+ input_set_events_per_packet(input_dev, 60);
+ }
+
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index d37a48e..8656441 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -991,7 +991,7 @@ static int i8042_controller_init(void)
+ * Reset the controller and reset CRT to the original value set by BIOS.
+ */
+
+-static void i8042_controller_reset(void)
++static void i8042_controller_reset(bool force_reset)
+ {
+ i8042_flush();
+
+@@ -1016,7 +1016,7 @@ static void i8042_controller_reset(void)
+ * Reset the controller if requested.
+ */
+
+- if (i8042_reset)
++ if (i8042_reset || force_reset)
+ i8042_controller_selftest();
+
+ /*
+@@ -1139,9 +1139,9 @@ static int i8042_controller_resume(bool force_reset)
+ * upsetting it.
+ */
+
+-static int i8042_pm_reset(struct device *dev)
++static int i8042_pm_suspend(struct device *dev)
+ {
+- i8042_controller_reset();
++ i8042_controller_reset(true);
+
+ return 0;
+ }
+@@ -1163,13 +1163,20 @@ static int i8042_pm_thaw(struct device *dev)
+ return 0;
+ }
+
++static int i8042_pm_reset(struct device *dev)
++{
++ i8042_controller_reset(false);
++
++ return 0;
++}
++
+ static int i8042_pm_restore(struct device *dev)
+ {
+ return i8042_controller_resume(false);
+ }
+
+ static const struct dev_pm_ops i8042_pm_ops = {
+- .suspend = i8042_pm_reset,
++ .suspend = i8042_pm_suspend,
+ .resume = i8042_pm_resume,
+ .thaw = i8042_pm_thaw,
+ .poweroff = i8042_pm_reset,
+@@ -1185,7 +1192,7 @@ static const struct dev_pm_ops i8042_pm_ops = {
+
+ static void i8042_shutdown(struct platform_device *dev)
+ {
+- i8042_controller_reset();
++ i8042_controller_reset(false);
+ }
+
+ static int __init i8042_create_kbd_port(void)
+@@ -1424,7 +1431,7 @@ static int __init i8042_probe(struct platform_device *dev)
+ out_fail:
+ i8042_free_aux_ports(); /* in case KBD failed but AUX not */
+ i8042_free_irqs();
+- i8042_controller_reset();
++ i8042_controller_reset(false);
+ i8042_platform_device = NULL;
+
+ return error;
+@@ -1434,7 +1441,7 @@ static int __devexit i8042_remove(struct platform_device *dev)
+ {
+ i8042_unregister_ports();
+ i8042_free_irqs();
+- i8042_controller_reset();
++ i8042_controller_reset(false);
+ i8042_platform_device = NULL;
+
+ return 0;
+diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
+index 3913f47..492aa52 100644
+--- a/drivers/isdn/gigaset/bas-gigaset.c
++++ b/drivers/isdn/gigaset/bas-gigaset.c
+@@ -616,7 +616,13 @@ static void int_in_work(struct work_struct *work)
+ if (rc == 0)
+ /* success, resubmit interrupt read URB */
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+- if (rc != 0 && rc != -ENODEV) {
++
++ switch (rc) {
++ case 0: /* success */
++ case -ENODEV: /* device gone */
++ case -EINVAL: /* URB already resubmitted, or terminal badness */
++ break;
++ default: /* failure: try to recover by resetting the device */
+ dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc));
+ rc = usb_lock_device_for_reset(ucs->udev, ucs->interface);
+ if (rc == 0) {
+@@ -2437,7 +2443,9 @@ static void gigaset_disconnect(struct usb_interface *interface)
+ }
+
+ /* gigaset_suspend
+- * This function is called before the USB connection is suspended.
++ * This function is called before the USB connection is suspended
++ * or before the USB device is reset.
++ * In the latter case, message == PMSG_ON.
+ */
+ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
+ {
+@@ -2493,7 +2501,12 @@ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
+ del_timer_sync(&ucs->timer_atrdy);
+ del_timer_sync(&ucs->timer_cmd_in);
+ del_timer_sync(&ucs->timer_int_in);
+- cancel_work_sync(&ucs->int_in_wq);
++
++ /* don't try to cancel int_in_wq from within reset as it
++ * might be the one requesting the reset
++ */
++ if (message.event != PM_EVENT_ON)
++ cancel_work_sync(&ucs->int_in_wq);
+
+ gig_dbg(DEBUG_SUSPEND, "suspend complete");
+ return 0;
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 502dcf7..8953630 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -755,8 +755,14 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue)
+ if (!md_in_flight(md))
+ wake_up(&md->wait);
+
++ /*
++ * Run this off this callpath, as drivers could invoke end_io while
++ * inside their request_fn (and holding the queue lock). Calling
++ * back into ->request_fn() could deadlock attempting to grab the
++ * queue lock again.
++ */
+ if (run_queue)
+- blk_run_queue(md->queue);
++ blk_run_queue_async(md->queue);
+
+ /*
+ * dm_put() must be at the end of this function. See the comment above
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 2887f22..145e378e 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1801,10 +1801,10 @@ retry:
+ memset(bbp, 0xff, PAGE_SIZE);
+
+ for (i = 0 ; i < bb->count ; i++) {
+- u64 internal_bb = *p++;
++ u64 internal_bb = p[i];
+ u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
+ | BB_LEN(internal_bb));
+- *bbp++ = cpu_to_le64(store_bb);
++ bbp[i] = cpu_to_le64(store_bb);
+ }
+ bb->changed = 0;
+ if (read_seqretry(&bb->lock, seq))
+@@ -7650,9 +7650,9 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
+ sector_t *first_bad, int *bad_sectors)
+ {
+ int hi;
+- int lo = 0;
++ int lo;
+ u64 *p = bb->page;
+- int rv = 0;
++ int rv;
+ sector_t target = s + sectors;
+ unsigned seq;
+
+@@ -7667,7 +7667,8 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
+
+ retry:
+ seq = read_seqbegin(&bb->lock);
+-
++ lo = 0;
++ rv = 0;
+ hi = bb->count;
+
+ /* Binary search between lo and hi for 'target'
+diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
+index e585263..f38c348 100644
+--- a/drivers/mtd/devices/slram.c
++++ b/drivers/mtd/devices/slram.c
+@@ -266,7 +266,7 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength)
+
+ if (*(szlength) != '+') {
+ devlength = simple_strtoul(szlength, &buffer, 0);
+- devlength = handle_unit(devlength, buffer) - devstart;
++ devlength = handle_unit(devlength, buffer);
+ if (devlength < devstart)
+ goto err_out;
+
+diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
+index 64be8f0..d9127e2 100644
+--- a/drivers/mtd/ofpart.c
++++ b/drivers/mtd/ofpart.c
+@@ -121,7 +121,7 @@ static int parse_ofoldpart_partitions(struct mtd_info *master,
+ nr_parts = plen / sizeof(part[0]);
+
+ *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
+- if (!pparts)
++ if (!*pparts)
+ return -ENOMEM;
+
+ names = of_get_property(dp, "partition-names", &plen);
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+index 4ae26a7..7720721 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+@@ -356,6 +356,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
+ case IXGBE_DEV_ID_82599_SFP_FCOE:
+ case IXGBE_DEV_ID_82599_SFP_EM:
+ case IXGBE_DEV_ID_82599_SFP_SF2:
++ case IXGBE_DEV_ID_82599_SFP_SF_QP:
+ case IXGBE_DEV_ID_82599EN_SFP:
+ media_type = ixgbe_media_type_fiber;
+ break;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+index f1365fe..2c14e85 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+@@ -3157,6 +3157,7 @@ static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X540T:
++ case IXGBE_DEV_ID_X540T1:
+ return 0;
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ return 0;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 8ef92d1..cc96a5a 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -106,6 +106,8 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
+ /* required last entry */
+ {0, }
+ };
+@@ -7611,6 +7613,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
+ adapter->wol = IXGBE_WUFC_MAG;
+ break;
+ case IXGBE_DEV_ID_X540T:
++ case IXGBE_DEV_ID_X540T1:
+ /* Check eeprom to see if it is enabled */
+ hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
+ wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+index 6c5cca8..f00d6d5 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+@@ -65,6 +65,8 @@
+ #define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
+ #define IXGBE_DEV_ID_82599_LS 0x154F
+ #define IXGBE_DEV_ID_X540T 0x1528
++#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
++#define IXGBE_DEV_ID_X540T1 0x1560
+
+ /* VF Device IDs */
+ #define IXGBE_DEV_ID_82599_VF 0x10ED
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+index 0515862..858a762 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+@@ -1072,9 +1072,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
+ IEEE80211_TX_STAT_AMPDU_NO_BACK;
+ skb_pull(p, D11_PHY_HDR_LEN);
+ skb_pull(p, D11_TXH_LEN);
+- wiphy_err(wiphy, "%s: BA Timeout, seq %d, in_"
+- "transit %d\n", "AMPDU status", seq,
+- ini->tx_in_transit);
++ BCMMSG(wiphy,
++ "BA Timeout, seq %d, in_transit %d\n",
++ seq, ini->tx_in_transit);
+ ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
+ p);
+ }
+diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
+index 6e0a3ea..5a25dd2 100644
+--- a/drivers/net/wireless/mwifiex/cmdevt.c
++++ b/drivers/net/wireless/mwifiex/cmdevt.c
+@@ -816,9 +816,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
+ return;
+ }
+ cmd_node = adapter->curr_cmd;
+- if (cmd_node->wait_q_enabled)
+- adapter->cmd_wait_q.status = -ETIMEDOUT;
+-
+ if (cmd_node) {
+ adapter->dbg.timeout_cmd_id =
+ adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
+@@ -863,6 +860,14 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
+
+ dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n",
+ adapter->ps_mode, adapter->ps_state);
++
++ if (cmd_node->wait_q_enabled) {
++ adapter->cmd_wait_q.status = -ETIMEDOUT;
++ wake_up_interruptible(&adapter->cmd_wait_q.wait);
++ mwifiex_cancel_pending_ioctl(adapter);
++ /* reset cmd_sent flag to unblock new commands */
++ adapter->cmd_sent = false;
++ }
+ }
+ if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
+ mwifiex_init_fw_complete(adapter);
+diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
+index 283171b..3579a68 100644
+--- a/drivers/net/wireless/mwifiex/sdio.c
++++ b/drivers/net/wireless/mwifiex/sdio.c
+@@ -162,7 +162,6 @@ static int mwifiex_sdio_suspend(struct device *dev)
+ struct sdio_mmc_card *card;
+ struct mwifiex_adapter *adapter;
+ mmc_pm_flag_t pm_flag = 0;
+- int hs_actived = 0;
+ int i;
+ int ret = 0;
+
+@@ -189,12 +188,14 @@ static int mwifiex_sdio_suspend(struct device *dev)
+ adapter = card->adapter;
+
+ /* Enable the Host Sleep */
+- hs_actived = mwifiex_enable_hs(adapter);
+- if (hs_actived) {
+- pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n");
+- ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
++ if (!mwifiex_enable_hs(adapter)) {
++ dev_err(adapter->dev, "cmd: failed to suspend\n");
++ return -EFAULT;
+ }
+
++ dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n");
++ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
++
+ /* Indicate device suspended */
+ adapter->is_suspended = true;
+
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index 0302148..a99be2d0 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -307,6 +307,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ /*=== Customer ID ===*/
+ /****** 8188CU ********/
+ {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
++ {RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/
+ {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
+ {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+ {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
+index 86b69f85..9d932f4 100644
+--- a/drivers/pci/setup-bus.c
++++ b/drivers/pci/setup-bus.c
+@@ -612,7 +612,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
+ if (children_add_size > add_size)
+ add_size = children_add_size;
+ size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
+- calculate_iosize(size, min_size+add_size, size1,
++ calculate_iosize(size, min_size, add_size + size1,
+ resource_size(b_res), 4096);
+ if (!size0 && !size1) {
+ if (b_res->start || b_res->end)
+@@ -726,7 +726,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
+ if (children_add_size > add_size)
+ add_size = children_add_size;
+ size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
+- calculate_memsize(size, min_size+add_size, 0,
++ calculate_memsize(size, min_size, add_size,
+ resource_size(b_res), min_align);
+ if (!size0 && !size1) {
+ if (b_res->start || b_res->end)
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index 5717509b..8b25f9c 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -233,11 +233,12 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
+ return -EINVAL;
+ }
+
+- new_size = resource_size(res) + addsize + min_align;
++ /* already aligned with min_align */
++ new_size = resource_size(res) + addsize;
+ ret = _pci_assign_resource(dev, resno, new_size, min_align);
+ if (!ret) {
+ res->flags &= ~IORESOURCE_STARTALIGN;
+- dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
++ dev_info(&dev->dev, "BAR %d: reassigned %pR\n", resno, res);
+ if (resno < PCI_BRIDGE_RESOURCES)
+ pci_update_resource(dev, resno);
+ }
+diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
+index 110e4af..7d47434 100644
+--- a/drivers/platform/x86/acer-wmi.c
++++ b/drivers/platform/x86/acer-wmi.c
+@@ -105,6 +105,7 @@ static const struct key_entry acer_wmi_keymap[] = {
+ {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
+ {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */
+ {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */
++ {KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */
+ {KE_IGNORE, 0x41, {KEY_MUTE} },
+ {KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} },
+ {KE_IGNORE, 0x43, {KEY_NEXTSONG} },
+diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
+index 192cb48..f3cbecc 100644
+--- a/drivers/scsi/isci/request.c
++++ b/drivers/scsi/isci/request.c
+@@ -1849,7 +1849,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
+ frame_index,
+ (void **)&frame_buffer);
+
+- sci_controller_copy_sata_response(&ireq->stp.req,
++ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 0c6fb19..7de9993 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1934,7 +1934,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ }
+ break;
+ case COMP_SHORT_TX:
+- xhci_warn(xhci, "WARN: short transfer on control ep\n");
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+ else
+@@ -2291,7 +2290,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
+ break;
+ case COMP_STALL:
+- xhci_warn(xhci, "WARN: Stalled endpoint\n");
++ xhci_dbg(xhci, "Stalled endpoint\n");
+ ep->ep_state |= EP_HALTED;
+ status = -EPIPE;
+ break;
+@@ -2301,11 +2300,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ break;
+ case COMP_SPLIT_ERR:
+ case COMP_TX_ERR:
+- xhci_warn(xhci, "WARN: transfer error on endpoint\n");
++ xhci_dbg(xhci, "Transfer error on endpoint\n");
+ status = -EPROTO;
+ break;
+ case COMP_BABBLE:
+- xhci_warn(xhci, "WARN: babble error on endpoint\n");
++ xhci_dbg(xhci, "Babble error on endpoint\n");
+ status = -EOVERFLOW;
+ break;
+ case COMP_DB_ERR:
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 5a23f4d..dab05d1 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -206,14 +206,14 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
+
+ ret = pci_enable_msi(pdev);
+ if (ret) {
+- xhci_err(xhci, "failed to allocate MSI entry\n");
++ xhci_dbg(xhci, "failed to allocate MSI entry\n");
+ return ret;
+ }
+
+ ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
+ 0, "xhci_hcd", xhci_to_hcd(xhci));
+ if (ret) {
+- xhci_err(xhci, "disable MSI interrupt\n");
++ xhci_dbg(xhci, "disable MSI interrupt\n");
+ pci_disable_msi(pdev);
+ }
+
+@@ -276,7 +276,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
+
+ ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
+ if (ret) {
+- xhci_err(xhci, "Failed to enable MSI-X\n");
++ xhci_dbg(xhci, "Failed to enable MSI-X\n");
+ goto free_entries;
+ }
+
+@@ -292,7 +292,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
+ return ret;
+
+ disable_msix:
+- xhci_err(xhci, "disable MSI-X interrupt\n");
++ xhci_dbg(xhci, "disable MSI-X interrupt\n");
+ xhci_free_irq(xhci);
+ pci_disable_msix(pdev);
+ free_entries:
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c334670..a5f875d 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -157,6 +157,7 @@ static void option_instat_callback(struct urb *urb);
+ #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001
+ #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000
+ #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
++#define NOVATELWIRELESS_PRODUCT_E362 0x9010
+ #define NOVATELWIRELESS_PRODUCT_G1 0xA001
+ #define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
+ #define NOVATELWIRELESS_PRODUCT_G2 0xA010
+@@ -192,6 +193,9 @@ static void option_instat_callback(struct urb *urb);
+ #define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181
+ #define DELL_PRODUCT_5730_MINICARD_VZW 0x8182
+
++#define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */
++#define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */
++
+ #define KYOCERA_VENDOR_ID 0x0c88
+ #define KYOCERA_PRODUCT_KPC650 0x17da
+ #define KYOCERA_PRODUCT_KPC680 0x180a
+@@ -282,6 +286,7 @@ static void option_instat_callback(struct urb *urb);
+ /* ALCATEL PRODUCTS */
+ #define ALCATEL_VENDOR_ID 0x1bbb
+ #define ALCATEL_PRODUCT_X060S_X200 0x0000
++#define ALCATEL_PRODUCT_X220_X500D 0x0017
+
+ #define PIRELLI_VENDOR_ID 0x1266
+ #define PIRELLI_PRODUCT_C100_1 0x1002
+@@ -705,6 +710,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
+ /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
+ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
+
+ { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
+ { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
+@@ -727,6 +733,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
++ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
+@@ -1156,6 +1164,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+ .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+ },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
+ { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
+ { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index 8bea45c..e5206de 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -764,7 +764,7 @@ int usb_serial_probe(struct usb_interface *interface,
+
+ if (retval) {
+ dbg("sub driver rejected device");
+- kfree(serial);
++ usb_serial_put(serial);
+ module_put(type->driver.owner);
+ return retval;
+ }
+@@ -836,7 +836,7 @@ int usb_serial_probe(struct usb_interface *interface,
+ */
+ if (num_bulk_in == 0 || num_bulk_out == 0) {
+ dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
+- kfree(serial);
++ usb_serial_put(serial);
+ module_put(type->driver.owner);
+ return -ENODEV;
+ }
+@@ -850,7 +850,7 @@ int usb_serial_probe(struct usb_interface *interface,
+ if (num_ports == 0) {
+ dev_err(&interface->dev,
+ "Generic device with no bulk out, not allowed.\n");
+- kfree(serial);
++ usb_serial_put(serial);
+ module_put(type->driver.owner);
+ return -EIO;
+ }
+diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
+index 99796c5..bdf401b 100644
+--- a/drivers/watchdog/iTCO_wdt.c
++++ b/drivers/watchdog/iTCO_wdt.c
+@@ -36,6 +36,7 @@
+ * document number TBD : Patsburg (PBG)
+ * document number TBD : DH89xxCC
+ * document number TBD : Panther Point
++ * document number TBD : Lynx Point
+ */
+
+ /*
+@@ -126,6 +127,7 @@ enum iTCO_chipsets {
+ TCO_PBG, /* Patsburg */
+ TCO_DH89XXCC, /* DH89xxCC */
+ TCO_PPT, /* Panther Point */
++ TCO_LPT, /* Lynx Point */
+ };
+
+ static struct {
+@@ -189,6 +191,7 @@ static struct {
+ {"Patsburg", 2},
+ {"DH89xxCC", 2},
+ {"Panther Point", 2},
++ {"Lynx Point", 2},
+ {NULL, 0}
+ };
+
+@@ -331,6 +334,38 @@ static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
+ { PCI_VDEVICE(INTEL, 0x1e5d), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5e), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5f), TCO_PPT},
++ { PCI_VDEVICE(INTEL, 0x8c40), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c41), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c42), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c43), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c44), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c45), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c46), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c47), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c48), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c49), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c4a), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c4b), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c4c), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c4d), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c4e), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c4f), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c50), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c51), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c52), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c53), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c54), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c55), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c56), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c57), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c58), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c59), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c5a), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c5b), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c5c), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c5d), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c5e), TCO_LPT},
++ { PCI_VDEVICE(INTEL, 0x8c5f), TCO_LPT},
+ { 0, }, /* End of list */
+ };
+ MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
+diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
+index 0301be6..465e49a 100644
+--- a/fs/gfs2/lops.c
++++ b/fs/gfs2/lops.c
+@@ -165,16 +165,14 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+ struct gfs2_meta_header *mh;
+ struct gfs2_trans *tr;
+
+- lock_buffer(bd->bd_bh);
+- gfs2_log_lock(sdp);
+ if (!list_empty(&bd->bd_list_tr))
+- goto out;
++ return;
+ tr = current->journal_info;
+ tr->tr_touched = 1;
+ tr->tr_num_buf++;
+ list_add(&bd->bd_list_tr, &tr->tr_list_buf);
+ if (!list_empty(&le->le_list))
+- goto out;
++ return;
+ set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
+ set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
+ gfs2_meta_check(sdp, bd->bd_bh);
+@@ -185,9 +183,6 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+ sdp->sd_log_num_buf++;
+ list_add(&le->le_list, &sdp->sd_log_le_buf);
+ tr->tr_num_buf_new++;
+-out:
+- gfs2_log_unlock(sdp);
+- unlock_buffer(bd->bd_bh);
+ }
+
+ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
+@@ -518,11 +513,9 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+ struct address_space *mapping = bd->bd_bh->b_page->mapping;
+ struct gfs2_inode *ip = GFS2_I(mapping->host);
+
+- lock_buffer(bd->bd_bh);
+- gfs2_log_lock(sdp);
+ if (tr) {
+ if (!list_empty(&bd->bd_list_tr))
+- goto out;
++ return;
+ tr->tr_touched = 1;
+ if (gfs2_is_jdata(ip)) {
+ tr->tr_num_buf++;
+@@ -530,7 +523,7 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+ }
+ }
+ if (!list_empty(&le->le_list))
+- goto out;
++ return;
+
+ set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
+ set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
+@@ -542,9 +535,6 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
+ } else {
+ list_add_tail(&le->le_list, &sdp->sd_log_le_ordered);
+ }
+-out:
+- gfs2_log_unlock(sdp);
+- unlock_buffer(bd->bd_bh);
+ }
+
+ static void gfs2_check_magic(struct buffer_head *bh)
+diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
+index 86ac75d..6ab2a77 100644
+--- a/fs/gfs2/trans.c
++++ b/fs/gfs2/trans.c
+@@ -145,14 +145,22 @@ void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta)
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+ struct gfs2_bufdata *bd;
+
++ lock_buffer(bh);
++ gfs2_log_lock(sdp);
+ bd = bh->b_private;
+ if (bd)
+ gfs2_assert(sdp, bd->bd_gl == gl);
+ else {
++ gfs2_log_unlock(sdp);
++ unlock_buffer(bh);
+ gfs2_attach_bufdata(gl, bh, meta);
+ bd = bh->b_private;
++ lock_buffer(bh);
++ gfs2_log_lock(sdp);
+ }
+ lops_add(sdp, &bd->bd_le);
++ gfs2_log_unlock(sdp);
++ unlock_buffer(bh);
+ }
+
+ void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
+diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
+index edac004..7c86b37 100644
+--- a/fs/jbd/transaction.c
++++ b/fs/jbd/transaction.c
+@@ -1957,7 +1957,9 @@ retry:
+ spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh);
+ spin_unlock(&journal->j_state_lock);
++ unlock_buffer(bh);
+ log_wait_commit(journal, tid);
++ lock_buffer(bh);
+ goto retry;
+ }
+ /*
+diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
+index 61e6723..0095a70 100644
+--- a/fs/jffs2/file.c
++++ b/fs/jffs2/file.c
+@@ -135,33 +135,39 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ struct page *pg;
+ struct inode *inode = mapping->host;
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
++ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
++ struct jffs2_raw_inode ri;
++ uint32_t alloc_len = 0;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ uint32_t pageofs = index << PAGE_CACHE_SHIFT;
+ int ret = 0;
+
++ D1(printk(KERN_DEBUG "%s()\n", __func__));
++
++ if (pageofs > inode->i_size) {
++ ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
++ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
++ if (ret)
++ return ret;
++ }
++
++ mutex_lock(&f->sem);
+ pg = grab_cache_page_write_begin(mapping, index, flags);
+- if (!pg)
++ if (!pg) {
++ if (alloc_len)
++ jffs2_complete_reservation(c);
++ mutex_unlock(&f->sem);
+ return -ENOMEM;
++ }
+ *pagep = pg;
+
+- D1(printk(KERN_DEBUG "jffs2_write_begin()\n"));
+-
+- if (pageofs > inode->i_size) {
++ if (alloc_len) {
+ /* Make new hole frag from old EOF to new page */
+- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+- struct jffs2_raw_inode ri;
+ struct jffs2_full_dnode *fn;
+- uint32_t alloc_len;
+
+ D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
+ (unsigned int)inode->i_size, pageofs));
+
+- ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
+- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+- if (ret)
+- goto out_page;
+-
+- mutex_lock(&f->sem);
+ memset(&ri, 0, sizeof(ri));
+
+ ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+@@ -188,7 +194,6 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ if (IS_ERR(fn)) {
+ ret = PTR_ERR(fn);
+ jffs2_complete_reservation(c);
+- mutex_unlock(&f->sem);
+ goto out_page;
+ }
+ ret = jffs2_add_full_dnode_to_inode(c, f, fn);
+@@ -202,12 +207,10 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ jffs2_mark_node_obsolete(c, fn->raw);
+ jffs2_free_full_dnode(fn);
+ jffs2_complete_reservation(c);
+- mutex_unlock(&f->sem);
+ goto out_page;
+ }
+ jffs2_complete_reservation(c);
+ inode->i_size = pageofs;
+- mutex_unlock(&f->sem);
+ }
+
+ /*
+@@ -216,18 +219,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ * case of a short-copy.
+ */
+ if (!PageUptodate(pg)) {
+- mutex_lock(&f->sem);
+ ret = jffs2_do_readpage_nolock(inode, pg);
+- mutex_unlock(&f->sem);
+ if (ret)
+ goto out_page;
+ }
++ mutex_unlock(&f->sem);
+ D1(printk(KERN_DEBUG "end write_begin(). pg->flags %lx\n", pg->flags));
+ return ret;
+
+ out_page:
+ unlock_page(pg);
+ page_cache_release(pg);
++ mutex_unlock(&f->sem);
+ return ret;
+ }
+
+diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
+index 5809abb..fe677c0 100644
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -1788,8 +1788,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
+
+ BUG_ON(!th->t_trans_id);
+
+- dquot_initialize(inode);
++ reiserfs_write_unlock(inode->i_sb);
+ err = dquot_alloc_inode(inode);
++ reiserfs_write_lock(inode->i_sb);
+ if (err)
+ goto out_end_trans;
+ if (!dir->i_nlink) {
+@@ -1985,8 +1986,10 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
+
+ out_end_trans:
+ journal_end(th, th->t_super, th->t_blocks_allocated);
++ reiserfs_write_unlock(inode->i_sb);
+ /* Drop can be outside and it needs more credits so it's better to have it outside */
+ dquot_drop(inode);
++ reiserfs_write_lock(inode->i_sb);
+ inode->i_flags |= S_NOQUOTA;
+ make_bad_inode(inode);
+
+@@ -3109,10 +3112,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
+ /* must be turned off for recursive notify_change calls */
+ ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
+
+- depth = reiserfs_write_lock_once(inode->i_sb);
+ if (is_quota_modification(inode, attr))
+ dquot_initialize(inode);
+-
++ depth = reiserfs_write_lock_once(inode->i_sb);
+ if (attr->ia_valid & ATTR_SIZE) {
+ /* version 2 items will be caught by the s_maxbytes check
+ ** done for us in vmtruncate
+@@ -3176,7 +3178,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
+ error = journal_begin(&th, inode->i_sb, jbegin_count);
+ if (error)
+ goto out;
++ reiserfs_write_unlock_once(inode->i_sb, depth);
+ error = dquot_transfer(inode, attr);
++ depth = reiserfs_write_lock_once(inode->i_sb);
+ if (error) {
+ journal_end(&th, inode->i_sb, jbegin_count);
+ goto out;
+diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
+index 313d39d..3ae9926 100644
+--- a/fs/reiserfs/stree.c
++++ b/fs/reiserfs/stree.c
+@@ -1968,7 +1968,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
+ key2type(&(key->on_disk_key)));
+ #endif
+
++ reiserfs_write_unlock(inode->i_sb);
+ retval = dquot_alloc_space_nodirty(inode, pasted_size);
++ reiserfs_write_lock(inode->i_sb);
+ if (retval) {
+ pathrelse(search_path);
+ return retval;
+@@ -2061,9 +2063,11 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
+ "reiserquota insert_item(): allocating %u id=%u type=%c",
+ quota_bytes, inode->i_uid, head2type(ih));
+ #endif
++ reiserfs_write_unlock(inode->i_sb);
+ /* We can't dirty inode here. It would be immediately written but
+ * appropriate stat item isn't inserted yet... */
+ retval = dquot_alloc_space_nodirty(inode, quota_bytes);
++ reiserfs_write_lock(inode->i_sb);
+ if (retval) {
+ pathrelse(path);
+ return retval;
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index 5e3527b..569498a 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -254,7 +254,9 @@ static int finish_unfinished(struct super_block *s)
+ retval = remove_save_link_only(s, &save_link_key, 0);
+ continue;
+ }
++ reiserfs_write_unlock(s);
+ dquot_initialize(inode);
++ reiserfs_write_lock(s);
+
+ if (truncate && S_ISDIR(inode->i_mode)) {
+ /* We got a truncate request for a dir which is impossible.
+@@ -1207,7 +1209,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+ kfree(qf_names[i]);
+ #endif
+ err = -EINVAL;
+- goto out_err;
++ goto out_unlock;
+ }
+ #ifdef CONFIG_QUOTA
+ handle_quota_files(s, qf_names, &qfmt);
+@@ -1250,7 +1252,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+ if (blocks) {
+ err = reiserfs_resize(s, blocks);
+ if (err != 0)
+- goto out_err;
++ goto out_unlock;
+ }
+
+ if (*mount_flags & MS_RDONLY) {
+@@ -1260,9 +1262,15 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+ /* it is read-only already */
+ goto out_ok;
+
++ /*
++ * Drop write lock. Quota will retake it when needed and lock
++ * ordering requires calling dquot_suspend() without it.
++ */
++ reiserfs_write_unlock(s);
+ err = dquot_suspend(s, -1);
+ if (err < 0)
+ goto out_err;
++ reiserfs_write_lock(s);
+
+ /* try to remount file system with read-only permissions */
+ if (sb_umount_state(rs) == REISERFS_VALID_FS
+@@ -1272,7 +1280,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+
+ err = journal_begin(&th, s, 10);
+ if (err)
+- goto out_err;
++ goto out_unlock;
+
+ /* Mounting a rw partition read-only. */
+ reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
+@@ -1287,7 +1295,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+
+ if (reiserfs_is_journal_aborted(journal)) {
+ err = journal->j_errno;
+- goto out_err;
++ goto out_unlock;
+ }
+
+ handle_data_mode(s, mount_options);
+@@ -1296,7 +1304,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+ s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */
+ err = journal_begin(&th, s, 10);
+ if (err)
+- goto out_err;
++ goto out_unlock;
+
+ /* Mount a partition which is read-only, read-write */
+ reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
+@@ -1313,11 +1321,17 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
+ SB_JOURNAL(s)->j_must_wait = 1;
+ err = journal_end(&th, s, 10);
+ if (err)
+- goto out_err;
++ goto out_unlock;
+ s->s_dirt = 0;
+
+ if (!(*mount_flags & MS_RDONLY)) {
++ /*
++ * Drop write lock. Quota will retake it when needed and lock
++ * ordering requires calling dquot_resume() without it.
++ */
++ reiserfs_write_unlock(s);
+ dquot_resume(s, -1);
++ reiserfs_write_lock(s);
+ finish_unfinished(s);
+ reiserfs_xattr_init(s, *mount_flags);
+ }
+@@ -1327,9 +1341,10 @@ out_ok:
+ reiserfs_write_unlock(s);
+ return 0;
+
++out_unlock:
++ reiserfs_write_unlock(s);
+ out_err:
+ kfree(new_opts);
+- reiserfs_write_unlock(s);
+ return err;
+ }
+
+@@ -1953,13 +1968,15 @@ static int reiserfs_write_dquot(struct dquot *dquot)
+ REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
+ if (ret)
+ goto out;
++ reiserfs_write_unlock(dquot->dq_sb);
+ ret = dquot_commit(dquot);
++ reiserfs_write_lock(dquot->dq_sb);
+ err =
+ journal_end(&th, dquot->dq_sb,
+ REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
+ if (!ret && err)
+ ret = err;
+- out:
++out:
+ reiserfs_write_unlock(dquot->dq_sb);
+ return ret;
+ }
+@@ -1975,13 +1992,15 @@ static int reiserfs_acquire_dquot(struct dquot *dquot)
+ REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
+ if (ret)
+ goto out;
++ reiserfs_write_unlock(dquot->dq_sb);
+ ret = dquot_acquire(dquot);
++ reiserfs_write_lock(dquot->dq_sb);
+ err =
+ journal_end(&th, dquot->dq_sb,
+ REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
+ if (!ret && err)
+ ret = err;
+- out:
++out:
+ reiserfs_write_unlock(dquot->dq_sb);
+ return ret;
+ }
+@@ -1995,19 +2014,21 @@ static int reiserfs_release_dquot(struct dquot *dquot)
+ ret =
+ journal_begin(&th, dquot->dq_sb,
+ REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
++ reiserfs_write_unlock(dquot->dq_sb);
+ if (ret) {
+ /* Release dquot anyway to avoid endless cycle in dqput() */
+ dquot_release(dquot);
+ goto out;
+ }
+ ret = dquot_release(dquot);
++ reiserfs_write_lock(dquot->dq_sb);
+ err =
+ journal_end(&th, dquot->dq_sb,
+ REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
+ if (!ret && err)
+ ret = err;
+- out:
+ reiserfs_write_unlock(dquot->dq_sb);
++out:
+ return ret;
+ }
+
+@@ -2032,11 +2053,13 @@ static int reiserfs_write_info(struct super_block *sb, int type)
+ ret = journal_begin(&th, sb, 2);
+ if (ret)
+ goto out;
++ reiserfs_write_unlock(sb);
+ ret = dquot_commit_info(sb, type);
++ reiserfs_write_lock(sb);
+ err = journal_end(&th, sb, 2);
+ if (!ret && err)
+ ret = err;
+- out:
++out:
+ reiserfs_write_unlock(sb);
+ return ret;
+ }
+@@ -2060,8 +2083,11 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
+ struct inode *inode;
+ struct reiserfs_transaction_handle th;
+
+- if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA)))
+- return -EINVAL;
++ reiserfs_write_lock(sb);
++ if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA))) {
++ err = -EINVAL;
++ goto out;
++ }
+
+ /* Quotafile not on the same filesystem? */
+ if (path->mnt->mnt_sb != sb) {
+@@ -2103,8 +2129,10 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
+ if (err)
+ goto out;
+ }
+- err = dquot_quota_on(sb, type, format_id, path);
++ reiserfs_write_unlock(sb);
++ return dquot_quota_on(sb, type, format_id, path);
+ out:
++ reiserfs_write_unlock(sb);
+ return err;
+ }
+
+@@ -2178,7 +2206,9 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
+ tocopy = sb->s_blocksize - offset < towrite ?
+ sb->s_blocksize - offset : towrite;
+ tmp_bh.b_state = 0;
++ reiserfs_write_lock(sb);
+ err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE);
++ reiserfs_write_unlock(sb);
+ if (err)
+ goto out;
+ if (offset || tocopy != sb->s_blocksize)
+@@ -2194,10 +2224,12 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
+ flush_dcache_page(bh->b_page);
+ set_buffer_uptodate(bh);
+ unlock_buffer(bh);
++ reiserfs_write_lock(sb);
+ reiserfs_prepare_for_journal(sb, bh, 1);
+ journal_mark_dirty(current->journal_info, sb, bh);
+ if (!journal_quota)
+ reiserfs_add_ordered_list(inode, bh);
++ reiserfs_write_unlock(sb);
+ brelse(bh);
+ offset = 0;
+ towrite -= tocopy;
+diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c
+index 2559d17..5dc48ca 100644
+--- a/fs/ubifs/find.c
++++ b/fs/ubifs/find.c
+@@ -681,8 +681,16 @@ int ubifs_find_free_leb_for_idx(struct ubifs_info *c)
+ if (!lprops) {
+ lprops = ubifs_fast_find_freeable(c);
+ if (!lprops) {
+- ubifs_assert(c->freeable_cnt == 0);
+- if (c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) {
++ /*
++ * The first condition means the following: go scan the
++ * LPT if there are uncategorized lprops, which means
++ * there may be freeable LEBs there (UBIFS does not
++ * store the information about freeable LEBs in the
++ * master node).
++ */
++ if (c->in_a_category_cnt != c->main_lebs ||
++ c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) {
++ ubifs_assert(c->freeable_cnt == 0);
+ lprops = scan_for_leb_for_idx(c);
+ if (IS_ERR(lprops)) {
+ err = PTR_ERR(lprops);
+diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
+index f8a181e..ea9d491 100644
+--- a/fs/ubifs/lprops.c
++++ b/fs/ubifs/lprops.c
+@@ -300,8 +300,11 @@ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops,
+ default:
+ ubifs_assert(0);
+ }
++
+ lprops->flags &= ~LPROPS_CAT_MASK;
+ lprops->flags |= cat;
++ c->in_a_category_cnt += 1;
++ ubifs_assert(c->in_a_category_cnt <= c->main_lebs);
+ }
+
+ /**
+@@ -334,6 +337,9 @@ static void ubifs_remove_from_cat(struct ubifs_info *c,
+ default:
+ ubifs_assert(0);
+ }
++
++ c->in_a_category_cnt -= 1;
++ ubifs_assert(c->in_a_category_cnt >= 0);
+ }
+
+ /**
+diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
+index 27f2255..8bbc99e 100644
+--- a/fs/ubifs/ubifs.h
++++ b/fs/ubifs/ubifs.h
+@@ -1187,6 +1187,8 @@ struct ubifs_debug_info;
+ * @freeable_list: list of freeable non-index LEBs (free + dirty == @leb_size)
+ * @frdi_idx_list: list of freeable index LEBs (free + dirty == @leb_size)
+ * @freeable_cnt: number of freeable LEBs in @freeable_list
++ * @in_a_category_cnt: count of lprops which are in a certain category, which
++ * basically meants that they were loaded from the flash
+ *
+ * @ltab_lnum: LEB number of LPT's own lprops table
+ * @ltab_offs: offset of LPT's own lprops table
+@@ -1416,6 +1418,7 @@ struct ubifs_info {
+ struct list_head freeable_list;
+ struct list_head frdi_idx_list;
+ int freeable_cnt;
++ int in_a_category_cnt;
+
+ int ltab_lnum;
+ int ltab_offs;
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index cf0ac05..2f5a8f7 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -1167,9 +1167,14 @@ xfs_buf_bio_end_io(
+ {
+ xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
+
+- xfs_buf_ioerror(bp, -error);
++ /*
++ * don't overwrite existing errors - otherwise we can lose errors on
++ * buffers that require multiple bios to complete.
++ */
++ if (!bp->b_error)
++ xfs_buf_ioerror(bp, -error);
+
+- if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
++ if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
+ invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
+
+ _xfs_buf_ioend(bp, 1);
+@@ -1245,6 +1250,11 @@ next_chunk:
+ if (size)
+ goto next_chunk;
+ } else {
++ /*
++ * This is guaranteed not to be the last io reference count
++ * because the caller (xfs_buf_iorequest) holds a count itself.
++ */
++ atomic_dec(&bp->b_io_remaining);
+ xfs_buf_ioerror(bp, EIO);
+ bio_put(bio);
+ }
+diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
+index dd2e44f..9d709d1 100644
+--- a/include/linux/ptp_clock_kernel.h
++++ b/include/linux/ptp_clock_kernel.h
+@@ -50,7 +50,8 @@ struct ptp_clock_request {
+ * clock operations
+ *
+ * @adjfreq: Adjusts the frequency of the hardware clock.
+- * parameter delta: Desired period change in parts per billion.
++ * parameter delta: Desired frequency offset from nominal frequency
++ * in parts per billion
+ *
+ * @adjtime: Shifts the time of the hardware clock.
+ * parameter delta: Desired change in nanoseconds.
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 80fb1c6..77bccfc 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -716,7 +716,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
+ struct futex_pi_state **ps,
+ struct task_struct *task, int set_waiters)
+ {
+- int lock_taken, ret, ownerdied = 0;
++ int lock_taken, ret, force_take = 0;
+ u32 uval, newval, curval, vpid = task_pid_vnr(task);
+
+ retry:
+@@ -755,17 +755,15 @@ retry:
+ newval = curval | FUTEX_WAITERS;
+
+ /*
+- * There are two cases, where a futex might have no owner (the
+- * owner TID is 0): OWNER_DIED. We take over the futex in this
+- * case. We also do an unconditional take over, when the owner
+- * of the futex died.
+- *
+- * This is safe as we are protected by the hash bucket lock !
++ * Should we force take the futex? See below.
+ */
+- if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
+- /* Keep the OWNER_DIED bit */
++ if (unlikely(force_take)) {
++ /*
++ * Keep the OWNER_DIED and the WAITERS bit and set the
++ * new TID value.
++ */
+ newval = (curval & ~FUTEX_TID_MASK) | vpid;
+- ownerdied = 0;
++ force_take = 0;
+ lock_taken = 1;
+ }
+
+@@ -775,7 +773,7 @@ retry:
+ goto retry;
+
+ /*
+- * We took the lock due to owner died take over.
++ * We took the lock due to forced take over.
+ */
+ if (unlikely(lock_taken))
+ return 1;
+@@ -790,20 +788,25 @@ retry:
+ switch (ret) {
+ case -ESRCH:
+ /*
+- * No owner found for this futex. Check if the
+- * OWNER_DIED bit is set to figure out whether
+- * this is a robust futex or not.
++ * We failed to find an owner for this
++ * futex. So we have no pi_state to block
++ * on. This can happen in two cases:
++ *
++ * 1) The owner died
++ * 2) A stale FUTEX_WAITERS bit
++ *
++ * Re-read the futex value.
+ */
+ if (get_futex_value_locked(&curval, uaddr))
+ return -EFAULT;
+
+ /*
+- * We simply start over in case of a robust
+- * futex. The code above will take the futex
+- * and return happy.
++ * If the owner died or we have a stale
++ * WAITERS bit the owner TID in the user space
++ * futex is 0.
+ */
+- if (curval & FUTEX_OWNER_DIED) {
+- ownerdied = 1;
++ if (!(curval & FUTEX_TID_MASK)) {
++ force_take = 1;
+ goto retry;
+ }
+ default:
+@@ -840,6 +843,9 @@ static void wake_futex(struct futex_q *q)
+ {
+ struct task_struct *p = q->task;
+
++ if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
++ return;
++
+ /*
+ * We set q->lock_ptr = NULL _before_ we wake up the task. If
+ * a non-futex wake up happens on another CPU then the task
+@@ -1075,6 +1081,10 @@ retry_private:
+
+ plist_for_each_entry_safe(this, next, head, list) {
+ if (match_futex (&this->key, &key1)) {
++ if (this->pi_state || this->rt_waiter) {
++ ret = -EINVAL;
++ goto out_unlock;
++ }
+ wake_futex(this);
+ if (++ret >= nr_wake)
+ break;
+@@ -1087,6 +1097,10 @@ retry_private:
+ op_ret = 0;
+ plist_for_each_entry_safe(this, next, head, list) {
+ if (match_futex (&this->key, &key2)) {
++ if (this->pi_state || this->rt_waiter) {
++ ret = -EINVAL;
++ goto out_unlock;
++ }
+ wake_futex(this);
+ if (++op_ret >= nr_wake2)
+ break;
+@@ -1095,6 +1109,7 @@ retry_private:
+ ret += op_ret;
+ }
+
++out_unlock:
+ double_unlock_hb(hb1, hb2);
+ out_put_keys:
+ put_futex_key(&key2);
+@@ -1384,9 +1399,13 @@ retry_private:
+ /*
+ * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
+ * be paired with each other and no other futex ops.
++ *
++ * We should never be requeueing a futex_q with a pi_state,
++ * which is awaiting a futex_unlock_pi().
+ */
+ if ((requeue_pi && !this->rt_waiter) ||
+- (!requeue_pi && this->rt_waiter)) {
++ (!requeue_pi && this->rt_waiter) ||
++ this->pi_state) {
+ ret = -EINVAL;
+ break;
+ }
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index 1d7bca7..a8bc4d9 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -113,7 +113,7 @@ static unsigned long get_timestamp(int this_cpu)
+ return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
+ }
+
+-static unsigned long get_sample_period(void)
++static u64 get_sample_period(void)
+ {
+ /*
+ * convert watchdog_thresh from seconds to ns
+@@ -121,7 +121,7 @@ static unsigned long get_sample_period(void)
+ * increment before the hardlockup detector generates
+ * a warning
+ */
+- return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
++ return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
+ }
+
+ /* Commands for resetting the watchdog */
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 43a19c5..d551d5f 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -2052,8 +2052,10 @@ static int rescuer_thread(void *__wq)
+ repeat:
+ set_current_state(TASK_INTERRUPTIBLE);
+
+- if (kthread_should_stop())
++ if (kthread_should_stop()) {
++ __set_current_state(TASK_RUNNING);
+ return 0;
++ }
+
+ /*
+ * See whether any cpu is asking for help. Unbounded
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index c8425b1..d027a24 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1457,17 +1457,26 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg)
+ u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
+ {
+ u64 limit;
+- u64 memsw;
+
+ limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
+- limit += total_swap_pages << PAGE_SHIFT;
+
+- memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+ /*
+- * If memsw is finite and limits the amount of swap space available
+- * to this memcg, return that limit.
++ * Do not consider swap space if we cannot swap due to swappiness
+ */
+- return min(limit, memsw);
++ if (mem_cgroup_swappiness(memcg)) {
++ u64 memsw;
++
++ limit += total_swap_pages << PAGE_SHIFT;
++ memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
++
++ /*
++ * If memsw is finite and limits the amount of swap space
++ * available to this memcg, return that limit.
++ */
++ limit = min(limit, memsw);
++ }
++
++ return limit;
+ }
+
+ /*
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 5bd5bb1..1b03878 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1475,9 +1475,17 @@ int soft_offline_page(struct page *page, int flags)
+ {
+ int ret;
+ unsigned long pfn = page_to_pfn(page);
++ struct page *hpage = compound_trans_head(page);
+
+ if (PageHuge(page))
+ return soft_offline_huge_page(page, flags);
++ if (PageTransHuge(hpage)) {
++ if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
++ pr_info("soft offline: %#lx: failed to split THP\n",
++ pfn);
++ return -EBUSY;
++ }
++ }
+
+ ret = get_any_page(page, pfn, flags);
+ if (ret < 0)
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 126ca35..2d46e23 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -595,7 +595,7 @@ static void shmem_evict_inode(struct inode *inode)
+ kfree(xattr->name);
+ kfree(xattr);
+ }
+- BUG_ON(inode->i_blocks);
++ WARN_ON(inode->i_blocks);
+ shmem_free_inode(inode->i_sb);
+ end_writeback(inode);
+ }
+diff --git a/mm/sparse.c b/mm/sparse.c
+index bf7d3cc..42935b5 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -622,7 +622,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+ {
+ return; /* XXX: Not implemented yet */
+ }
+-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
++static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
+ {
+ }
+ #else
+@@ -663,10 +663,11 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+ get_order(sizeof(struct page) * nr_pages));
+ }
+
+-static void free_map_bootmem(struct page *page, unsigned long nr_pages)
++static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
+ {
+ unsigned long maps_section_nr, removing_section_nr, i;
+ unsigned long magic;
++ struct page *page = virt_to_page(memmap);
+
+ for (i = 0; i < nr_pages; i++, page++) {
+ magic = (unsigned long) page->lru.next;
+@@ -715,13 +716,10 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
+ */
+
+ if (memmap) {
+- struct page *memmap_page;
+- memmap_page = virt_to_page(memmap);
+-
+ nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
+ >> PAGE_SHIFT;
+
+- free_map_bootmem(memmap_page, nr_pages);
++ free_map_bootmem(memmap, nr_pages);
+ }
+ }
+
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 313381c..1e4ee1a 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2492,6 +2492,19 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
+ }
+ #endif
+
++static bool zone_balanced(struct zone *zone, int order,
++ unsigned long balance_gap, int classzone_idx)
++{
++ if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
++ balance_gap, classzone_idx, 0))
++ return false;
++
++ if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
++ return false;
++
++ return true;
++}
++
+ /*
+ * pgdat_balanced is used when checking if a node is balanced for high-order
+ * allocations. Only zones that meet watermarks and are in a zone allowed
+@@ -2551,8 +2564,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
+ continue;
+ }
+
+- if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
+- i, 0))
++ if (!zone_balanced(zone, order, 0, i))
+ all_zones_ok = false;
+ else
+ balanced += zone->present_pages;
+@@ -2655,8 +2667,7 @@ loop_again:
+ shrink_active_list(SWAP_CLUSTER_MAX, zone,
+ &sc, priority, 0);
+
+- if (!zone_watermark_ok_safe(zone, order,
+- high_wmark_pages(zone), 0, 0)) {
++ if (!zone_balanced(zone, order, 0, 0)) {
+ end_zone = i;
+ break;
+ } else {
+@@ -2717,9 +2728,8 @@ loop_again:
+ (zone->present_pages +
+ KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+ KSWAPD_ZONE_BALANCE_GAP_RATIO);
+- if (!zone_watermark_ok_safe(zone, order,
+- high_wmark_pages(zone) + balance_gap,
+- end_zone, 0)) {
++ if (!zone_balanced(zone, order,
++ balance_gap, end_zone)) {
+ shrink_zone(priority, zone, &sc);
+
+ reclaim_state->reclaimed_slab = 0;
+@@ -2746,8 +2756,7 @@ loop_again:
+ continue;
+ }
+
+- if (!zone_watermark_ok_safe(zone, order,
+- high_wmark_pages(zone), end_zone, 0)) {
++ if (!zone_balanced(zone, order, 0, end_zone)) {
+ all_zones_ok = 0;
+ /*
+ * We are still under min water mark. This
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 151b773..3910c1f 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1084,6 +1084,9 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ op->sk = sk;
+ op->ifindex = ifindex;
+
++ /* ifindex for timeout events w/o previous frame reception */
++ op->rx_ifindex = ifindex;
++
+ /* initialize uninitialized (kzalloc) structure */
+ hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ op->timer.function = bcm_rx_timeout_handler;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 480be72..2aac4ec 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2829,8 +2829,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ if (unlikely(tcpu != next_cpu) &&
+ (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
+ ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
+- rflow->last_qtail)) >= 0))
++ rflow->last_qtail)) >= 0)) {
++ tcpu = next_cpu;
+ rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
++ }
+
+ if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
+ *rflowp = rflow;
+diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
+index 277faef..0387da0 100644
+--- a/net/core/dev_addr_lists.c
++++ b/net/core/dev_addr_lists.c
+@@ -308,7 +308,8 @@ int dev_addr_del(struct net_device *dev, unsigned char *addr,
+ */
+ ha = list_first_entry(&dev->dev_addrs.list,
+ struct netdev_hw_addr, list);
+- if (ha->addr == dev->dev_addr && ha->refcount == 1)
++ if (!memcmp(ha->addr, addr, dev->addr_len) &&
++ ha->type == addr_type && ha->refcount == 1)
+ return -ENOENT;
+
+ err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 09ff51b..0106d25 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -468,18 +468,27 @@ static int do_ip_setsockopt(struct sock *sk, int level,
+ struct inet_sock *inet = inet_sk(sk);
+ int val = 0, err;
+
+- if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) |
+- (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) |
+- (1<<IP_RETOPTS) | (1<<IP_TOS) |
+- (1<<IP_TTL) | (1<<IP_HDRINCL) |
+- (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
+- (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
+- (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
+- (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) ||
+- optname == IP_MULTICAST_TTL ||
+- optname == IP_MULTICAST_ALL ||
+- optname == IP_MULTICAST_LOOP ||
+- optname == IP_RECVORIGDSTADDR) {
++ switch (optname) {
++ case IP_PKTINFO:
++ case IP_RECVTTL:
++ case IP_RECVOPTS:
++ case IP_RECVTOS:
++ case IP_RETOPTS:
++ case IP_TOS:
++ case IP_TTL:
++ case IP_HDRINCL:
++ case IP_MTU_DISCOVER:
++ case IP_RECVERR:
++ case IP_ROUTER_ALERT:
++ case IP_FREEBIND:
++ case IP_PASSSEC:
++ case IP_TRANSPARENT:
++ case IP_MINTTL:
++ case IP_NODEFRAG:
++ case IP_MULTICAST_TTL:
++ case IP_MULTICAST_ALL:
++ case IP_MULTICAST_LOOP:
++ case IP_RECVORIGDSTADDR:
+ if (optlen >= sizeof(int)) {
+ if (get_user(val, (int __user *) optval))
+ return -EFAULT;
+diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
+index 9290048..e32b542 100644
+--- a/net/ipv4/netfilter/nf_nat_standalone.c
++++ b/net/ipv4/netfilter/nf_nat_standalone.c
+@@ -194,7 +194,8 @@ nf_nat_out(unsigned int hooknum,
+
+ if ((ct->tuplehash[dir].tuple.src.u3.ip !=
+ ct->tuplehash[!dir].tuple.dst.u3.ip) ||
+- (ct->tuplehash[dir].tuple.src.u.all !=
++ (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
++ ct->tuplehash[dir].tuple.src.u.all !=
+ ct->tuplehash[!dir].tuple.dst.u.all)
+ )
+ return ip_xfrm_me_harder(skb) == 0 ? ret : NF_DROP;
+@@ -230,7 +231,8 @@ nf_nat_local_fn(unsigned int hooknum,
+ ret = NF_DROP;
+ }
+ #ifdef CONFIG_XFRM
+- else if (ct->tuplehash[dir].tuple.dst.u.all !=
++ else if (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
++ ct->tuplehash[dir].tuple.dst.u.all !=
+ ct->tuplehash[!dir].tuple.src.u.all)
+ if (ip_xfrm_me_harder(skb))
+ ret = NF_DROP;
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index 26cb08c..b204df8 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -798,6 +798,7 @@ pref_skip_coa:
+ if (val < 0 || val > 255)
+ goto e_inval;
+ np->min_hopcount = val;
++ retv = 0;
+ break;
+ case IPV6_DONTFRAG:
+ np->dontfrag = valbool;
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index 8c7364b..9e20cb8 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -965,10 +965,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
+
+ mutex_lock(&sdata->u.ibss.mtx);
+
+- sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
+- memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
+- sdata->u.ibss.ssid_len = 0;
+-
+ active_ibss = ieee80211_sta_active_ibss(sdata);
+
+ if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
+@@ -989,6 +985,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
+ }
+ }
+
++ ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
++ memset(ifibss->bssid, 0, ETH_ALEN);
++ ifibss->ssid_len = 0;
++
+ sta_info_flush(sdata->local, sdata);
+
+ /* remove beacon */
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 1fdd8ff..1c775f0 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -1129,6 +1129,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+ struct ieee80211_local *local = sdata->local;
+ struct sk_buff_head pending;
+ int filtered = 0, buffered = 0, ac;
++ unsigned long flags;
+
+ clear_sta_flag(sta, WLAN_STA_SP);
+
+@@ -1144,12 +1145,16 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ int count = skb_queue_len(&pending), tmp;
+
++ spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
+ skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
++ spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
+ tmp = skb_queue_len(&pending);
+ filtered += tmp - count;
+ count = tmp;
+
++ spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
+ skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
++ spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
+ tmp = skb_queue_len(&pending);
+ buffered += tmp - count;
+ }
+diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
+index 8235b86..57ad466 100644
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -159,21 +159,18 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
+ * sCL -> sSS
+ */
+ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
+-/*synack*/ { sIV, sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
++/*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
+ /*
+ * sNO -> sIV Too late and no reason to do anything
+ * sSS -> sIV Client can't send SYN and then SYN/ACK
+ * sS2 -> sSR SYN/ACK sent to SYN2 in simultaneous open
+- * sSR -> sIG
+- * sES -> sIG Error: SYNs in window outside the SYN_SENT state
+- * are errors. Receiver will reply with RST
+- * and close the connection.
+- * Or we are not in sync and hold a dead connection.
+- * sFW -> sIG
+- * sCW -> sIG
+- * sLA -> sIG
+- * sTW -> sIG
+- * sCL -> sIG
++ * sSR -> sSR Late retransmitted SYN/ACK in simultaneous open
++ * sES -> sIV Invalid SYN/ACK packets sent by the client
++ * sFW -> sIV
++ * sCW -> sIV
++ * sLA -> sIV
++ * sTW -> sIV
++ * sCL -> sIV
+ */
+ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
+ /*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
+@@ -628,15 +625,9 @@ static bool tcp_in_window(const struct nf_conn *ct,
+ ack = sack = receiver->td_end;
+ }
+
+- if (seq == end
+- && (!tcph->rst
+- || (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)))
++ if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)
+ /*
+- * Packets contains no data: we assume it is valid
+- * and check the ack value only.
+- * However RST segments are always validated by their
+- * SEQ number, except when seq == 0 (reset sent answering
+- * SYN.
++ * RST sent answering SYN.
+ */
+ seq = end = sender->td_end;
+
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index fa39731..0b08905 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -125,9 +125,8 @@ static const struct ieee80211_regdomain world_regdom = {
+ .reg_rules = {
+ /* IEEE 802.11b/g, channels 1..11 */
+ REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
+- /* IEEE 802.11b/g, channels 12..13. No HT40
+- * channel fits here. */
+- REG_RULE(2467-10, 2472+10, 20, 6, 20,
++ /* IEEE 802.11b/g, channels 12..13. */
++ REG_RULE(2467-10, 2472+10, 40, 6, 20,
+ NL80211_RRF_PASSIVE_SCAN |
+ NL80211_RRF_NO_IBSS),
+ /* IEEE 802.11 channel 14 - Only JP enables
+diff --git a/security/device_cgroup.c b/security/device_cgroup.c
+index 4450fbe..92e24bb 100644
+--- a/security/device_cgroup.c
++++ b/security/device_cgroup.c
+@@ -202,8 +202,8 @@ static void devcgroup_destroy(struct cgroup_subsys *ss,
+
+ dev_cgroup = cgroup_to_devcgroup(cgroup);
+ list_for_each_entry_safe(wh, tmp, &dev_cgroup->whitelist, list) {
+- list_del(&wh->list);
+- kfree(wh);
++ list_del_rcu(&wh->list);
++ kfree_rcu(wh, rcu);
+ }
+ kfree(dev_cgroup);
+ }
+@@ -278,7 +278,7 @@ static int may_access_whitelist(struct dev_cgroup *c,
+ {
+ struct dev_whitelist_item *whitem;
+
+- list_for_each_entry(whitem, &c->whitelist, list) {
++ list_for_each_entry_rcu(whitem, &c->whitelist, list) {
+ if (whitem->type & DEV_ALL)
+ return 1;
+ if ((refwh->type & DEV_BLOCK) && !(whitem->type & DEV_BLOCK))
+diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
+index 3bf46ab..46a5b81 100644
+--- a/security/selinux/netnode.c
++++ b/security/selinux/netnode.c
+@@ -174,7 +174,8 @@ static void sel_netnode_insert(struct sel_netnode *node)
+ if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
+ struct sel_netnode *tail;
+ tail = list_entry(
+- rcu_dereference(sel_netnode_hash[idx].list.prev),
++ rcu_dereference_protected(sel_netnode_hash[idx].list.prev,
++ lockdep_is_held(&sel_netnode_lock)),
+ struct sel_netnode, list);
+ list_del_rcu(&tail->list);
+ kfree_rcu(tail, rcu);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 3ce2da2..1a09fbf 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6026,6 +6026,9 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
+ { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
+ { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
++ { .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 },
++ { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 },
++ { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 },
+ { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
+ .patch = patch_alc861 },
+ { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
+diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c
+index 41ca4d9..f81b185 100644
+--- a/sound/soc/codecs/wm8978.c
++++ b/sound/soc/codecs/wm8978.c
+@@ -749,7 +749,7 @@ static int wm8978_hw_params(struct snd_pcm_substream *substream,
+ wm8978->mclk_idx = -1;
+ f_sel = wm8978->f_mclk;
+ } else {
+- if (!wm8978->f_pllout) {
++ if (!wm8978->f_opclk) {
+ /* We only enter here, if OPCLK is not used */
+ int ret = wm8978_configure_pll(codec);
+ if (ret < 0)
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 0dc441c..b516488 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -3009,7 +3009,7 @@ void snd_soc_dapm_shutdown(struct snd_soc_card *card)
+ {
+ struct snd_soc_codec *codec;
+
+- list_for_each_entry(codec, &card->codec_dev_list, list) {
++ list_for_each_entry(codec, &card->codec_dev_list, card_list) {
+ soc_dapm_shutdown_codec(&codec->dapm);
+ if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY)
+ snd_soc_dapm_set_bias_level(&codec->dapm,
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index c83f614..eeefbce 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -148,6 +148,7 @@ struct snd_usb_midi_out_endpoint {
+ struct snd_usb_midi_out_endpoint* ep;
+ struct snd_rawmidi_substream *substream;
+ int active;
++ bool autopm_reference;
+ uint8_t cable; /* cable number << 4 */
+ uint8_t state;
+ #define STATE_UNKNOWN 0
+@@ -1076,7 +1077,8 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
+ return -ENXIO;
+ }
+ err = usb_autopm_get_interface(umidi->iface);
+- if (err < 0)
++ port->autopm_reference = err >= 0;
++ if (err < 0 && err != -EACCES)
+ return -EIO;
+ substream->runtime->private_data = port;
+ port->state = STATE_UNKNOWN;
+@@ -1087,9 +1089,11 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
+ static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
+ {
+ struct snd_usb_midi* umidi = substream->rmidi->private_data;
++ struct usbmidi_out_port *port = substream->runtime->private_data;
+
+ substream_open(substream, 0);
+- usb_autopm_put_interface(umidi->iface);
++ if (port->autopm_reference)
++ usb_autopm_put_interface(umidi->iface);
+ return 0;
+ }
+
diff --git a/1035_linux-3.2.36.patch b/1035_linux-3.2.36.patch
new file mode 100644
index 00000000..5d192a39
--- /dev/null
+++ b/1035_linux-3.2.36.patch
@@ -0,0 +1,6434 @@
+diff --git a/Documentation/hwmon/coretemp b/Documentation/hwmon/coretemp
+index 84d46c0..eb5502e 100644
+--- a/Documentation/hwmon/coretemp
++++ b/Documentation/hwmon/coretemp
+@@ -6,7 +6,9 @@ Supported chips:
+ Prefix: 'coretemp'
+ CPUID: family 0x6, models 0xe (Pentium M DC), 0xf (Core 2 DC 65nm),
+ 0x16 (Core 2 SC 65nm), 0x17 (Penryn 45nm),
+- 0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield)
++ 0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield),
++ 0x26 (Tunnel Creek Atom), 0x27 (Medfield Atom),
++ 0x36 (Cedar Trail Atom)
+ Datasheet: Intel 64 and IA-32 Architectures Software Developer's Manual
+ Volume 3A: System Programming Guide
+ http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
+@@ -65,6 +67,11 @@ Process Processor TjMax(C)
+ U3400 105
+ P4505/P4500 90
+
++32nm Atom Processors
++ Z2460 90
++ D2700/2550/2500 100
++ N2850/2800/2650/2600 100
++
+ 45nm Xeon Processors 5400 Quad-Core
+ X5492, X5482, X5472, X5470, X5460, X5450 85
+ E5472, E5462, E5450/40/30/20/10/05 85
+@@ -85,6 +92,9 @@ Process Processor TjMax(C)
+ N475/470/455/450 100
+ N280/270 90
+ 330/230 125
++ E680/660/640/620 90
++ E680T/660T/640T/620T 110
++ CE4170/4150/4110 110
+
+ 45nm Core2 Processors
+ Solo ULV SU3500/3300 100
+diff --git a/Makefile b/Makefile
+index d985af0..2052c29 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 35
++SUBLEVEL = 36
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
+index c93a22a..3a925fb 100644
+--- a/arch/arm/include/asm/hwcap.h
++++ b/arch/arm/include/asm/hwcap.h
+@@ -18,11 +18,12 @@
+ #define HWCAP_THUMBEE (1 << 11)
+ #define HWCAP_NEON (1 << 12)
+ #define HWCAP_VFPv3 (1 << 13)
+-#define HWCAP_VFPv3D16 (1 << 14)
++#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
+ #define HWCAP_TLS (1 << 15)
+ #define HWCAP_VFPv4 (1 << 16)
+ #define HWCAP_IDIVA (1 << 17)
+ #define HWCAP_IDIVT (1 << 18)
++#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
+ #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
+
+ #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
+index bf53047..c49c8f7 100644
+--- a/arch/arm/include/asm/vfpmacros.h
++++ b/arch/arm/include/asm/vfpmacros.h
+@@ -27,9 +27,9 @@
+ #if __LINUX_ARM_ARCH__ <= 6
+ ldr \tmp, =elf_hwcap @ may not have MVFR regs
+ ldr \tmp, [\tmp, #0]
+- tst \tmp, #HWCAP_VFPv3D16
+- ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+- addne \base, \base, #32*4 @ step over unused register space
++ tst \tmp, #HWCAP_VFPD32
++ ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
++ addeq \base, \base, #32*4 @ step over unused register space
+ #else
+ VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
+ and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
+@@ -51,9 +51,9 @@
+ #if __LINUX_ARM_ARCH__ <= 6
+ ldr \tmp, =elf_hwcap @ may not have MVFR regs
+ ldr \tmp, [\tmp, #0]
+- tst \tmp, #HWCAP_VFPv3D16
+- stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+- addne \base, \base, #32*4 @ step over unused register space
++ tst \tmp, #HWCAP_VFPD32
++ stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
++ addeq \base, \base, #32*4 @ step over unused register space
+ #else
+ VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
+ and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
+diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
+index 5f452f8..5d9b1ee 100644
+--- a/arch/arm/kernel/swp_emulate.c
++++ b/arch/arm/kernel/swp_emulate.c
+@@ -108,10 +108,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr)
+ {
+ siginfo_t info;
+
++ down_read(&current->mm->mmap_sem);
+ if (find_vma(current->mm, addr) == NULL)
+ info.si_code = SEGV_MAPERR;
+ else
+ info.si_code = SEGV_ACCERR;
++ up_read(&current->mm->mmap_sem);
+
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index dc8c550..9e28fdb 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -475,7 +475,7 @@ static void __init build_mem_type_table(void)
+ }
+
+ for (i = 0; i < 16; i++) {
+- unsigned long v = pgprot_val(protection_map[i]);
++ pteval_t v = pgprot_val(protection_map[i]);
+ protection_map[i] = __pgprot(v | user_pgprot);
+ }
+
+diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
+index f0702f3..7c815b2 100644
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -610,11 +610,14 @@ static int __init vfp_init(void)
+ elf_hwcap |= HWCAP_VFPv3;
+
+ /*
+- * Check for VFPv3 D16. CPUs in this configuration
+- * only have 16 x 64bit registers.
++ * Check for VFPv3 D16 and VFPv4 D16. CPUs in
++ * this configuration only have 16 x 64bit
++ * registers.
+ */
+ if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
+- elf_hwcap |= HWCAP_VFPv3D16;
++ elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
++ else
++ elf_hwcap |= HWCAP_VFPD32;
+ }
+ #endif
+ /*
+diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h
+index 32567bc..ac12ae2 100644
+--- a/arch/cris/include/asm/io.h
++++ b/arch/cris/include/asm/io.h
+@@ -133,12 +133,39 @@ static inline void writel(unsigned int b, volatile void __iomem *addr)
+ #define insb(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,1,count) : 0)
+ #define insw(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,2,count) : 0)
+ #define insl(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,4,count) : 0)
+-#define outb(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,1,1)
+-#define outw(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,2,1)
+-#define outl(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,4,1)
+-#define outsb(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,1,count)
+-#define outsw(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,2,count)
+-#define outsl(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,3,count)
++static inline void outb(unsigned char data, unsigned int port)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *) &data, 1, 1);
++}
++static inline void outw(unsigned short data, unsigned int port)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *) &data, 2, 1);
++}
++static inline void outl(unsigned int data, unsigned int port)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *) &data, 4, 1);
++}
++static inline void outsb(unsigned int port, const void *addr,
++ unsigned long count)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *)addr, 1, count);
++}
++static inline void outsw(unsigned int port, const void *addr,
++ unsigned long count)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *)addr, 2, count);
++}
++static inline void outsl(unsigned int port, const void *addr,
++ unsigned long count)
++{
++ if (cris_iops)
++ cris_iops->write_io(port, (void *)addr, 4, count);
++}
+
+ /*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index c47f96e..bf128d7 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -72,9 +72,7 @@ void __noreturn cpu_idle(void)
+ }
+ }
+ #ifdef CONFIG_HOTPLUG_CPU
+- if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
+- (system_state == SYSTEM_RUNNING ||
+- system_state == SYSTEM_BOOTING))
++ if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map))
+ play_dead();
+ #endif
+ tick_nohz_restart_sched_tick();
+diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
+index 06c7251..cdf6b3f 100644
+--- a/arch/powerpc/kernel/head_64.S
++++ b/arch/powerpc/kernel/head_64.S
+@@ -435,7 +435,7 @@ _STATIC(__after_prom_start)
+ tovirt(r6,r6) /* on booke, we already run at PAGE_OFFSET */
+ #endif
+
+-#ifdef CONFIG_CRASH_DUMP
++#ifdef CONFIG_RELOCATABLE
+ /*
+ * Check if the kernel has to be running as relocatable kernel based on the
+ * variable __run_at_load, if it is set the kernel is treated as relocatable
+diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
+index 549bb2c..ded8a1a 100644
+--- a/arch/powerpc/kvm/44x_emulate.c
++++ b/arch/powerpc/kvm/44x_emulate.c
+@@ -79,6 +79,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ run->dcr.dcrn = dcrn;
+ run->dcr.data = 0;
+ run->dcr.is_write = 0;
++ vcpu->arch.dcr_is_write = 0;
+ vcpu->arch.io_gpr = rt;
+ vcpu->arch.dcr_needed = 1;
+ kvmppc_account_exit(vcpu, DCR_EXITS);
+@@ -100,6 +101,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ run->dcr.dcrn = dcrn;
+ run->dcr.data = kvmppc_get_gpr(vcpu, rs);
+ run->dcr.is_write = 1;
++ vcpu->arch.dcr_is_write = 1;
+ vcpu->arch.dcr_needed = 1;
+ kvmppc_account_exit(vcpu, DCR_EXITS);
+ emulated = EMULATE_DO_DCR;
+diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
+index 1b5dc1a..daf793b 100644
+--- a/arch/powerpc/platforms/embedded6xx/wii.c
++++ b/arch/powerpc/platforms/embedded6xx/wii.c
+@@ -85,9 +85,11 @@ void __init wii_memory_fixups(void)
+ wii_hole_start = p[0].base + p[0].size;
+ wii_hole_size = p[1].base - wii_hole_start;
+
+- pr_info("MEM1: <%08llx %08llx>\n", p[0].base, p[0].size);
++ pr_info("MEM1: <%08llx %08llx>\n",
++ (unsigned long long) p[0].base, (unsigned long long) p[0].size);
+ pr_info("HOLE: <%08lx %08lx>\n", wii_hole_start, wii_hole_size);
+- pr_info("MEM2: <%08llx %08llx>\n", p[1].base, p[1].size);
++ pr_info("MEM2: <%08llx %08llx>\n",
++ (unsigned long long) p[1].base, (unsigned long long) p[1].size);
+
+ p[0].size += wii_hole_size + p[1].size;
+
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index d3cb86c..dffcaa4 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -749,7 +749,7 @@ static int __init kvm_s390_init(void)
+ }
+ memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
+ facilities[0] &= 0xff00fff3f47c0000ULL;
+- facilities[1] &= 0x201c000000000000ULL;
++ facilities[1] &= 0x001c000000000000ULL;
+ return 0;
+ }
+
+diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
+index 1bb0bf4..4970ef0 100644
+--- a/arch/x86/kernel/hpet.c
++++ b/arch/x86/kernel/hpet.c
+@@ -429,7 +429,7 @@ void hpet_msi_unmask(struct irq_data *data)
+
+ /* unmask it */
+ cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
+- cfg |= HPET_TN_FSB;
++ cfg |= HPET_TN_ENABLE | HPET_TN_FSB;
+ hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
+ }
+
+@@ -440,7 +440,7 @@ void hpet_msi_mask(struct irq_data *data)
+
+ /* mask it */
+ cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
+- cfg &= ~HPET_TN_FSB;
++ cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB);
+ hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
+ }
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 4fc5323..f4063fd 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -578,6 +578,9 @@ static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
+ {
+ struct kvm_cpuid_entry2 *best;
+
++ if (!static_cpu_has(X86_FEATURE_XSAVE))
++ return 0;
++
+ best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ return best && (best->ecx & bit(X86_FEATURE_XSAVE));
+ }
+@@ -6149,6 +6152,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ int pending_vec, max_bits, idx;
+ struct desc_ptr dt;
+
++ if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
++ return -EINVAL;
++
+ dt.size = sregs->idt.limit;
+ dt.address = sregs->idt.base;
+ kvm_x86_ops->set_idt(vcpu, &dt);
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index 5535477..a1a4b8e 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -34,6 +34,7 @@
+ #include <linux/dmi.h>
+ #include <linux/slab.h>
+ #include <linux/suspend.h>
++#include <asm/unaligned.h>
+
+ #ifdef CONFIG_ACPI_PROCFS_POWER
+ #include <linux/proc_fs.h>
+@@ -95,6 +96,18 @@ enum {
+ ACPI_BATTERY_ALARM_PRESENT,
+ ACPI_BATTERY_XINFO_PRESENT,
+ ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
++ /* On Lenovo Thinkpad models from 2010 and 2011, the power unit
++ switches between mWh and mAh depending on whether the system
++ is running on battery or not. When mAh is the unit, most
++ reported values are incorrect and need to be adjusted by
++ 10000/design_voltage. Verified on x201, t410, t410s, and x220.
++ Pre-2010 and 2012 models appear to always report in mWh and
++ are thus unaffected (tested with t42, t61, t500, x200, x300,
++ and x230). Also, in mid-2012 Lenovo issued a BIOS update for
++ the 2011 models that fixes the issue (tested on x220 with a
++ post-1.29 BIOS), but as of Nov. 2012, no such update is
++ available for the 2010 models. */
++ ACPI_BATTERY_QUIRK_THINKPAD_MAH,
+ };
+
+ struct acpi_battery {
+@@ -429,6 +442,21 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
+ kfree(buffer.pointer);
+ if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
+ battery->full_charge_capacity = battery->design_capacity;
++ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) &&
++ battery->power_unit && battery->design_voltage) {
++ battery->design_capacity = battery->design_capacity *
++ 10000 / battery->design_voltage;
++ battery->full_charge_capacity = battery->full_charge_capacity *
++ 10000 / battery->design_voltage;
++ battery->design_capacity_warning =
++ battery->design_capacity_warning *
++ 10000 / battery->design_voltage;
++ /* Curiously, design_capacity_low, unlike the rest of them,
++ is correct. */
++ /* capacity_granularity_* equal 1 on the systems tested, so
++ it's impossible to tell if they would need an adjustment
++ or not if their values were higher. */
++ }
+ return result;
+ }
+
+@@ -477,6 +505,11 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
+ && battery->capacity_now >= 0 && battery->capacity_now <= 100)
+ battery->capacity_now = (battery->capacity_now *
+ battery->full_charge_capacity) / 100;
++ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) &&
++ battery->power_unit && battery->design_voltage) {
++ battery->capacity_now = battery->capacity_now *
++ 10000 / battery->design_voltage;
++ }
+ return result;
+ }
+
+@@ -586,6 +619,24 @@ static void sysfs_remove_battery(struct acpi_battery *battery)
+ mutex_unlock(&battery->sysfs_lock);
+ }
+
++static void find_battery(const struct dmi_header *dm, void *private)
++{
++ struct acpi_battery *battery = (struct acpi_battery *)private;
++ /* Note: the hardcoded offsets below have been extracted from
++ the source code of dmidecode. */
++ if (dm->type == DMI_ENTRY_PORTABLE_BATTERY && dm->length >= 8) {
++ const u8 *dmi_data = (const u8 *)(dm + 1);
++ int dmi_capacity = get_unaligned((const u16 *)(dmi_data + 6));
++ if (dm->length >= 18)
++ dmi_capacity *= dmi_data[17];
++ if (battery->design_capacity * battery->design_voltage / 1000
++ != dmi_capacity &&
++ battery->design_capacity * 10 == dmi_capacity)
++ set_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH,
++ &battery->flags);
++ }
++}
++
+ /*
+ * According to the ACPI spec, some kinds of primary batteries can
+ * report percentage battery remaining capacity directly to OS.
+@@ -611,6 +662,32 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
+ battery->capacity_now = (battery->capacity_now *
+ battery->full_charge_capacity) / 100;
+ }
++
++ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags))
++ return ;
++
++ if (battery->power_unit && dmi_name_in_vendors("LENOVO")) {
++ const char *s;
++ s = dmi_get_system_info(DMI_PRODUCT_VERSION);
++ if (s && !strnicmp(s, "ThinkPad", 8)) {
++ dmi_walk(find_battery, battery);
++ if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH,
++ &battery->flags) &&
++ battery->design_voltage) {
++ battery->design_capacity =
++ battery->design_capacity *
++ 10000 / battery->design_voltage;
++ battery->full_charge_capacity =
++ battery->full_charge_capacity *
++ 10000 / battery->design_voltage;
++ battery->design_capacity_warning =
++ battery->design_capacity_warning *
++ 10000 / battery->design_voltage;
++ battery->capacity_now = battery->capacity_now *
++ 10000 / battery->design_voltage;
++ }
++ }
++ }
+ }
+
+ static int acpi_battery_update(struct acpi_battery *battery)
+diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
+index 9d7bc9f..ac28db3 100644
+--- a/drivers/acpi/processor_driver.c
++++ b/drivers/acpi/processor_driver.c
+@@ -409,6 +409,7 @@ static void acpi_processor_notify(struct acpi_device *device, u32 event)
+ acpi_bus_generate_proc_event(device, event, 0);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event, 0);
++ break;
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Unsupported event [0x%x]\n", event));
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index ed6bc52..d790791 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -108,6 +108,180 @@ void __init acpi_old_suspend_ordering(void)
+ old_suspend_ordering = true;
+ }
+
++static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
++{
++ acpi_old_suspend_ordering();
++ return 0;
++}
++
++static int __init init_nvs_nosave(const struct dmi_system_id *d)
++{
++ acpi_nvs_nosave();
++ return 0;
++}
++
++static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "Abit KN9 (nForce4 variant)",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
++ DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
++ },
++ },
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "HP xw4600 Workstation",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
++ },
++ },
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
++ },
++ },
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "Panasonic CF51-2L",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR,
++ "Matsushita Electric Industrial Co.,Ltd."),
++ DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VGN-FW21E",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VPCEB17FX",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VGN-SR11M",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Everex StepNote Series",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VPCEB1Z1E",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VGN-NW130D",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VPCCW29FX",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Averatec AV1020-ED2",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
++ },
++ },
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "Asus A8N-SLI DELUXE",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
++ },
++ },
++ {
++ .callback = init_old_suspend_ordering,
++ .ident = "Asus A8N-SLI Premium",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VGN-SR26GN_P",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VPCEB1S1E",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VGN-FW520F",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Asus K54C",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
++ .ident = "Asus K54HR",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
++ },
++ },
++ {},
++};
++
++static void acpi_sleep_dmi_check(void)
++{
++ dmi_check_system(acpisleep_dmi_table);
++}
++
+ /**
+ * acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
+ */
+@@ -197,6 +371,7 @@ static void acpi_pm_end(void)
+ }
+ #else /* !CONFIG_ACPI_SLEEP */
+ #define acpi_target_sleep_state ACPI_STATE_S0
++static inline void acpi_sleep_dmi_check(void) {}
+ #endif /* CONFIG_ACPI_SLEEP */
+
+ #ifdef CONFIG_SUSPEND
+@@ -341,167 +516,6 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = {
+ .end = acpi_pm_end,
+ .recover = acpi_pm_finish,
+ };
+-
+-static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
+-{
+- old_suspend_ordering = true;
+- return 0;
+-}
+-
+-static int __init init_nvs_nosave(const struct dmi_system_id *d)
+-{
+- acpi_nvs_nosave();
+- return 0;
+-}
+-
+-static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "Abit KN9 (nForce4 variant)",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
+- DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
+- },
+- },
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "HP xw4600 Workstation",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
+- },
+- },
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
+- DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
+- },
+- },
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "Panasonic CF51-2L",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR,
+- "Matsushita Electric Industrial Co.,Ltd."),
+- DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VGN-FW21E",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VPCEB17FX",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VGN-SR11M",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Everex StepNote Series",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VPCEB1Z1E",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VGN-NW130D",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VPCCW29FX",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Averatec AV1020-ED2",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
+- },
+- },
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "Asus A8N-SLI DELUXE",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+- DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
+- },
+- },
+- {
+- .callback = init_old_suspend_ordering,
+- .ident = "Asus A8N-SLI Premium",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+- DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VGN-SR26GN_P",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Sony Vaio VGN-FW520F",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Asus K54C",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
+- },
+- },
+- {
+- .callback = init_nvs_nosave,
+- .ident = "Asus K54HR",
+- .matches = {
+- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+- DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
+- },
+- },
+- {},
+-};
+ #endif /* CONFIG_SUSPEND */
+
+ #ifdef CONFIG_HIBERNATION
+@@ -808,13 +822,13 @@ int __init acpi_sleep_init(void)
+ u8 type_a, type_b;
+ #ifdef CONFIG_SUSPEND
+ int i = 0;
+-
+- dmi_check_system(acpisleep_dmi_table);
+ #endif
+
+ if (acpi_disabled)
+ return 0;
+
++ acpi_sleep_dmi_check();
++
+ sleep_states[ACPI_STATE_S0] = 1;
+ printk(KERN_INFO PREFIX "(supports S0");
+
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index 08a44b5..0e47949 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -389,6 +389,12 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
+ return 0;
+ }
+
++static int video_ignore_initial_backlight(const struct dmi_system_id *d)
++{
++ use_bios_initial_backlight = 0;
++ return 0;
++}
++
+ static struct dmi_system_id video_dmi_table[] __initdata = {
+ /*
+ * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
+@@ -433,6 +439,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
+ },
+ },
++ {
++ .callback = video_ignore_initial_backlight,
++ .ident = "HP Folio 13-2000",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"),
++ },
++ },
+ {}
+ };
+
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index f3f0fe7..d9c0199 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -132,6 +132,41 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
+ return AE_OK;
+ }
+
++/* Force to use vendor driver when the ACPI device is known to be
++ * buggy */
++static int video_detect_force_vendor(const struct dmi_system_id *d)
++{
++ acpi_video_support |= ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
++ return 0;
++}
++
++static struct dmi_system_id video_detect_dmi_table[] = {
++ /* On Samsung X360, the BIOS will set a flag (VDRV) if generic
++ * ACPI backlight device is used. This flag will definitively break
++ * the backlight interface (even the vendor interface) untill next
++ * reboot. It's why we should prevent video.ko from being used here
++ * and we can't rely on a later call to acpi_video_unregister().
++ */
++ {
++ .callback = video_detect_force_vendor,
++ .ident = "X360",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
++ DMI_MATCH(DMI_BOARD_NAME, "X360"),
++ },
++ },
++ {
++ .callback = video_detect_force_vendor,
++ .ident = "Asus UL30VT",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"),
++ },
++ },
++ { },
++};
++
+ /*
+ * Returns the video capabilities of a specific ACPI graphics device
+ *
+@@ -164,6 +199,8 @@ long acpi_video_get_capabilities(acpi_handle graphics_handle)
+ * ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
+ *}
+ */
++
++ dmi_check_system(video_detect_dmi_table);
+ } else {
+ status = acpi_bus_get_device(graphics_handle, &tmp_dev);
+ if (ACPI_FAILURE(status)) {
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 321e23e..c9540c0 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2529,6 +2529,7 @@ int ata_bus_probe(struct ata_port *ap)
+ * bus as we may be talking too fast.
+ */
+ dev->pio_mode = XFER_PIO_0;
++ dev->dma_mode = 0xff;
+
+ /* If the controller has a pio mode setup function
+ * then use it to set the chipset to rights. Don't
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 58db834..aea627e 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -2599,6 +2599,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
+ * bus as we may be talking too fast.
+ */
+ dev->pio_mode = XFER_PIO_0;
++ dev->dma_mode = 0xff;
+
+ /* If the controller has a pio mode setup function
+ * then use it to set the chipset to rights. Don't
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 2a5412e..dd332e5 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -309,7 +309,8 @@ ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
+
+- if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY))
++ if (atadev && ap->ops->sw_activity_show &&
++ (ap->flags & ATA_FLAG_SW_ACTIVITY))
+ return ap->ops->sw_activity_show(atadev, buf);
+ return -EINVAL;
+ }
+@@ -324,7 +325,8 @@ ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
+ enum sw_activity val;
+ int rc;
+
+- if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
++ if (atadev && ap->ops->sw_activity_store &&
++ (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
+ val = simple_strtoul(buf, NULL, 0);
+ switch (val) {
+ case OFF: case BLINK_ON: case BLINK_OFF:
+diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
+index 000fcc9..ef6e328 100644
+--- a/drivers/ata/sata_promise.c
++++ b/drivers/ata/sata_promise.c
+@@ -147,6 +147,10 @@ struct pdc_port_priv {
+ dma_addr_t pkt_dma;
+ };
+
++struct pdc_host_priv {
++ spinlock_t hard_reset_lock;
++};
++
+ static int pdc_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
+ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
+ static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+@@ -801,9 +805,10 @@ static void pdc_hard_reset_port(struct ata_port *ap)
+ void __iomem *host_mmio = ap->host->iomap[PDC_MMIO_BAR];
+ void __iomem *pcictl_b1_mmio = host_mmio + PDC_PCI_CTL + 1;
+ unsigned int ata_no = pdc_ata_port_to_ata_no(ap);
++ struct pdc_host_priv *hpriv = ap->host->private_data;
+ u8 tmp;
+
+- spin_lock(&ap->host->lock);
++ spin_lock(&hpriv->hard_reset_lock);
+
+ tmp = readb(pcictl_b1_mmio);
+ tmp &= ~(0x10 << ata_no);
+@@ -814,7 +819,7 @@ static void pdc_hard_reset_port(struct ata_port *ap)
+ writeb(tmp, pcictl_b1_mmio);
+ readb(pcictl_b1_mmio); /* flush */
+
+- spin_unlock(&ap->host->lock);
++ spin_unlock(&hpriv->hard_reset_lock);
+ }
+
+ static int pdc_sata_hardreset(struct ata_link *link, unsigned int *class,
+@@ -1182,6 +1187,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
+ const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
+ const struct ata_port_info *ppi[PDC_MAX_PORTS];
+ struct ata_host *host;
++ struct pdc_host_priv *hpriv;
+ void __iomem *host_mmio;
+ int n_ports, i, rc;
+ int is_sataii_tx4;
+@@ -1218,6 +1224,11 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
+ dev_err(&pdev->dev, "failed to allocate host\n");
+ return -ENOMEM;
+ }
++ hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL);
++ if (!hpriv)
++ return -ENOMEM;
++ spin_lock_init(&hpriv->hard_reset_lock);
++ host->private_data = hpriv;
+ host->iomap = pcim_iomap_table(pdev);
+
+ is_sataii_tx4 = pdc_is_sataii_tx4(pi->flags);
+diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
+index c646118..833607f 100644
+--- a/drivers/ata/sata_svw.c
++++ b/drivers/ata/sata_svw.c
+@@ -142,6 +142,39 @@ static int k2_sata_scr_write(struct ata_link *link,
+ return 0;
+ }
+
++static int k2_sata_softreset(struct ata_link *link,
++ unsigned int *class, unsigned long deadline)
++{
++ u8 dmactl;
++ void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
++
++ dmactl = readb(mmio + ATA_DMA_CMD);
++
++ /* Clear the start bit */
++ if (dmactl & ATA_DMA_START) {
++ dmactl &= ~ATA_DMA_START;
++ writeb(dmactl, mmio + ATA_DMA_CMD);
++ }
++
++ return ata_sff_softreset(link, class, deadline);
++}
++
++static int k2_sata_hardreset(struct ata_link *link,
++ unsigned int *class, unsigned long deadline)
++{
++ u8 dmactl;
++ void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
++
++ dmactl = readb(mmio + ATA_DMA_CMD);
++
++ /* Clear the start bit */
++ if (dmactl & ATA_DMA_START) {
++ dmactl &= ~ATA_DMA_START;
++ writeb(dmactl, mmio + ATA_DMA_CMD);
++ }
++
++ return sata_sff_hardreset(link, class, deadline);
++}
+
+ static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+ {
+@@ -346,6 +379,8 @@ static struct scsi_host_template k2_sata_sht = {
+
+ static struct ata_port_operations k2_sata_ops = {
+ .inherits = &ata_bmdma_port_ops,
++ .softreset = k2_sata_softreset,
++ .hardreset = k2_sata_hardreset,
+ .sff_tf_load = k2_sata_tf_load,
+ .sff_tf_read = k2_sata_tf_read,
+ .sff_check_status = k2_stat_check_status,
+diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
+index d452592..adfce9f 100644
+--- a/drivers/atm/solos-pci.c
++++ b/drivers/atm/solos-pci.c
+@@ -967,10 +967,11 @@ static uint32_t fpga_tx(struct solos_card *card)
+ for (port = 0; tx_pending; tx_pending >>= 1, port++) {
+ if (tx_pending & 1) {
+ struct sk_buff *oldskb = card->tx_skb[port];
+- if (oldskb)
++ if (oldskb) {
+ pci_unmap_single(card->dev, SKB_CB(oldskb)->dma_addr,
+ oldskb->len, PCI_DMA_TODEVICE);
+-
++ card->tx_skb[port] = NULL;
++ }
+ spin_lock(&card->tx_queue_lock);
+ skb = skb_dequeue(&card->tx_queue[port]);
+ if (!skb)
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index 6f39747..cd252e0 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -67,7 +67,7 @@ static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
+ /* If we're in the region the user is trying to read */
+ if (p >= *ppos) {
+ /* ...but not beyond it */
+- if (buf_pos >= count - 1 - tot_len)
++ if (buf_pos + 1 + tot_len >= count)
+ break;
+
+ /* Format the register */
+diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
+index c3e9dff..041fddf 100644
+--- a/drivers/bcma/driver_mips.c
++++ b/drivers/bcma/driver_mips.c
+@@ -115,7 +115,7 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
+ bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) &
+ ~(1 << irqflag));
+ else
+- bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq), 0);
++ bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(oldirq), 0);
+
+ /* assign the new one */
+ if (irq == 0) {
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 5c6709d..574ce73 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -66,6 +66,7 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x13d3, 0x3304) },
+ { USB_DEVICE(0x0930, 0x0215) },
+ { USB_DEVICE(0x0489, 0xE03D) },
++ { USB_DEVICE(0x0489, 0xE027) },
+
+ /* Atheros AR9285 Malbec with sflash firmware */
+ { USB_DEVICE(0x03F0, 0x311D) },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 1f90dab..c5e44a3 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -104,6 +104,8 @@ static struct usb_device_id btusb_table[] = {
+ { USB_DEVICE(0x0c10, 0x0000) },
+
+ /* Broadcom BCM20702A0 */
++ { USB_DEVICE(0x0b05, 0x17b5) },
++ { USB_DEVICE(0x04ca, 0x2003) },
+ { USB_DEVICE(0x0489, 0xe042) },
+ { USB_DEVICE(0x413c, 0x8197) },
+
+@@ -131,6 +133,7 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
++ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
+
+ /* Atheros AR9285 Malbec with sflash firmware */
+ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
+index 6f24604..439d7e7 100644
+--- a/drivers/char/agp/intel-agp.h
++++ b/drivers/char/agp/intel-agp.h
+@@ -235,6 +235,7 @@
+ #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
+ #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
+ #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
++#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
+
+ int intel_gmch_probe(struct pci_dev *pdev,
+ struct agp_bridge_data *bridge);
+diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
+index c92424c..43c4ec3 100644
+--- a/drivers/char/agp/intel-gtt.c
++++ b/drivers/char/agp/intel-gtt.c
+@@ -1459,6 +1459,8 @@ static const struct intel_gtt_driver_description {
+ "Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
+ "Ivybridge", &sandybridge_gtt_driver },
++ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
++ "Ivybridge", &sandybridge_gtt_driver },
+ { 0, NULL, NULL }
+ };
+
+diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
+index 7c7f42a1f8..9658116 100644
+--- a/drivers/char/ramoops.c
++++ b/drivers/char/ramoops.c
+@@ -126,8 +126,8 @@ static int __init ramoops_probe(struct platform_device *pdev)
+ goto fail3;
+ }
+
+- rounddown_pow_of_two(pdata->mem_size);
+- rounddown_pow_of_two(pdata->record_size);
++ pdata->mem_size = rounddown_pow_of_two(pdata->mem_size);
++ pdata->record_size = rounddown_pow_of_two(pdata->record_size);
+
+ /* Check for the minimum memory size */
+ if (pdata->mem_size < MIN_MEM_SIZE &&
+diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
+index 6104dba..2244df0 100644
+--- a/drivers/edac/i7300_edac.c
++++ b/drivers/edac/i7300_edac.c
+@@ -215,8 +215,8 @@ static const char *ferr_fat_fbd_name[] = {
+ [0] = "Memory Write error on non-redundant retry or "
+ "FBD configuration Write error on retry",
+ };
+-#define GET_FBD_FAT_IDX(fbderr) (fbderr & (3 << 28))
+-#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3))
++#define GET_FBD_FAT_IDX(fbderr) (((fbderr) >> 28) & 3)
++#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))
+
+ #define FERR_NF_FBD 0xa0
+ static const char *ferr_nf_fbd_name[] = {
+@@ -243,7 +243,7 @@ static const char *ferr_nf_fbd_name[] = {
+ [1] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
+ [0] = "Uncorrectable Data ECC on Replay",
+ };
+-#define GET_FBD_NF_IDX(fbderr) (fbderr & (3 << 28))
++#define GET_FBD_NF_IDX(fbderr) (((fbderr) >> 28) & 3)
+ #define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
+ (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
+ (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
+@@ -485,7 +485,7 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
+ errnum = find_first_bit(&errors,
+ ARRAY_SIZE(ferr_nf_fbd_name));
+ specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
+- branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;
++ branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;
+
+ pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
+ REDMEMA, &syndrome);
+diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
+index a5da732..01658ca 100644
+--- a/drivers/edac/i82975x_edac.c
++++ b/drivers/edac/i82975x_edac.c
+@@ -355,10 +355,6 @@ static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
+ static void i82975x_init_csrows(struct mem_ctl_info *mci,
+ struct pci_dev *pdev, void __iomem *mch_window)
+ {
+- static const char *labels[4] = {
+- "DIMM A1", "DIMM A2",
+- "DIMM B1", "DIMM B2"
+- };
+ struct csrow_info *csrow;
+ unsigned long last_cumul_size;
+ u8 value;
+@@ -399,9 +395,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
+ * [0-3] for dual-channel; i.e. csrow->nr_channels = 2
+ */
+ for (chan = 0; chan < csrow->nr_channels; chan++)
+- strncpy(csrow->channels[chan].label,
+- labels[(index >> 1) + (chan * 2)],
+- EDAC_MC_LABEL_LEN);
++
++ snprintf(csrow->channels[chan].label, EDAC_MC_LABEL_LEN, "DIMM %c%d",
++ (chan == 0) ? 'A' : 'B',
++ index);
+
+ if (cumul_size == last_cumul_size)
+ continue; /* not populated */
+diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
+index a20f45b..7c869b7 100644
+--- a/drivers/firewire/net.c
++++ b/drivers/firewire/net.c
+@@ -860,8 +860,8 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
+ if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
+ buf_ptr += 2;
+ length -= IEEE1394_GASP_HDR_SIZE;
+- fwnet_incoming_packet(dev, buf_ptr, length,
+- source_node_id, -1, true);
++ fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
++ context->card->generation, true);
+ }
+
+ packet.payload_length = dev->rcv_buffer_size;
+@@ -956,7 +956,12 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
+ break;
+ }
+
+- skb_pull(skb, ptask->max_payload);
++ if (ptask->dest_node == IEEE1394_ALL_NODES) {
++ skb_pull(skb,
++ ptask->max_payload + IEEE1394_GASP_HDR_SIZE);
++ } else {
++ skb_pull(skb, ptask->max_payload);
++ }
+ if (ptask->outstanding_pkts > 1) {
+ fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
+ dg_size, fg_off, datagram_label);
+@@ -1059,7 +1064,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
+ smp_rmb();
+ node_id = dev->card->node_id;
+
+- p = skb_push(ptask->skb, 8);
++ p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE);
+ put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
+ put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
+ | RFC2734_SW_VERSION, &p[4]);
+diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
+index d2619d7..11788f7 100644
+--- a/drivers/gpu/drm/drm_crtc_helper.c
++++ b/drivers/gpu/drm/drm_crtc_helper.c
+@@ -321,8 +321,8 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
+ * drm_crtc_set_mode - set a mode
+ * @crtc: CRTC to program
+ * @mode: mode to use
+- * @x: width of mode
+- * @y: height of mode
++ * @x: horizontal offset into the surface
++ * @y: vertical offset into the surface
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 0c1a99b..bb95d59 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -274,6 +274,11 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
+ }
+ };
+ ret = i2c_transfer(adapter, msgs, 2);
++ if (ret == -ENXIO) {
++ DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n",
++ adapter->name);
++ break;
++ }
+ } while (ret != 2 && --retries);
+
+ return ret == 2 ? 0 : -1;
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index b2e3c97..d00f905 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -339,7 +339,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+ seq_printf(m, "No flip due on pipe %c (plane %c)\n",
+ pipe, plane);
+ } else {
+- if (!work->pending) {
++ if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
+ seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
+ pipe, plane);
+ } else {
+@@ -350,7 +350,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+ seq_printf(m, "Stall check enabled, ");
+ else
+ seq_printf(m, "Stall check waiting for page flip ioctl, ");
+- seq_printf(m, "%d prepares\n", work->pending);
++ seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
+
+ if (work->old_fb_obj) {
+ struct drm_i915_gem_object *obj = work->old_fb_obj;
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 3a1bfd7..452bc51 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -287,6 +287,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
+ INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
+ INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
+ INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
++ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
+ {0, 0, 0}
+ };
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index dbe4dbe..5950ba3 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1259,6 +1259,11 @@ out:
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
++ case -EBUSY:
++ /*
++ * EBUSY is ok: this just means that another thread
++ * already did the job.
++ */
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 2812d7b..93e74fb 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -1187,7 +1187,9 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
+
+- if (work == NULL || work->pending || !work->enable_stall_check) {
++ if (work == NULL ||
++ atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
++ !work->enable_stall_check) {
+ /* Either the pending flip IRQ arrived, or we're too early. Don't check */
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return;
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index a294a32..7a10f5f 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2816,6 +2816,8 @@
+ #define _PFA_CTL_1 0x68080
+ #define _PFB_CTL_1 0x68880
+ #define PF_ENABLE (1<<31)
++#define PF_PIPE_SEL_MASK_IVB (3<<29)
++#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
+ #define PF_FILTER_MASK (3<<23)
+ #define PF_FILTER_PROGRAMMED (0<<23)
+ #define PF_FILTER_MED_3x3 (1<<23)
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 87bb87b..0016fee 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -495,12 +495,8 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+
+ edp = find_section(bdb, BDB_EDP);
+ if (!edp) {
+- if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
+- DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
+- "supported, assume %dbpp panel color "
+- "depth.\n",
+- dev_priv->edp.bpp);
+- }
++ if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support)
++ DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
+ return;
+ }
+
+@@ -653,9 +649,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
+ dev_priv->lvds_use_ssc = 1;
+ dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
+ DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
+-
+- /* eDP data */
+- dev_priv->edp.bpp = 18;
+ }
+
+ static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index fdae61f..54acad3 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -2384,18 +2384,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
+ FDI_FE_ERRC_ENABLE);
+ }
+
+-static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
+-{
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- u32 flags = I915_READ(SOUTH_CHICKEN1);
+-
+- flags |= FDI_PHASE_SYNC_OVR(pipe);
+- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
+- flags |= FDI_PHASE_SYNC_EN(pipe);
+- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
+- POSTING_READ(SOUTH_CHICKEN1);
+-}
+-
+ /* The FDI link training functions for ILK/Ibexpeak. */
+ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+ {
+@@ -2439,11 +2427,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+ udelay(150);
+
+ /* Ironlake workaround, enable clock pointer after FDI enable*/
+- if (HAS_PCH_IBX(dev)) {
+- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+- FDI_RX_PHASE_SYNC_POINTER_EN);
+- }
++ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
++ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
++ FDI_RX_PHASE_SYNC_POINTER_EN);
+
+ reg = FDI_RX_IIR(pipe);
+ for (tries = 0; tries < 5; tries++) {
+@@ -2546,9 +2532,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
+ POSTING_READ(reg);
+ udelay(150);
+
+- if (HAS_PCH_CPT(dev))
+- cpt_phase_pointer_enable(dev, pipe);
+-
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+@@ -2667,9 +2650,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
+ POSTING_READ(reg);
+ udelay(150);
+
+- if (HAS_PCH_CPT(dev))
+- cpt_phase_pointer_enable(dev, pipe);
+-
+ for (i = 0; i < 4; i++) {
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+@@ -2779,17 +2759,6 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
+ }
+ }
+
+-static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
+-{
+- struct drm_i915_private *dev_priv = dev->dev_private;
+- u32 flags = I915_READ(SOUTH_CHICKEN1);
+-
+- flags &= ~(FDI_PHASE_SYNC_EN(pipe));
+- I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
+- flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
+- I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
+- POSTING_READ(SOUTH_CHICKEN1);
+-}
+ static void ironlake_fdi_disable(struct drm_crtc *crtc)
+ {
+ struct drm_device *dev = crtc->dev;
+@@ -2819,8 +2788,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
+ I915_WRITE(FDI_RX_CHICKEN(pipe),
+ I915_READ(FDI_RX_CHICKEN(pipe) &
+ ~FDI_RX_PHASE_SYNC_POINTER_EN));
+- } else if (HAS_PCH_CPT(dev)) {
+- cpt_phase_pointer_disable(dev, pipe);
+ }
+
+ /* still set train pattern 1 */
+@@ -3073,7 +3040,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+- I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
++ if (IS_IVYBRIDGE(dev))
++ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
++ PF_PIPE_SEL_IVB(pipe));
++ else
++ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+ I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+ I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+ }
+@@ -4782,6 +4753,17 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+ }
+ }
+
++ if (intel_encoder->type == INTEL_OUTPUT_EDP) {
++ /* Use VBT settings if we have an eDP panel */
++ unsigned int edp_bpc = dev_priv->edp.bpp / 3;
++
++ if (edp_bpc && edp_bpc < display_bpc) {
++ DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
++ display_bpc = edp_bpc;
++ }
++ continue;
++ }
++
+ /*
+ * HDMI is either 12 or 8, so if the display lets 10bpc sneak
+ * through, clamp it down. (Note: >12bpc will be caught below.)
+@@ -6945,11 +6927,18 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work = intel_crtc->unpin_work;
+- if (work == NULL || !work->pending) {
++
++ /* Ensure we don't miss a work->pending update ... */
++ smp_rmb();
++
++ if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ return;
+ }
+
++ /* and that the unpin work is consistent wrt ->pending. */
++ smp_rmb();
++
+ intel_crtc->unpin_work = NULL;
+
+ if (work->event) {
+@@ -7021,16 +7010,25 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
+ to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+ unsigned long flags;
+
++ /* NB: An MMIO update of the plane base pointer will also
++ * generate a page-flip completion irq, i.e. every modeset
++ * is also accompanied by a spurious intel_prepare_page_flip().
++ */
+ spin_lock_irqsave(&dev->event_lock, flags);
+- if (intel_crtc->unpin_work) {
+- if ((++intel_crtc->unpin_work->pending) > 1)
+- DRM_ERROR("Prepared flip multiple times\n");
+- } else {
+- DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
+- }
++ if (intel_crtc->unpin_work)
++ atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
++inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
++{
++ /* Ensure that the work item is consistent when activating it ... */
++ smp_wmb();
++ atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
++ /* and that it is marked active as soon as the irq could fire. */
++ smp_wmb();
++}
++
+ static int intel_gen2_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+@@ -7067,6 +7065,8 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
+ OUT_RING(fb->pitch);
+ OUT_RING(obj->gtt_offset + offset);
+ OUT_RING(MI_NOOP);
++
++ intel_mark_page_flip_active(intel_crtc);
+ ADVANCE_LP_RING();
+ return 0;
+
+@@ -7110,6 +7110,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
+ OUT_RING(obj->gtt_offset + offset);
+ OUT_RING(MI_NOOP);
+
++ intel_mark_page_flip_active(intel_crtc);
+ ADVANCE_LP_RING();
+ return 0;
+
+@@ -7153,6 +7154,10 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
+ pf = 0;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
++
++ intel_mark_page_flip_active(intel_crtc);
++
++ intel_mark_page_flip_active(intel_crtc);
+ ADVANCE_LP_RING();
+ return 0;
+
+@@ -7242,6 +7247,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
+ intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+ intel_ring_emit(ring, (obj->gtt_offset));
+ intel_ring_emit(ring, (MI_NOOP));
++
++ intel_mark_page_flip_active(intel_crtc);
+ intel_ring_advance(ring);
+ return 0;
+
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index bcadf74..5212284 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -257,7 +257,10 @@ struct intel_unpin_work {
+ struct drm_i915_gem_object *old_fb_obj;
+ struct drm_i915_gem_object *pending_flip_obj;
+ struct drm_pending_vblank_event *event;
+- int pending;
++ atomic_t pending;
++#define INTEL_FLIP_INACTIVE 0
++#define INTEL_FLIP_PENDING 1
++#define INTEL_FLIP_COMPLETE 2
+ bool enable_stall_check;
+ };
+
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 57152a7..cf5ea3d 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -779,6 +779,22 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
+ },
+ },
++ {
++ .callback = intel_no_lvds_dmi_callback,
++ .ident = "Gigabyte GA-D525TUD",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
++ DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
++ },
++ },
++ {
++ .callback = intel_no_lvds_dmi_callback,
++ .ident = "Supermicro X7SPA-H",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
++ },
++ },
+
+ { } /* terminating entry */
+ };
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index a4011b0..a25d08a 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -541,6 +541,11 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+
+ if (rdev->family < CHIP_RV770)
+ pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
++ /* use frac fb div on APUs */
++ if (ASIC_IS_DCE41(rdev))
++ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
++ if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
++ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ } else {
+ pll->flags |= RADEON_PLL_LEGACY;
+
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index aca4755..f0dc04b 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -111,7 +111,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+ ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+ (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+- radeon_dp_set_link_config(connector, mode);
++ radeon_dp_set_link_config(connector, adjusted_mode);
+ }
+
+ return true;
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index b919b11..0977849 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1730,7 +1730,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ case CHIP_SUMO:
+ rdev->config.evergreen.num_ses = 1;
+ rdev->config.evergreen.max_pipes = 4;
+- rdev->config.evergreen.max_tile_pipes = 2;
++ rdev->config.evergreen.max_tile_pipes = 4;
+ if (rdev->pdev->device == 0x9648)
+ rdev->config.evergreen.max_simds = 3;
+ else if ((rdev->pdev->device == 0x9647) ||
+@@ -1819,7 +1819,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ break;
+ case CHIP_CAICOS:
+ rdev->config.evergreen.num_ses = 1;
+- rdev->config.evergreen.max_pipes = 4;
++ rdev->config.evergreen.max_pipes = 2;
+ rdev->config.evergreen.max_tile_pipes = 2;
+ rdev->config.evergreen.max_simds = 2;
+ rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 899c712..2da7335 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -458,6 +458,9 @@ static const struct hid_device_id apple_devices[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
+ APPLE_ISO_KEYBOARD },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
++ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
++ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index a21e763..279b863 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1386,6 +1386,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
++ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 652f230..2d41336 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -131,6 +131,7 @@
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
++#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
+ #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
+ #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
+ #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
+diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
+index 2ab7175..7cf3ffe 100644
+--- a/drivers/hid/hid-magicmouse.c
++++ b/drivers/hid/hid-magicmouse.c
+@@ -418,6 +418,8 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
+ __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
+ __set_bit(BTN_TOOL_QUADTAP, input->keybit);
+ __set_bit(BTN_TOUCH, input->keybit);
++ __set_bit(INPUT_PROP_POINTER, input->propbit);
++ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+ }
+
+ if (report_touches) {
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 19b4412..3d630bb 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -190,6 +190,27 @@ static ssize_t show_temp(struct device *dev,
+ return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
+ }
+
++struct tjmax {
++ char const *id;
++ int tjmax;
++};
++
++static struct tjmax __cpuinitconst tjmax_table[] = {
++ { "CPU D410", 100000 },
++ { "CPU D425", 100000 },
++ { "CPU D510", 100000 },
++ { "CPU D525", 100000 },
++ { "CPU N450", 100000 },
++ { "CPU N455", 100000 },
++ { "CPU N470", 100000 },
++ { "CPU N475", 100000 },
++ { "CPU 230", 100000 }, /* Model 0x1c, stepping 2 */
++ { "CPU 330", 125000 }, /* Model 0x1c, stepping 2 */
++ { "CPU CE4110", 110000 }, /* Model 0x1c, stepping 10 */
++ { "CPU CE4150", 110000 }, /* Model 0x1c, stepping 10 */
++ { "CPU CE4170", 110000 }, /* Model 0x1c, stepping 10 */
++};
++
+ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
+ {
+ /* The 100C is default for both mobile and non mobile CPUs */
+@@ -200,6 +221,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
+ int err;
+ u32 eax, edx;
+ struct pci_dev *host_bridge;
++ int i;
++
++ /* explicit tjmax table entries override heuristics */
++ for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
++ if (strstr(c->x86_model_id, tjmax_table[i].id))
++ return tjmax_table[i].tjmax;
++ }
+
+ /* Early chips have no MSR for TjMax */
+
+@@ -208,7 +236,8 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
+
+ /* Atom CPUs */
+
+- if (c->x86_model == 0x1c) {
++ if (c->x86_model == 0x1c || c->x86_model == 0x26
++ || c->x86_model == 0x27) {
+ usemsr_ee = 0;
+
+ host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+@@ -221,6 +250,9 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
+ tjmax = 90000;
+
+ pci_dev_put(host_bridge);
++ } else if (c->x86_model == 0x36) {
++ usemsr_ee = 0;
++ tjmax = 100000;
+ }
+
+ if (c->x86_model > 0xe && usemsr_ee) {
+diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
+index ac2d6cb..770e959 100644
+--- a/drivers/hwmon/fam15h_power.c
++++ b/drivers/hwmon/fam15h_power.c
+@@ -31,6 +31,9 @@ MODULE_DESCRIPTION("AMD Family 15h CPU processor power monitor");
+ MODULE_AUTHOR("Andreas Herrmann <andreas.herrmann3@amd.com>");
+ MODULE_LICENSE("GPL");
+
++/* Family 16h Northbridge's function 4 PCI ID */
++#define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534
++
+ /* D18F3 */
+ #define REG_NORTHBRIDGE_CAP 0xe8
+
+@@ -256,6 +259,7 @@ static void __devexit fam15h_power_remove(struct pci_dev *pdev)
+
+ static DEFINE_PCI_DEVICE_TABLE(fam15h_power_id_table) = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
+ {}
+ };
+ MODULE_DEVICE_TABLE(pci, fam15h_power_id_table);
+diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
+index 4dfa1ee..f8f892b 100644
+--- a/drivers/input/joystick/walkera0701.c
++++ b/drivers/input/joystick/walkera0701.c
+@@ -196,6 +196,7 @@ static void walkera0701_close(struct input_dev *dev)
+ struct walkera_dev *w = input_get_drvdata(dev);
+
+ parport_disable_irq(w->parport);
++ hrtimer_cancel(&w->timer);
+ }
+
+ static int walkera0701_connect(struct walkera_dev *w, int parport)
+@@ -224,6 +225,9 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
+ if (parport_claim(w->pardevice))
+ goto init_err1;
+
++ hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ w->timer.function = timer_handler;
++
+ w->input_dev = input_allocate_device();
+ if (!w->input_dev)
+ goto init_err2;
+@@ -254,8 +258,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
+ if (err)
+ goto init_err3;
+
+- hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+- w->timer.function = timer_handler;
+ return 0;
+
+ init_err3:
+@@ -271,7 +273,6 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
+
+ static void walkera0701_disconnect(struct walkera_dev *w)
+ {
+- hrtimer_cancel(&w->timer);
+ input_unregister_device(w->input_dev);
+ parport_release(w->pardevice);
+ parport_unregister_device(w->pardevice);
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index cd1a843..031270c 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -914,6 +914,7 @@ static int __init i8042_platform_init(void)
+ int retval;
+
+ #ifdef CONFIG_X86
++ u8 a20_on = 0xdf;
+ /* Just return if pre-detection shows no i8042 controller exist */
+ if (!x86_platform.i8042_detect())
+ return -ENODEV;
+@@ -953,6 +954,14 @@ static int __init i8042_platform_init(void)
+
+ if (dmi_check_system(i8042_dmi_dritek_table))
+ i8042_dritek = true;
++
++ /*
++ * A20 was already enabled during early kernel init. But some buggy
++ * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
++ * resume from S3. So we do it here and hope that nothing breaks.
++ */
++ i8042_command(&a20_on, 0x10d1);
++ i8042_command(NULL, 0x00ff); /* Null command for SMM firmware */
+ #endif /* CONFIG_X86 */
+
+ return retval;
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index b9062c0..9a6cc92 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -1801,10 +1801,17 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ if (!pte)
+ return -ENOMEM;
+ /* It is large page*/
+- if (largepage_lvl > 1)
++ if (largepage_lvl > 1) {
+ pteval |= DMA_PTE_LARGE_PAGE;
+- else
++ /* Ensure that old small page tables are removed to make room
++ for superpage, if they exist. */
++ dma_pte_clear_range(domain, iov_pfn,
++ iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
++ dma_pte_free_pagetable(domain, iov_pfn,
++ iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
++ } else {
+ pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
++ }
+
+ }
+ /* We don't need lock here, nobody else
+diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
+index cb641f1..bf5cfd7 100644
+--- a/drivers/leds/leds-lp5521.c
++++ b/drivers/leds/leds-lp5521.c
+@@ -198,9 +198,14 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
+
+ /* move current engine to direct mode and remember the state */
+ ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
++ if (ret)
++ return ret;
++
+ /* Mode change requires min 500 us delay. 1 - 2 ms with margin */
+ usleep_range(1000, 2000);
+- ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode);
++ ret = lp5521_read(client, LP5521_REG_OP_MODE, &mode);
++ if (ret)
++ return ret;
+
+ /* For loading, all the engines to load mode */
+ lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
+@@ -216,8 +221,7 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
+ LP5521_PROG_MEM_SIZE,
+ pattern);
+
+- ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode);
+- return ret;
++ return lp5521_write(client, LP5521_REG_OP_MODE, mode);
+ }
+
+ static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
+@@ -692,9 +696,9 @@ static int __devinit lp5521_probe(struct i2c_client *client,
+ * otherwise further access to the R G B channels in the
+ * LP5521_REG_ENABLE register will not have any effect - strange!
+ */
+- lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
+- if (buf != LP5521_REG_R_CURR_DEFAULT) {
+- dev_err(&client->dev, "error in reseting chip\n");
++ ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
++ if (ret || buf != LP5521_REG_R_CURR_DEFAULT) {
++ dev_err(&client->dev, "error in resetting chip\n");
+ goto fail2;
+ }
+ usleep_range(10000, 20000);
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 1ce84ed..42c873f 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1562,6 +1562,14 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param)
+ if (copy_from_user(dmi, user, tmp.data_size))
+ goto bad;
+
++ /*
++ * Abort if something changed the ioctl data while it was being copied.
++ */
++ if (dmi->data_size != tmp.data_size) {
++ DMERR("rejecting ioctl: data size modified while processing parameters");
++ goto bad;
++ }
++
+ /* Wipe the user buffer so we do not return it to userspace */
+ if (secure_data && clear_user(user, tmp.data_size))
+ goto bad;
+diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
+index d279c76..acba54e 100644
+--- a/drivers/md/persistent-data/dm-btree-internal.h
++++ b/drivers/md/persistent-data/dm-btree-internal.h
+@@ -36,13 +36,13 @@ struct node_header {
+ __le32 padding;
+ } __packed;
+
+-struct node {
++struct btree_node {
+ struct node_header header;
+ __le64 keys[0];
+ } __packed;
+
+
+-void inc_children(struct dm_transaction_manager *tm, struct node *n,
++void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
+ struct dm_btree_value_type *vt);
+
+ int new_block(struct dm_btree_info *info, struct dm_block **result);
+@@ -64,7 +64,7 @@ struct ro_spine {
+ void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
+ int exit_ro_spine(struct ro_spine *s);
+ int ro_step(struct ro_spine *s, dm_block_t new_child);
+-struct node *ro_node(struct ro_spine *s);
++struct btree_node *ro_node(struct ro_spine *s);
+
+ struct shadow_spine {
+ struct dm_btree_info *info;
+@@ -98,12 +98,12 @@ int shadow_root(struct shadow_spine *s);
+ /*
+ * Some inlines.
+ */
+-static inline __le64 *key_ptr(struct node *n, uint32_t index)
++static inline __le64 *key_ptr(struct btree_node *n, uint32_t index)
+ {
+ return n->keys + index;
+ }
+
+-static inline void *value_base(struct node *n)
++static inline void *value_base(struct btree_node *n)
+ {
+ return &n->keys[le32_to_cpu(n->header.max_entries)];
+ }
+@@ -111,7 +111,7 @@ static inline void *value_base(struct node *n)
+ /*
+ * FIXME: Now that value size is stored in node we don't need the third parm.
+ */
+-static inline void *value_ptr(struct node *n, uint32_t index, size_t value_size)
++static inline void *value_ptr(struct btree_node *n, uint32_t index, size_t value_size)
+ {
+ BUG_ON(value_size != le32_to_cpu(n->header.value_size));
+ return value_base(n) + (value_size * index);
+@@ -120,7 +120,7 @@ static inline void *value_ptr(struct node *n, uint32_t index, size_t value_size)
+ /*
+ * Assumes the values are suitably-aligned and converts to core format.
+ */
+-static inline uint64_t value64(struct node *n, uint32_t index)
++static inline uint64_t value64(struct btree_node *n, uint32_t index)
+ {
+ __le64 *values_le = value_base(n);
+
+@@ -130,7 +130,7 @@ static inline uint64_t value64(struct node *n, uint32_t index)
+ /*
+ * Searching for a key within a single node.
+ */
+-int lower_bound(struct node *n, uint64_t key);
++int lower_bound(struct btree_node *n, uint64_t key);
+
+ extern struct dm_block_validator btree_node_validator;
+
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index 1a35caf..e6cdfde 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -53,7 +53,7 @@
+ /*
+ * Some little utilities for moving node data around.
+ */
+-static void node_shift(struct node *n, int shift)
++static void node_shift(struct btree_node *n, int shift)
+ {
+ uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
+ uint32_t value_size = le32_to_cpu(n->header.value_size);
+@@ -79,7 +79,7 @@ static void node_shift(struct node *n, int shift)
+ }
+ }
+
+-static void node_copy(struct node *left, struct node *right, int shift)
++static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
+ {
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t value_size = le32_to_cpu(left->header.value_size);
+@@ -108,7 +108,7 @@ static void node_copy(struct node *left, struct node *right, int shift)
+ /*
+ * Delete a specific entry from a leaf node.
+ */
+-static void delete_at(struct node *n, unsigned index)
++static void delete_at(struct btree_node *n, unsigned index)
+ {
+ unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
+ unsigned nr_to_copy = nr_entries - (index + 1);
+@@ -128,7 +128,7 @@ static void delete_at(struct node *n, unsigned index)
+ n->header.nr_entries = cpu_to_le32(nr_entries - 1);
+ }
+
+-static unsigned merge_threshold(struct node *n)
++static unsigned merge_threshold(struct btree_node *n)
+ {
+ return le32_to_cpu(n->header.max_entries) / 3;
+ }
+@@ -136,7 +136,7 @@ static unsigned merge_threshold(struct node *n)
+ struct child {
+ unsigned index;
+ struct dm_block *block;
+- struct node *n;
++ struct btree_node *n;
+ };
+
+ static struct dm_btree_value_type le64_type = {
+@@ -147,7 +147,7 @@ static struct dm_btree_value_type le64_type = {
+ .equal = NULL
+ };
+
+-static int init_child(struct dm_btree_info *info, struct node *parent,
++static int init_child(struct dm_btree_info *info, struct btree_node *parent,
+ unsigned index, struct child *result)
+ {
+ int r, inc;
+@@ -177,7 +177,7 @@ static int exit_child(struct dm_btree_info *info, struct child *c)
+ return dm_tm_unlock(info->tm, c->block);
+ }
+
+-static void shift(struct node *left, struct node *right, int count)
++static void shift(struct btree_node *left, struct btree_node *right, int count)
+ {
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+@@ -203,11 +203,11 @@ static void shift(struct node *left, struct node *right, int count)
+ right->header.nr_entries = cpu_to_le32(nr_right + count);
+ }
+
+-static void __rebalance2(struct dm_btree_info *info, struct node *parent,
++static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *r)
+ {
+- struct node *left = l->n;
+- struct node *right = r->n;
++ struct btree_node *left = l->n;
++ struct btree_node *right = r->n;
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+ unsigned threshold = 2 * merge_threshold(left) + 1;
+@@ -239,7 +239,7 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
+ unsigned left_index)
+ {
+ int r;
+- struct node *parent;
++ struct btree_node *parent;
+ struct child left, right;
+
+ parent = dm_block_data(shadow_current(s));
+@@ -270,9 +270,9 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
+ * in right, then rebalance2. This wastes some cpu, but I want something
+ * simple atm.
+ */
+-static void delete_center_node(struct dm_btree_info *info, struct node *parent,
++static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *c, struct child *r,
+- struct node *left, struct node *center, struct node *right,
++ struct btree_node *left, struct btree_node *center, struct btree_node *right,
+ uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+ {
+ uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+@@ -301,9 +301,9 @@ static void delete_center_node(struct dm_btree_info *info, struct node *parent,
+ /*
+ * Redistributes entries among 3 sibling nodes.
+ */
+-static void redistribute3(struct dm_btree_info *info, struct node *parent,
++static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *c, struct child *r,
+- struct node *left, struct node *center, struct node *right,
++ struct btree_node *left, struct btree_node *center, struct btree_node *right,
+ uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+ {
+ int s;
+@@ -343,12 +343,12 @@ static void redistribute3(struct dm_btree_info *info, struct node *parent,
+ *key_ptr(parent, r->index) = right->keys[0];
+ }
+
+-static void __rebalance3(struct dm_btree_info *info, struct node *parent,
++static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *c, struct child *r)
+ {
+- struct node *left = l->n;
+- struct node *center = c->n;
+- struct node *right = r->n;
++ struct btree_node *left = l->n;
++ struct btree_node *center = c->n;
++ struct btree_node *right = r->n;
+
+ uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
+ uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
+@@ -371,7 +371,7 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
+ unsigned left_index)
+ {
+ int r;
+- struct node *parent = dm_block_data(shadow_current(s));
++ struct btree_node *parent = dm_block_data(shadow_current(s));
+ struct child left, center, right;
+
+ /*
+@@ -421,7 +421,7 @@ static int get_nr_entries(struct dm_transaction_manager *tm,
+ {
+ int r;
+ struct dm_block *block;
+- struct node *n;
++ struct btree_node *n;
+
+ r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
+ if (r)
+@@ -438,7 +438,7 @@ static int rebalance_children(struct shadow_spine *s,
+ {
+ int i, r, has_left_sibling, has_right_sibling;
+ uint32_t child_entries;
+- struct node *n;
++ struct btree_node *n;
+
+ n = dm_block_data(shadow_current(s));
+
+@@ -483,7 +483,7 @@ static int rebalance_children(struct shadow_spine *s,
+ return r;
+ }
+
+-static int do_leaf(struct node *n, uint64_t key, unsigned *index)
++static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
+ {
+ int i = lower_bound(n, key);
+
+@@ -506,7 +506,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ uint64_t key, unsigned *index)
+ {
+ int i = *index, r;
+- struct node *n;
++ struct btree_node *n;
+
+ for (;;) {
+ r = shadow_step(s, root, vt);
+@@ -556,7 +556,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ unsigned level, last_level = info->levels - 1;
+ int index = 0, r = 0;
+ struct shadow_spine spine;
+- struct node *n;
++ struct btree_node *n;
+
+ init_shadow_spine(&spine, info);
+ for (level = 0; level < info->levels; level++) {
+diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
+index d9a7912..2f0805c 100644
+--- a/drivers/md/persistent-data/dm-btree-spine.c
++++ b/drivers/md/persistent-data/dm-btree-spine.c
+@@ -23,7 +23,7 @@ static void node_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+ {
+- struct node *n = dm_block_data(b);
++ struct btree_node *n = dm_block_data(b);
+ struct node_header *h = &n->header;
+
+ h->blocknr = cpu_to_le64(dm_block_location(b));
+@@ -38,7 +38,7 @@ static int node_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t block_size)
+ {
+- struct node *n = dm_block_data(b);
++ struct btree_node *n = dm_block_data(b);
+ struct node_header *h = &n->header;
+ size_t value_size;
+ __le32 csum_disk;
+@@ -164,7 +164,7 @@ int ro_step(struct ro_spine *s, dm_block_t new_child)
+ return r;
+ }
+
+-struct node *ro_node(struct ro_spine *s)
++struct btree_node *ro_node(struct ro_spine *s)
+ {
+ struct dm_block *block;
+
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index bd1e7ff..bbb2ec5 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -38,7 +38,7 @@ static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
+ /*----------------------------------------------------------------*/
+
+ /* makes the assumption that no two keys are the same. */
+-static int bsearch(struct node *n, uint64_t key, int want_hi)
++static int bsearch(struct btree_node *n, uint64_t key, int want_hi)
+ {
+ int lo = -1, hi = le32_to_cpu(n->header.nr_entries);
+
+@@ -58,12 +58,12 @@ static int bsearch(struct node *n, uint64_t key, int want_hi)
+ return want_hi ? hi : lo;
+ }
+
+-int lower_bound(struct node *n, uint64_t key)
++int lower_bound(struct btree_node *n, uint64_t key)
+ {
+ return bsearch(n, key, 0);
+ }
+
+-void inc_children(struct dm_transaction_manager *tm, struct node *n,
++void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
+ struct dm_btree_value_type *vt)
+ {
+ unsigned i;
+@@ -78,7 +78,7 @@ void inc_children(struct dm_transaction_manager *tm, struct node *n,
+ value_ptr(n, i, vt->size));
+ }
+
+-static int insert_at(size_t value_size, struct node *node, unsigned index,
++static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
+ uint64_t key, void *value)
+ __dm_written_to_disk(value)
+ {
+@@ -123,7 +123,7 @@ int dm_btree_empty(struct dm_btree_info *info, dm_block_t *root)
+ {
+ int r;
+ struct dm_block *b;
+- struct node *n;
++ struct btree_node *n;
+ size_t block_size;
+ uint32_t max_entries;
+
+@@ -155,7 +155,7 @@ EXPORT_SYMBOL_GPL(dm_btree_empty);
+ #define MAX_SPINE_DEPTH 64
+ struct frame {
+ struct dm_block *b;
+- struct node *n;
++ struct btree_node *n;
+ unsigned level;
+ unsigned nr_children;
+ unsigned current_child;
+@@ -296,7 +296,7 @@ EXPORT_SYMBOL_GPL(dm_btree_del);
+ /*----------------------------------------------------------------*/
+
+ static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
+- int (*search_fn)(struct node *, uint64_t),
++ int (*search_fn)(struct btree_node *, uint64_t),
+ uint64_t *result_key, void *v, size_t value_size)
+ {
+ int i, r;
+@@ -407,7 +407,7 @@ static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
+ size_t size;
+ unsigned nr_left, nr_right;
+ struct dm_block *left, *right, *parent;
+- struct node *ln, *rn, *pn;
++ struct btree_node *ln, *rn, *pn;
+ __le64 location;
+
+ left = shadow_current(s);
+@@ -492,7 +492,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
+ size_t size;
+ unsigned nr_left, nr_right;
+ struct dm_block *left, *right, *new_parent;
+- struct node *pn, *ln, *rn;
++ struct btree_node *pn, *ln, *rn;
+ __le64 val;
+
+ new_parent = shadow_current(s);
+@@ -577,7 +577,7 @@ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
+ uint64_t key, unsigned *index)
+ {
+ int r, i = *index, top = 1;
+- struct node *node;
++ struct btree_node *node;
+
+ for (;;) {
+ r = shadow_step(s, root, vt);
+@@ -644,7 +644,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
+ unsigned level, index = -1, last_level = info->levels - 1;
+ dm_block_t block = root;
+ struct shadow_spine spine;
+- struct node *n;
++ struct btree_node *n;
+ struct dm_btree_value_type le64_type;
+
+ le64_type.context = NULL;
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index 411f523..6dad2ef 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -19,6 +19,10 @@
+ #include <linux/slab.h>
+ #include <linux/module.h>
+
++static struct device_type mfd_dev_type = {
++ .name = "mfd_device",
++};
++
+ int mfd_cell_enable(struct platform_device *pdev)
+ {
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+@@ -88,6 +92,7 @@ static int mfd_add_device(struct device *parent, int id,
+ goto fail_device;
+
+ pdev->dev.parent = parent;
++ pdev->dev.type = &mfd_dev_type;
+
+ if (cell->pdata_size) {
+ ret = platform_device_add_data(pdev,
+@@ -183,10 +188,16 @@ EXPORT_SYMBOL(mfd_add_devices);
+
+ static int mfd_remove_devices_fn(struct device *dev, void *c)
+ {
+- struct platform_device *pdev = to_platform_device(dev);
+- const struct mfd_cell *cell = mfd_get_cell(pdev);
++ struct platform_device *pdev;
++ const struct mfd_cell *cell;
+ atomic_t **usage_count = c;
+
++ if (dev->type != &mfd_dev_type)
++ return 0;
++
++ pdev = to_platform_device(dev);
++ cell = mfd_get_cell(pdev);
++
+ /* find the base address of usage_count pointers (for freeing) */
+ if (!*usage_count || (cell->usage_count < *usage_count))
+ *usage_count = cell->usage_count;
+diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
+index fffc227..9c99680 100644
+--- a/drivers/misc/hpilo.c
++++ b/drivers/misc/hpilo.c
+@@ -735,7 +735,14 @@ static void ilo_remove(struct pci_dev *pdev)
+ free_irq(pdev->irq, ilo_hw);
+ ilo_unmap_device(pdev, ilo_hw);
+ pci_release_regions(pdev);
+- pci_disable_device(pdev);
++ /*
++ * pci_disable_device(pdev) used to be here. But this PCI device has
++ * two functions with interrupt lines connected to a single pin. The
++ * other one is a USB host controller. So when we disable the PIN here
++ * e.g. by rmmod hpilo, the controller stops working. It is because
++ * the interrupt link is disabled in ACPI since it is not refcounted
++ * yet. See acpi_pci_link_free_irq called from acpi_pci_irq_disable.
++ */
+ kfree(ilo_hw);
+ ilo_hwdev[(minor / MAX_CCB)] = 0;
+ }
+@@ -820,7 +827,7 @@ unmap:
+ free_regions:
+ pci_release_regions(pdev);
+ disable:
+- pci_disable_device(pdev);
++/* pci_disable_device(pdev); see comment in ilo_remove */
+ free:
+ kfree(ilo_hw);
+ out:
+diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
+index 8d082b4..d971817 100644
+--- a/drivers/misc/sgi-xp/xpc_main.c
++++ b/drivers/misc/sgi-xp/xpc_main.c
+@@ -53,6 +53,10 @@
+ #include <linux/kthread.h>
+ #include "xpc.h"
+
++#ifdef CONFIG_X86_64
++#include <asm/traps.h>
++#endif
++
+ /* define two XPC debug device structures to be used with dev_dbg() et al */
+
+ struct device_driver xpc_dbg_name = {
+@@ -1079,6 +1083,9 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
+ return NOTIFY_DONE;
+ }
+
++/* Used to only allow one cpu to complete disconnect */
++static unsigned int xpc_die_disconnecting;
++
+ /*
+ * Notify other partitions to deactivate from us by first disengaging from all
+ * references to our memory.
+@@ -1092,6 +1099,9 @@ xpc_die_deactivate(void)
+ long keep_waiting;
+ long wait_to_print;
+
++ if (cmpxchg(&xpc_die_disconnecting, 0, 1))
++ return;
++
+ /* keep xpc_hb_checker thread from doing anything (just in case) */
+ xpc_exiting = 1;
+
+@@ -1159,7 +1169,7 @@ xpc_die_deactivate(void)
+ * about the lack of a heartbeat.
+ */
+ static int
+-xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
++xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
+ {
+ #ifdef CONFIG_IA64 /* !!! temporary kludge */
+ switch (event) {
+@@ -1191,7 +1201,27 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
+ break;
+ }
+ #else
+- xpc_die_deactivate();
++ struct die_args *die_args = _die_args;
++
++ switch (event) {
++ case DIE_TRAP:
++ if (die_args->trapnr == X86_TRAP_DF)
++ xpc_die_deactivate();
++
++ if (((die_args->trapnr == X86_TRAP_MF) ||
++ (die_args->trapnr == X86_TRAP_XF)) &&
++ !user_mode_vm(die_args->regs))
++ xpc_die_deactivate();
++
++ break;
++ case DIE_INT3:
++ case DIE_DEBUG:
++ break;
++ case DIE_OOPS:
++ case DIE_GPF:
++ default:
++ xpc_die_deactivate();
++ }
+ #endif
+
+ return NOTIFY_DONE;
+diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
+index 559d30d..d5505f3 100644
+--- a/drivers/mmc/host/sh_mmcif.c
++++ b/drivers/mmc/host/sh_mmcif.c
+@@ -1003,10 +1003,6 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
+ host->sd_error = true;
+ dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
+ }
+- if (host->state == STATE_IDLE) {
+- dev_info(&host->pd->dev, "Spurious IRQ status 0x%x", state);
+- return IRQ_HANDLED;
+- }
+ if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
+ complete(&host->intr_wait);
+ else
+diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+index bb2fe60..1d02ec9 100644
+--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
++++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+@@ -135,6 +135,15 @@ int gpmi_init(struct gpmi_nand_data *this)
+ if (ret)
+ goto err_out;
+
++ /*
++ * Reset BCH here, too. We got failures otherwise :(
++ * See later BCH reset for explanation of MX23 handling
++ */
++ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
++ if (ret)
++ goto err_out;
++
++
+ /* Choose NAND mode. */
+ writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 6c284d1..202ae34 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1366,6 +1366,8 @@ static void bond_compute_features(struct bonding *bond)
+ struct net_device *bond_dev = bond->dev;
+ u32 vlan_features = BOND_VLAN_FEATURES;
+ unsigned short max_hard_header_len = ETH_HLEN;
++ unsigned int gso_max_size = GSO_MAX_SIZE;
++ u16 gso_max_segs = GSO_MAX_SEGS;
+ int i;
+
+ read_lock(&bond->lock);
+@@ -1379,11 +1381,16 @@ static void bond_compute_features(struct bonding *bond)
+
+ if (slave->dev->hard_header_len > max_hard_header_len)
+ max_hard_header_len = slave->dev->hard_header_len;
++
++ gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
++ gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
+ }
+
+ done:
+ bond_dev->vlan_features = vlan_features;
+ bond_dev->hard_header_len = max_hard_header_len;
++ bond_dev->gso_max_segs = gso_max_segs;
++ netif_set_gso_max_size(bond_dev, gso_max_size);
+
+ read_unlock(&bond->lock);
+
+diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
+index 4ef7e2f..a03fde9 100644
+--- a/drivers/net/bonding/bond_sysfs.c
++++ b/drivers/net/bonding/bond_sysfs.c
+@@ -1579,6 +1579,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
+ goto out;
+ }
+
++ read_lock(&bond->lock);
+ bond_for_each_slave(bond, slave, i) {
+ if (!bond_is_active_slave(slave)) {
+ if (new_value)
+@@ -1587,6 +1588,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
+ slave->inactive = 1;
+ }
+ }
++ read_unlock(&bond->lock);
+ out:
+ return ret;
+ }
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 25695bd..a319057 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -569,8 +569,7 @@ void close_candev(struct net_device *dev)
+ {
+ struct can_priv *priv = netdev_priv(dev);
+
+- if (del_timer_sync(&priv->restart_timer))
+- dev_put(dev);
++ del_timer_sync(&priv->restart_timer);
+ can_flush_echo_skb(dev);
+ }
+ EXPORT_SYMBOL_GPL(close_candev);
+diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
+index 1063093..e8ee2bc 100644
+--- a/drivers/net/ethernet/8390/ne.c
++++ b/drivers/net/ethernet/8390/ne.c
+@@ -814,6 +814,7 @@ static int __init ne_drv_probe(struct platform_device *pdev)
+ dev->irq = irq[this_dev];
+ dev->mem_end = bad[this_dev];
+ }
++ SET_NETDEV_DEV(dev, &pdev->dev);
+ err = do_ne_probe(dev);
+ if (err) {
+ free_netdev(dev);
+diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
+index 5039f08..43e9ab4 100644
+--- a/drivers/net/irda/sir_dev.c
++++ b/drivers/net/irda/sir_dev.c
+@@ -222,7 +222,7 @@ static void sirdev_config_fsm(struct work_struct *work)
+ break;
+
+ case SIRDEV_STATE_DONGLE_SPEED:
+- if (dev->dongle_drv->reset) {
++ if (dev->dongle_drv->set_speed) {
+ ret = dev->dongle_drv->set_speed(dev, fsm->param);
+ if (ret < 0) {
+ fsm->result = ret;
+diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
+index 00ed9c1..ac53952 100644
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -62,6 +62,7 @@
+ #define USB_PRODUCT_IPAD 0x129a
+ #define USB_PRODUCT_IPHONE_4_VZW 0x129c
+ #define USB_PRODUCT_IPHONE_4S 0x12a0
++#define USB_PRODUCT_IPHONE_5 0x12a8
+
+ #define IPHETH_USBINTF_CLASS 255
+ #define IPHETH_USBINTF_SUBCLASS 253
+@@ -113,6 +114,10 @@ static struct usb_device_id ipheth_table[] = {
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
++ { USB_DEVICE_AND_INTERFACE_INFO(
++ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_5,
++ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
++ IPHETH_USBINTF_PROTO) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(usb, ipheth_table);
+diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
+index 6650fde..9f1e947 100644
+--- a/drivers/net/wimax/i2400m/i2400m-usb.h
++++ b/drivers/net/wimax/i2400m/i2400m-usb.h
+@@ -152,6 +152,9 @@ enum {
+ /* Device IDs */
+ USB_DEVICE_ID_I6050 = 0x0186,
+ USB_DEVICE_ID_I6050_2 = 0x0188,
++ USB_DEVICE_ID_I6150 = 0x07d6,
++ USB_DEVICE_ID_I6150_2 = 0x07d7,
++ USB_DEVICE_ID_I6150_3 = 0x07d9,
+ USB_DEVICE_ID_I6250 = 0x0187,
+ };
+
+diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
+index 2c1b8b6..6bb7f3c 100644
+--- a/drivers/net/wimax/i2400m/usb.c
++++ b/drivers/net/wimax/i2400m/usb.c
+@@ -492,6 +492,9 @@ int i2400mu_probe(struct usb_interface *iface,
+ switch (id->idProduct) {
+ case USB_DEVICE_ID_I6050:
+ case USB_DEVICE_ID_I6050_2:
++ case USB_DEVICE_ID_I6150:
++ case USB_DEVICE_ID_I6150_2:
++ case USB_DEVICE_ID_I6150_3:
+ case USB_DEVICE_ID_I6250:
+ i2400mu->i6050 = 1;
+ break;
+@@ -741,6 +744,9 @@ static
+ struct usb_device_id i2400mu_id_table[] = {
+ { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
+ { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
++ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150) },
++ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_2) },
++ { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_3) },
+ { USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) },
+ { USB_DEVICE(0x8086, 0x0181) },
+ { USB_DEVICE(0x8086, 0x1403) },
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+index cc54153..498a3c1 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
++++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+@@ -835,98 +835,98 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
+
+ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+- {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+- {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+- {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
++ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
++ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
++ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+- {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+- {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+- {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+- {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+- {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+- {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+- {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+- {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
+- {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+- {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+- {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+- {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+- {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+- {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+- {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+- {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+- {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+- {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861},
+- {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81},
+- {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83},
+- {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84},
+- {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3},
+- {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5},
+- {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9},
+- {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb},
+- {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
+- {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
+- {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
+- {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
+- {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
+- {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
+- {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
+- {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
+- {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
+- {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
+- {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
+- {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
+- {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
+- {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
+- {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
+- {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
+- {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
+- {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
+- {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861},
+- {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81},
+- {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83},
+- {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84},
+- {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3},
+- {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5},
+- {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9},
+- {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb},
+- {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
+- {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
++ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
++ {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
++ {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
++ {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
++ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
++ {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
++ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
++ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
++ {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
++ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
++ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
++ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
++ {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
++ {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
++ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
++ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
++ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
++ {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
++ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
++ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
++ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
++ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
++ {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
++ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
++ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
++ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
++ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
++ {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
++ {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
++ {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
++ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
++ {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
++ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
++ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
++ {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
++ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
++ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
++ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
++ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
++ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
++ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
++ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
++ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
++ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
++ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
++ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
++ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
++ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
++ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
++ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
++ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
++ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
++ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
++ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+- {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
+- {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
+- {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
+- {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
+- {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
+- {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
+- {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+- {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+- {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+- {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+- {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
+- {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+- {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+- {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
++ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
++ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
++ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
++ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
++ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
++ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
++ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
++ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
++ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
++ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
++ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
++ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
++ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
++ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
++ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+- {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352},
+- {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584},
+- {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800},
++ {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
++ {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
++ {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
+ {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+index 6335a86..69bcdb6 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+@@ -69,13 +69,13 @@
+ #define AR9300_BASE_ADDR 0x3ff
+ #define AR9300_BASE_ADDR_512 0x1ff
+
+-#define AR9300_OTP_BASE 0x14000
+-#define AR9300_OTP_STATUS 0x15f18
++#define AR9300_OTP_BASE (AR_SREV_9340(ah) ? 0x30000 : 0x14000)
++#define AR9300_OTP_STATUS (AR_SREV_9340(ah) ? 0x30018 : 0x15f18)
+ #define AR9300_OTP_STATUS_TYPE 0x7
+ #define AR9300_OTP_STATUS_VALID 0x4
+ #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
+ #define AR9300_OTP_STATUS_SM_BUSY 0x1
+-#define AR9300_OTP_READ_DATA 0x15f1c
++#define AR9300_OTP_READ_DATA (AR_SREV_9340(ah) ? 0x3001c : 0x15f1c)
+
+ enum targetPowerHTRates {
+ HT_TARGET_RATE_0_8_16,
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+index fb937ba..e9d73e7 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+@@ -34,9 +34,6 @@
+ */
+ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
+ {
+-#define PCIE_PLL_ON_CREQ_DIS_L1_2P0 \
+- ar9462_pciephy_pll_on_clkreq_disable_L1_2p0
+-
+ #define AR9462_BB_CTX_COEFJ(x) \
+ ar9462_##x##_baseband_core_txfir_coeff_japan_2484
+
+@@ -369,13 +366,13 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
+
+ /* Awake -> Sleep Setting */
+ INIT_INI_ARRAY(&ah->iniPcieSerdes,
+- PCIE_PLL_ON_CREQ_DIS_L1_2P0,
+- ARRAY_SIZE(PCIE_PLL_ON_CREQ_DIS_L1_2P0),
++ ar9462_pciephy_clkreq_disable_L1_2p0,
++ ARRAY_SIZE(ar9462_pciephy_clkreq_disable_L1_2p0),
+ 2);
+ /* Sleep -> Awake Setting */
+ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
+- PCIE_PLL_ON_CREQ_DIS_L1_2P0,
+- ARRAY_SIZE(PCIE_PLL_ON_CREQ_DIS_L1_2P0),
++ ar9462_pciephy_clkreq_disable_L1_2p0,
++ ARRAY_SIZE(ar9462_pciephy_clkreq_disable_L1_2p0),
+ 2);
+
+ /* Fast clock modal settings */
+diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
+index d771de5..bcabfbf 100644
+--- a/drivers/net/wireless/ath/ath9k/calib.c
++++ b/drivers/net/wireless/ath/ath9k/calib.c
+@@ -69,6 +69,7 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
+
+ if (chan && chan->noisefloor) {
+ s8 delta = chan->noisefloor -
++ ATH9K_NF_CAL_NOISE_THRESH -
+ ath9k_hw_get_default_nf(ah, chan);
+ if (delta > 0)
+ noise += delta;
+diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
+index 05b9dbf..e300a73 100644
+--- a/drivers/net/wireless/ath/ath9k/calib.h
++++ b/drivers/net/wireless/ath/ath9k/calib.h
+@@ -22,6 +22,9 @@
+ #define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3
+ #define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
+
++/* Internal noise floor can vary by about 6db depending on the frequency */
++#define ATH9K_NF_CAL_NOISE_THRESH 6
++
+ #define NUM_NF_READINGS 6
+ #define ATH9K_NF_CAL_HIST_MAX 5
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+index 17fb25d..9fefb56 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+@@ -311,6 +311,14 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
+ rxb->page_dma = dma_map_page(bus(trans)->dev, page, 0,
+ PAGE_SIZE << hw_params(trans).rx_page_order,
+ DMA_FROM_DEVICE);
++ if (dma_mapping_error(bus(trans)->dev, rxb->page_dma)) {
++ rxb->page = NULL;
++ spin_lock_irqsave(&rxq->lock, flags);
++ list_add(&rxb->list, &rxq->rx_used);
++ spin_unlock_irqrestore(&rxq->lock, flags);
++ __free_pages(page, hw_params(trans).rx_page_order);
++ return;
++ }
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+@@ -489,8 +497,19 @@ static void iwl_rx_handle(struct iwl_trans *trans)
+ 0, PAGE_SIZE <<
+ hw_params(trans).rx_page_order,
+ DMA_FROM_DEVICE);
+- list_add_tail(&rxb->list, &rxq->rx_free);
+- rxq->free_count++;
++ if (dma_mapping_error(bus(trans)->dev, rxb->page_dma)) {
++ /*
++ * free the page(s) as well to not break
++ * the invariant that the items on the used
++ * list have no page(s)
++ */
++ __free_pages(rxb->page, hw_params(trans).rx_page_order);
++ rxb->page = NULL;
++ list_add_tail(&rxb->list, &rxq->rx_used);
++ } else {
++ list_add_tail(&rxb->list, &rxq->rx_free);
++ rxq->free_count++;
++ }
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+@@ -1263,12 +1282,20 @@ static irqreturn_t iwl_isr(int irq, void *data)
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here. */
+- inta_mask = iwl_read32(bus(trans), CSR_INT_MASK); /* just for debug */
++ inta_mask = iwl_read32(bus(trans), CSR_INT_MASK);
+ iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(bus(trans), CSR_INT);
+
++ if (inta & (~inta_mask)) {
++ IWL_DEBUG_ISR(trans,
++ "We got a masked interrupt (0x%08x)...Ack and ignore\n",
++ inta & (~inta_mask));
++ iwl_write32(bus(trans), CSR_INT, inta & (~inta_mask));
++ inta &= inta_mask;
++ }
++
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+@@ -1349,7 +1376,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here.
+ */
+- inta_mask = iwl_read32(bus(trans), CSR_INT_MASK); /* just for debug */
++ inta_mask = iwl_read32(bus(trans), CSR_INT_MASK);
+ iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
+
+
+diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
+index d21e8f5..291906e 100644
+--- a/drivers/pnp/pnpacpi/core.c
++++ b/drivers/pnp/pnpacpi/core.c
+@@ -58,7 +58,7 @@ static inline int __init is_exclusive_device(struct acpi_device *dev)
+ if (!(('0' <= (c) && (c) <= '9') || ('A' <= (c) && (c) <= 'F'))) \
+ return 0
+ #define TEST_ALPHA(c) \
+- if (!('@' <= (c) || (c) <= 'Z')) \
++ if (!('A' <= (c) && (c) <= 'Z')) \
+ return 0
+ static int __init ispnpidacpi(const char *id)
+ {
+@@ -95,6 +95,9 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
+ return -ENODEV;
+ }
+
++ if (WARN_ON_ONCE(acpi_dev != dev->data))
++ dev->data = acpi_dev;
++
+ ret = pnpacpi_build_resource_template(dev, &buffer);
+ if (ret)
+ return ret;
+diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
+index bd3531d..77a6faf 100644
+--- a/drivers/regulator/wm831x-dcdc.c
++++ b/drivers/regulator/wm831x-dcdc.c
+@@ -330,7 +330,7 @@ static int wm831x_buckv_set_voltage(struct regulator_dev *rdev,
+ if (vsel > dcdc->dvs_vsel) {
+ ret = wm831x_set_bits(wm831x, dvs_reg,
+ WM831X_DC1_DVS_VSEL_MASK,
+- dcdc->dvs_vsel);
++ vsel);
+ if (ret == 0)
+ dcdc->dvs_vsel = vsel;
+ else
+diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
+index 07a4fd2..daa6b90 100644
+--- a/drivers/s390/cio/device_pgid.c
++++ b/drivers/s390/cio/device_pgid.c
+@@ -234,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
+ * Determine pathgroup state from PGID data.
+ */
+ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
+- int *mismatch, int *reserved, u8 *reset)
++ int *mismatch, u8 *reserved, u8 *reset)
+ {
+ struct pgid *pgid = &cdev->private->pgid[0];
+ struct pgid *first = NULL;
+@@ -248,7 +248,7 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
+ if ((cdev->private->pgid_valid_mask & lpm) == 0)
+ continue;
+ if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
+- *reserved = 1;
++ *reserved |= lpm;
+ if (pgid_is_reset(pgid)) {
+ *reset |= lpm;
+ continue;
+@@ -316,14 +316,14 @@ static void snid_done(struct ccw_device *cdev, int rc)
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct pgid *pgid;
+ int mismatch = 0;
+- int reserved = 0;
++ u8 reserved = 0;
+ u8 reset = 0;
+ u8 donepm;
+
+ if (rc)
+ goto out;
+ pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
+- if (reserved)
++ if (reserved == cdev->private->pgid_valid_mask)
+ rc = -EUSERS;
+ else if (mismatch)
+ rc = -EOPNOTSUPP;
+@@ -336,7 +336,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
+ }
+ out:
+ CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
+- "todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid,
++ "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
+ id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
+ cdev->private->pgid_todo_mask, mismatch, reserved, reset);
+ switch (rc) {
+diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
+index f17c92c..0fd5ea7 100644
+--- a/drivers/scsi/aha152x.c
++++ b/drivers/scsi/aha152x.c
+@@ -2985,8 +2985,8 @@ static int get_command(char *pos, Scsi_Cmnd * ptr)
+ char *start = pos;
+ int i;
+
+- SPRINTF("0x%08x: target=%d; lun=%d; cmnd=( ",
+- (unsigned int) ptr, ptr->device->id, ptr->device->lun);
++ SPRINTF("%p: target=%d; lun=%d; cmnd=( ",
++ ptr, ptr->device->id, ptr->device->lun);
+
+ for (i = 0; i < COMMAND_SIZE(ptr->cmnd[0]); i++)
+ SPRINTF("0x%02x ", ptr->cmnd[i]);
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 22523aa..0f48550 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -98,6 +98,15 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1920},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
++ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d},
+ {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
+ {0,}
+@@ -117,13 +126,22 @@ static struct board_type products[] = {
+ {0x3249103C, "Smart Array P812", &SA5_access},
+ {0x324a103C, "Smart Array P712m", &SA5_access},
+ {0x324b103C, "Smart Array P711m", &SA5_access},
+- {0x3350103C, "Smart Array", &SA5_access},
+- {0x3351103C, "Smart Array", &SA5_access},
+- {0x3352103C, "Smart Array", &SA5_access},
+- {0x3353103C, "Smart Array", &SA5_access},
+- {0x3354103C, "Smart Array", &SA5_access},
+- {0x3355103C, "Smart Array", &SA5_access},
+- {0x3356103C, "Smart Array", &SA5_access},
++ {0x3350103C, "Smart Array P222", &SA5_access},
++ {0x3351103C, "Smart Array P420", &SA5_access},
++ {0x3352103C, "Smart Array P421", &SA5_access},
++ {0x3353103C, "Smart Array P822", &SA5_access},
++ {0x3354103C, "Smart Array P420i", &SA5_access},
++ {0x3355103C, "Smart Array P220i", &SA5_access},
++ {0x3356103C, "Smart Array P721m", &SA5_access},
++ {0x1920103C, "Smart Array", &SA5_access},
++ {0x1921103C, "Smart Array", &SA5_access},
++ {0x1922103C, "Smart Array", &SA5_access},
++ {0x1923103C, "Smart Array", &SA5_access},
++ {0x1924103C, "Smart Array", &SA5_access},
++ {0x1925103C, "Smart Array", &SA5_access},
++ {0x1926103C, "Smart Array", &SA5_access},
++ {0x1928103C, "Smart Array", &SA5_access},
++ {0x334d103C, "Smart Array P822se", &SA5_access},
+ {0xFFFF103C, "Unknown Smart Array", &SA5_access},
+ };
+
+diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
+index 8f7eb4f..487aa6f 100644
+--- a/drivers/scsi/mvsas/mv_94xx.h
++++ b/drivers/scsi/mvsas/mv_94xx.h
+@@ -258,21 +258,11 @@ enum sas_sata_phy_regs {
+ #define SPI_ADDR_VLD_94XX (1U << 1)
+ #define SPI_CTRL_SpiStart_94XX (1U << 0)
+
+-#define mv_ffc(x) ffz(x)
+-
+ static inline int
+ mv_ffc64(u64 v)
+ {
+- int i;
+- i = mv_ffc((u32)v);
+- if (i >= 0)
+- return i;
+- i = mv_ffc((u32)(v>>32));
+-
+- if (i != 0)
+- return 32 + i;
+-
+- return -1;
++ u64 x = ~v;
++ return x ? __ffs64(x) : -1;
+ }
+
+ #define r_reg_set_enable(i) \
+diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
+index c04a4f5..da24955 100644
+--- a/drivers/scsi/mvsas/mv_sas.h
++++ b/drivers/scsi/mvsas/mv_sas.h
+@@ -69,7 +69,7 @@ extern struct kmem_cache *mvs_task_list_cache;
+ #define DEV_IS_EXPANDER(type) \
+ ((type == EDGE_DEV) || (type == FANOUT_DEV))
+
+-#define bit(n) ((u32)1 << n)
++#define bit(n) ((u64)1 << n)
+
+ #define for_each_phy(__lseq_mask, __mc, __lseq) \
+ for ((__mc) = (__lseq_mask), (__lseq) = 0; \
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index f9e5b85..82a5ca6 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -3541,9 +3541,9 @@ qla2x00_do_dpc(void *data)
+ "ISP abort end.\n");
+ }
+
+- if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
++ if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
++ &base_vha->dpc_flags)) {
+ qla2x00_update_fcports(base_vha);
+- clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
+ }
+
+ if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 08d48a3..72ca515 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -246,11 +246,11 @@ show_shost_active_mode(struct device *dev,
+
+ static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
+
+-static int check_reset_type(char *str)
++static int check_reset_type(const char *str)
+ {
+- if (strncmp(str, "adapter", 10) == 0)
++ if (sysfs_streq(str, "adapter"))
+ return SCSI_ADAPTER_RESET;
+- else if (strncmp(str, "firmware", 10) == 0)
++ else if (sysfs_streq(str, "firmware"))
+ return SCSI_FIRMWARE_RESET;
+ else
+ return 0;
+@@ -263,12 +263,9 @@ store_host_reset(struct device *dev, struct device_attribute *attr,
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct scsi_host_template *sht = shost->hostt;
+ int ret = -EINVAL;
+- char str[10];
+ int type;
+
+- sscanf(buf, "%s", str);
+- type = check_reset_type(str);
+-
++ type = check_reset_type(buf);
+ if (!type)
+ goto exit_store_host_reset;
+
+diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
+index a09d351..3582535 100644
+--- a/drivers/staging/bcm/InterfaceInit.c
++++ b/drivers/staging/bcm/InterfaceInit.c
+@@ -4,10 +4,12 @@ static struct usb_device_id InterfaceUsbtable[] = {
+ { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3) },
+ { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3B) },
+ { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_T3L) },
+- { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_SM250) },
++ { USB_DEVICE(BCM_USB_VENDOR_ID_T3, BCM_USB_PRODUCT_ID_SYM) },
+ { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_226) },
+ { USB_DEVICE(BCM_USB_VENDOR_ID_FOXCONN, BCM_USB_PRODUCT_ID_1901) },
+ { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_TU25) },
++ { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_226) },
++ { USB_DEVICE(BCM_USB_VENDOR_ID_ZTE, BCM_USB_PRODUCT_ID_ZTE_326) },
+ { }
+ };
+ MODULE_DEVICE_TABLE(usb, InterfaceUsbtable);
+diff --git a/drivers/staging/bcm/InterfaceInit.h b/drivers/staging/bcm/InterfaceInit.h
+index 058315a..6fa4f09 100644
+--- a/drivers/staging/bcm/InterfaceInit.h
++++ b/drivers/staging/bcm/InterfaceInit.h
+@@ -8,10 +8,11 @@
+ #define BCM_USB_PRODUCT_ID_T3 0x0300
+ #define BCM_USB_PRODUCT_ID_T3B 0x0210
+ #define BCM_USB_PRODUCT_ID_T3L 0x0220
+-#define BCM_USB_PRODUCT_ID_SM250 0xbccd
+ #define BCM_USB_PRODUCT_ID_SYM 0x15E
+ #define BCM_USB_PRODUCT_ID_1901 0xe017
+-#define BCM_USB_PRODUCT_ID_226 0x0132
++#define BCM_USB_PRODUCT_ID_226 0x0132 /* not sure if this is valid */
++#define BCM_USB_PRODUCT_ID_ZTE_226 0x172
++#define BCM_USB_PRODUCT_ID_ZTE_326 0x173 /* ZTE AX326 */
+ #define BCM_USB_PRODUCT_ID_ZTE_TU25 0x0007
+
+ #define BCM_USB_MINOR_BASE 192
+diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
+index 08021f4..4664e9d 100644
+--- a/drivers/staging/vt6656/dpc.c
++++ b/drivers/staging/vt6656/dpc.c
+@@ -1238,7 +1238,7 @@ static BOOL s_bHandleRxEncryption (
+
+ PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc
+ *pdwRxTSC47_16 = cpu_to_le32(*(PDWORD)(pbyIV + 4));
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16);
+ if (byDecMode == KEY_CTL_TKIP) {
+ *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV));
+ } else {
+@@ -1349,7 +1349,7 @@ static BOOL s_bHostWepRxEncryption (
+
+ PayloadLen -= (WLAN_HDR_ADDR3_LEN + 8 + 4); // 24 is 802.11 header, 8 is IV&ExtIV, 4 is crc
+ *pdwRxTSC47_16 = cpu_to_le32(*(PDWORD)(pbyIV + 4));
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %lx\n",*pdwRxTSC47_16);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ExtIV: %x\n", *pdwRxTSC47_16);
+
+ if (byDecMode == KEY_CTL_TKIP) {
+ *pwRxTSC15_0 = cpu_to_le16(MAKEWORD(*(pbyIV+2), *pbyIV));
+diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
+index 27bb523..fd93e83 100644
+--- a/drivers/staging/vt6656/key.c
++++ b/drivers/staging/vt6656/key.c
+@@ -223,7 +223,7 @@ BOOL KeybSetKey(
+ PSKeyManagement pTable,
+ PBYTE pbyBSSID,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+@@ -235,7 +235,8 @@ BOOL KeybSetKey(
+ PSKeyItem pKey;
+ unsigned int uKeyIdx;
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetKey: %lX\n", dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++ "Enter KeybSetKey: %X\n", dwKeyIndex);
+
+ j = (MAX_KEY_TABLE-1);
+ for (i=0;i<(MAX_KEY_TABLE-1);i++) {
+@@ -261,7 +262,9 @@ BOOL KeybSetKey(
+ if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
+ // Group transmit key
+ pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex;
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++ "Group transmit key(R)[%X]: %d\n",
++ pTable->KeyTable[i].dwGTKeyIndex, i);
+ }
+ pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed
+ pTable->KeyTable[i].wKeyCtl |= (byKeyDecMode << 4);
+@@ -302,9 +305,12 @@ BOOL KeybSetKey(
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n ",
++ pKey->dwTSC47_16);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ",
++ pKey->wTSC15_0);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ",
++ pKey->dwKeyIndex);
+
+ return (TRUE);
+ }
+@@ -326,7 +332,9 @@ BOOL KeybSetKey(
+ if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
+ // Group transmit key
+ pTable->KeyTable[j].dwGTKeyIndex = dwKeyIndex;
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(N)[%lX]: %d\n", pTable->KeyTable[j].dwGTKeyIndex, j);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++ "Group transmit key(N)[%X]: %d\n",
++ pTable->KeyTable[j].dwGTKeyIndex, j);
+ }
+ pTable->KeyTable[j].wKeyCtl &= 0xFF0F; // clear group key control filed
+ pTable->KeyTable[j].wKeyCtl |= (byKeyDecMode << 4);
+@@ -367,9 +375,11 @@ BOOL KeybSetKey(
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n ", pKey->dwTSC47_16);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n ",
++ pKey->dwTSC47_16);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n ", pKey->wTSC15_0);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n ", pKey->dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n ",
++ pKey->dwKeyIndex);
+
+ return (TRUE);
+ }
+@@ -597,7 +607,8 @@ BOOL KeybGetTransmitKey(PSKeyManagement pTable, PBYTE pbyBSSID, DWORD dwKeyType,
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"%x ", pTable->KeyTable[i].abyBSSID[ii]);
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %lX\n", pTable->KeyTable[i].dwGTKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"dwGTKeyIndex: %X\n",
++ pTable->KeyTable[i].dwGTKeyIndex);
+
+ return (TRUE);
+ }
+@@ -664,7 +675,7 @@ BOOL KeybSetDefaultKey(
+ void *pDeviceHandler,
+ PSKeyManagement pTable,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+@@ -693,7 +704,10 @@ BOOL KeybSetDefaultKey(
+ if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
+ // Group transmit key
+ pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex = dwKeyIndex;
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex, MAX_KEY_TABLE-1);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++ "Group transmit key(R)[%X]: %d\n",
++ pTable->KeyTable[MAX_KEY_TABLE-1].dwGTKeyIndex,
++ MAX_KEY_TABLE-1);
+
+ }
+ pTable->KeyTable[MAX_KEY_TABLE-1].wKeyCtl &= 0x7F00; // clear all key control filed
+@@ -744,9 +758,11 @@ BOOL KeybSetDefaultKey(
+ }
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"\n");
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %lx\n", pKey->dwTSC47_16);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwTSC47_16: %x\n",
++ pKey->dwTSC47_16);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->wTSC15_0: %x\n", pKey->wTSC15_0);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %lx\n", pKey->dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pKey->dwKeyIndex: %x\n",
++ pKey->dwKeyIndex);
+
+ return (TRUE);
+ }
+@@ -772,7 +788,7 @@ BOOL KeybSetAllGroupKey(
+ void *pDeviceHandler,
+ PSKeyManagement pTable,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+@@ -784,7 +800,8 @@ BOOL KeybSetAllGroupKey(
+ PSKeyItem pKey;
+ unsigned int uKeyIdx;
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %lX\n", dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Enter KeybSetAllGroupKey: %X\n",
++ dwKeyIndex);
+
+
+ if ((dwKeyIndex & PAIRWISE_KEY) != 0) { // Pairwise key
+@@ -801,7 +818,9 @@ BOOL KeybSetAllGroupKey(
+ if ((dwKeyIndex & TRANSMIT_KEY) != 0) {
+ // Group transmit key
+ pTable->KeyTable[i].dwGTKeyIndex = dwKeyIndex;
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Group transmit key(R)[%lX]: %d\n", pTable->KeyTable[i].dwGTKeyIndex, i);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
++ "Group transmit key(R)[%X]: %d\n",
++ pTable->KeyTable[i].dwGTKeyIndex, i);
+
+ }
+ pTable->KeyTable[i].wKeyCtl &= 0xFF0F; // clear group key control filed
+diff --git a/drivers/staging/vt6656/key.h b/drivers/staging/vt6656/key.h
+index f749c7a..bd35d39 100644
+--- a/drivers/staging/vt6656/key.h
++++ b/drivers/staging/vt6656/key.h
+@@ -58,7 +58,7 @@
+ typedef struct tagSKeyItem
+ {
+ BOOL bKeyValid;
+- unsigned long uKeyLength;
++ u32 uKeyLength;
+ BYTE abyKey[MAX_KEY_LEN];
+ QWORD KeyRSC;
+ DWORD dwTSC47_16;
+@@ -107,7 +107,7 @@ BOOL KeybSetKey(
+ PSKeyManagement pTable,
+ PBYTE pbyBSSID,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+@@ -146,7 +146,7 @@ BOOL KeybSetDefaultKey(
+ void *pDeviceHandler,
+ PSKeyManagement pTable,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+@@ -156,7 +156,7 @@ BOOL KeybSetAllGroupKey(
+ void *pDeviceHandler,
+ PSKeyManagement pTable,
+ DWORD dwKeyIndex,
+- unsigned long uKeyLength,
++ u32 uKeyLength,
+ PQWORD pKeyRSC,
+ PBYTE pbyKey,
+ BYTE byKeyDecMode
+diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
+index 26c19d1..0636d82 100644
+--- a/drivers/staging/vt6656/mac.c
++++ b/drivers/staging/vt6656/mac.c
+@@ -262,7 +262,8 @@ BYTE pbyData[24];
+ dwData1 <<= 16;
+ dwData1 |= MAKEWORD(*(pbyAddr+4), *(pbyAddr+5));
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %lX, KeyCtl:%X\n", wOffset, dwData1, wKeyCtl);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"1. wOffset: %d, Data: %X,"\
++ " KeyCtl:%X\n", wOffset, dwData1, wKeyCtl);
+
+ //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
+ //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
+@@ -279,7 +280,8 @@ BYTE pbyData[24];
+ dwData2 <<= 8;
+ dwData2 |= *(pbyAddr+0);
+
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. wOffset: %d, Data: %lX\n", wOffset, dwData2);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"2. wOffset: %d, Data: %X\n",
++ wOffset, dwData2);
+
+ //VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
+ //VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
+diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
+index 3fd0478..8cf0881 100644
+--- a/drivers/staging/vt6656/rf.c
++++ b/drivers/staging/vt6656/rf.c
+@@ -769,6 +769,9 @@ BYTE byPwr = pDevice->byCCKPwr;
+ return TRUE;
+ }
+
++ if (uCH == 0)
++ return -EINVAL;
++
+ switch (uRATE) {
+ case RATE_1M:
+ case RATE_2M:
+diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
+index fe21868..3beb126 100644
+--- a/drivers/staging/vt6656/rxtx.c
++++ b/drivers/staging/vt6656/rxtx.c
+@@ -377,7 +377,8 @@ s_vFillTxKey (
+ *(pbyIVHead+3) = (BYTE)(((pDevice->byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
+ // Append IV&ExtIV after Mac Header
+ *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %lx\n", *pdwExtIV);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %x\n",
++ *pdwExtIV);
+
+ } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
+ pTransmitKey->wTSC15_0++;
+@@ -1753,7 +1754,8 @@ s_bPacketToWirelessUsb(
+ MIC_vAppend((PBYTE)&(psEthHeader->abyDstAddr[0]), 12);
+ dwMIC_Priority = 0;
+ MIC_vAppend((PBYTE)&dwMIC_Priority, 4);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC KEY: %X, %X\n",
++ dwMICKey0, dwMICKey1);
+
+ ///////////////////////////////////////////////////////////////////
+
+@@ -2635,7 +2637,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
+ MIC_vAppend((PBYTE)&(sEthHeader.abyDstAddr[0]), 12);
+ dwMIC_Priority = 0;
+ MIC_vAppend((PBYTE)&dwMIC_Priority, 4);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY: %lX, %lX\n", dwMICKey0, dwMICKey1);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0_tx_8021:MIC KEY:"\
++ " %X, %X\n", dwMICKey0, dwMICKey1);
+
+ uLength = cbHeaderSize + cbMacHdLen + uPadding + cbIVlen;
+
+@@ -2655,7 +2658,8 @@ vDMA0_tx_80211(PSDevice pDevice, struct sk_buff *skb) {
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uLength: %d, %d\n", uLength, cbFrameBodySize);
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"cbReqCount:%d, %d, %d, %d\n", cbReqCount, cbHeaderSize, uPadding, cbIVlen);
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%lx, %lx\n", *pdwMIC_L, *pdwMIC_R);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"MIC:%x, %x\n",
++ *pdwMIC_L, *pdwMIC_R);
+
+ }
+
+@@ -3029,7 +3033,8 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"error: KEY is GTK!!~~\n");
+ }
+ else {
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n",
++ pTransmitKey->dwKeyIndex);
+ bNeedEncryption = TRUE;
+ }
+ }
+@@ -3043,7 +3048,8 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb)
+ if (pDevice->bEnableHostWEP) {
+ if ((uNodeIndex != 0) &&
+ (pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex & PAIRWISE_KEY)) {
+- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
++ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%X]\n",
++ pTransmitKey->dwKeyIndex);
+ bNeedEncryption = TRUE;
+ }
+ }
+diff --git a/drivers/staging/vt6656/ttype.h b/drivers/staging/vt6656/ttype.h
+index 8e9450e..dfbf747 100644
+--- a/drivers/staging/vt6656/ttype.h
++++ b/drivers/staging/vt6656/ttype.h
+@@ -29,6 +29,8 @@
+ #ifndef __TTYPE_H__
+ #define __TTYPE_H__
+
++#include <linux/types.h>
++
+ /******* Common definitions and typedefs ***********************************/
+
+ typedef int BOOL;
+@@ -42,17 +44,17 @@ typedef int BOOL;
+
+ /****** Simple typedefs ***************************************************/
+
+-typedef unsigned char BYTE; // 8-bit
+-typedef unsigned short WORD; // 16-bit
+-typedef unsigned long DWORD; // 32-bit
++typedef u8 BYTE;
++typedef u16 WORD;
++typedef u32 DWORD;
+
+ // QWORD is for those situation that we want
+ // an 8-byte-aligned 8 byte long structure
+ // which is NOT really a floating point number.
+ typedef union tagUQuadWord {
+ struct {
+- DWORD dwLowDword;
+- DWORD dwHighDword;
++ u32 dwLowDword;
++ u32 dwHighDword;
+ } u;
+ double DoNotUseThisField;
+ } UQuadWord;
+@@ -60,8 +62,8 @@ typedef UQuadWord QWORD; // 64-bit
+
+ /****** Common pointer types ***********************************************/
+
+-typedef unsigned long ULONG_PTR; // 32-bit
+-typedef unsigned long DWORD_PTR; // 32-bit
++typedef u32 ULONG_PTR;
++typedef u32 DWORD_PTR;
+
+ // boolean pointer
+
+diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
+index 78ea121..31fb96a 100644
+--- a/drivers/staging/vt6656/wcmd.c
++++ b/drivers/staging/vt6656/wcmd.c
+@@ -316,17 +316,19 @@ s_MgrMakeProbeRequest(
+ return pTxPacket;
+ }
+
+-void vCommandTimerWait(void *hDeviceContext, unsigned int MSecond)
++void vCommandTimerWait(void *hDeviceContext, unsigned long MSecond)
+ {
+- PSDevice pDevice = (PSDevice)hDeviceContext;
++ PSDevice pDevice = (PSDevice)hDeviceContext;
+
+- init_timer(&pDevice->sTimerCommand);
+- pDevice->sTimerCommand.data = (unsigned long)pDevice;
+- pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
+- // RUN_AT :1 msec ~= (HZ/1024)
+- pDevice->sTimerCommand.expires = (unsigned int)RUN_AT((MSecond * HZ) >> 10);
+- add_timer(&pDevice->sTimerCommand);
+- return;
++ init_timer(&pDevice->sTimerCommand);
++
++ pDevice->sTimerCommand.data = (unsigned long)pDevice;
++ pDevice->sTimerCommand.function = (TimerFunction)vRunCommand;
++ pDevice->sTimerCommand.expires = RUN_AT((MSecond * HZ) / 1000);
++
++ add_timer(&pDevice->sTimerCommand);
++
++ return;
+ }
+
+ void vRunCommand(void *hDeviceContext)
+diff --git a/drivers/staging/vt6656/wpa2.h b/drivers/staging/vt6656/wpa2.h
+index 46c2959..c359252 100644
+--- a/drivers/staging/vt6656/wpa2.h
++++ b/drivers/staging/vt6656/wpa2.h
+@@ -45,8 +45,8 @@ typedef struct tagsPMKIDInfo {
+ } PMKIDInfo, *PPMKIDInfo;
+
+ typedef struct tagSPMKIDCache {
+- unsigned long BSSIDInfoCount;
+- PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE];
++ u32 BSSIDInfoCount;
++ PMKIDInfo BSSIDInfo[MAX_PMKID_CACHE];
+ } SPMKIDCache, *PSPMKIDCache;
+
+
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 6fa7222..3effde2 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -2367,7 +2367,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
+ if (!conn_p)
+ return;
+
+- cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
++ cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
+ if (!cmd) {
+ iscsit_dec_conn_usage_count(conn_p);
+ return;
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index eb0c9fe..0df4a5f 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -130,13 +130,13 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+
+ initiatorname_param = iscsi_find_param_from_key(
+ INITIATORNAME, conn->param_list);
+- if (!initiatorname_param)
+- return -1;
+-
+ sessiontype_param = iscsi_find_param_from_key(
+ SESSIONTYPE, conn->param_list);
+- if (!sessiontype_param)
++ if (!initiatorname_param || !sessiontype_param) {
++ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
++ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+ return -1;
++ }
+
+ sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
+
+diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
+index 98936cb..7d85f88 100644
+--- a/drivers/target/iscsi/iscsi_target_nego.c
++++ b/drivers/target/iscsi/iscsi_target_nego.c
+@@ -632,8 +632,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
+ login->req_buf,
+ payload_length,
+ conn->param_list);
+- if (ret < 0)
++ if (ret < 0) {
++ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
++ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
++ }
+
+ if (login->first_request)
+ if (iscsi_target_check_first_request(conn, login) < 0)
+@@ -648,8 +651,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
+ login->rsp_buf,
+ &login->rsp_length,
+ conn->param_list);
+- if (ret < 0)
++ if (ret < 0) {
++ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
++ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
++ }
+
+ if (!login->auth_complete &&
+ ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index cafa477..ea29eaf 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -300,7 +300,7 @@ static int fd_do_readv(struct se_task *task)
+
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+- iov[i].iov_base = sg_virt(sg);
++ iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
+ }
+
+ old_fs = get_fs();
+@@ -308,6 +308,8 @@ static int fd_do_readv(struct se_task *task)
+ ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
+ set_fs(old_fs);
+
++ for_each_sg(task->task_sg, sg, task->task_sg_nents, i)
++ kunmap(sg_page(sg));
+ kfree(iov);
+ /*
+ * Return zeros and GOOD status even if the READ did not return
+@@ -353,7 +355,7 @@ static int fd_do_writev(struct se_task *task)
+
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+- iov[i].iov_base = sg_virt(sg);
++ iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
+ }
+
+ old_fs = get_fs();
+@@ -361,6 +363,9 @@ static int fd_do_writev(struct se_task *task)
+ ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
+ set_fs(old_fs);
+
++ for_each_sg(task->task_sg, sg, task->task_sg_nents, i)
++ kunmap(sg_page(sg));
++
+ kfree(iov);
+
+ if (ret < 0 || ret != task->task_size) {
+diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
+index 64ddb63..3f28fdb 100644
+--- a/drivers/target/tcm_fc/tfc_sess.c
++++ b/drivers/target/tcm_fc/tfc_sess.c
+@@ -465,7 +465,6 @@ static void ft_sess_rcu_free(struct rcu_head *rcu)
+ {
+ struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
+
+- transport_deregister_session(sess->se_sess);
+ kfree(sess);
+ }
+
+@@ -473,6 +472,7 @@ static void ft_sess_free(struct kref *kref)
+ {
+ struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
+
++ transport_deregister_session(sess->se_sess);
+ call_rcu(&sess->rcu, ft_sess_rcu_free);
+ }
+
+diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
+index d5f923b..e1abb45 100644
+--- a/drivers/telephony/ixj.c
++++ b/drivers/telephony/ixj.c
+@@ -3190,12 +3190,12 @@ static void ixj_write_cid(IXJ *j)
+
+ ixj_fsk_alloc(j);
+
+- strcpy(sdmf1, j->cid_send.month);
+- strcat(sdmf1, j->cid_send.day);
+- strcat(sdmf1, j->cid_send.hour);
+- strcat(sdmf1, j->cid_send.min);
+- strcpy(sdmf2, j->cid_send.number);
+- strcpy(sdmf3, j->cid_send.name);
++ strlcpy(sdmf1, j->cid_send.month, sizeof(sdmf1));
++ strlcat(sdmf1, j->cid_send.day, sizeof(sdmf1));
++ strlcat(sdmf1, j->cid_send.hour, sizeof(sdmf1));
++ strlcat(sdmf1, j->cid_send.min, sizeof(sdmf1));
++ strlcpy(sdmf2, j->cid_send.number, sizeof(sdmf2));
++ strlcpy(sdmf3, j->cid_send.name, sizeof(sdmf3));
+
+ len1 = strlen(sdmf1);
+ len2 = strlen(sdmf2);
+@@ -3340,12 +3340,12 @@ static void ixj_write_cidcw(IXJ *j)
+ ixj_pre_cid(j);
+ }
+ j->flags.cidcw_ack = 0;
+- strcpy(sdmf1, j->cid_send.month);
+- strcat(sdmf1, j->cid_send.day);
+- strcat(sdmf1, j->cid_send.hour);
+- strcat(sdmf1, j->cid_send.min);
+- strcpy(sdmf2, j->cid_send.number);
+- strcpy(sdmf3, j->cid_send.name);
++ strlcpy(sdmf1, j->cid_send.month, sizeof(sdmf1));
++ strlcat(sdmf1, j->cid_send.day, sizeof(sdmf1));
++ strlcat(sdmf1, j->cid_send.hour, sizeof(sdmf1));
++ strlcat(sdmf1, j->cid_send.min, sizeof(sdmf1));
++ strlcpy(sdmf2, j->cid_send.number, sizeof(sdmf2));
++ strlcpy(sdmf3, j->cid_send.name, sizeof(sdmf3));
+
+ len1 = strlen(sdmf1);
+ len2 = strlen(sdmf2);
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 9aaed0d..97b2c55 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -301,7 +301,7 @@ static void cleanup(struct wdm_device *desc)
+ desc->sbuf,
+ desc->validity->transfer_dma);
+ usb_free_coherent(interface_to_usbdev(desc->intf),
+- desc->bMaxPacketSize0,
++ desc->wMaxCommand,
+ desc->inbuf,
+ desc->response->transfer_dma);
+ kfree(desc->orq);
+@@ -788,7 +788,7 @@ out:
+ err3:
+ usb_set_intfdata(intf, NULL);
+ usb_free_coherent(interface_to_usbdev(desc->intf),
+- desc->bMaxPacketSize0,
++ desc->wMaxCommand,
+ desc->inbuf,
+ desc->response->transfer_dma);
+ err2:
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index ef116a5..ab11ca3c 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -1770,28 +1770,8 @@ free_interfaces:
+ goto free_interfaces;
+ }
+
+- ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+- USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
+- NULL, 0, USB_CTRL_SET_TIMEOUT);
+- if (ret < 0) {
+- /* All the old state is gone, so what else can we do?
+- * The device is probably useless now anyway.
+- */
+- cp = NULL;
+- }
+-
+- dev->actconfig = cp;
+- if (!cp) {
+- usb_set_device_state(dev, USB_STATE_ADDRESS);
+- usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
+- mutex_unlock(hcd->bandwidth_mutex);
+- usb_autosuspend_device(dev);
+- goto free_interfaces;
+- }
+- mutex_unlock(hcd->bandwidth_mutex);
+- usb_set_device_state(dev, USB_STATE_CONFIGURED);
+-
+- /* Initialize the new interface structures and the
++ /*
++ * Initialize the new interface structures and the
+ * hc/hcd/usbcore interface/endpoint state.
+ */
+ for (i = 0; i < nintf; ++i) {
+@@ -1835,6 +1815,35 @@ free_interfaces:
+ }
+ kfree(new_interfaces);
+
++ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
++ USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
++ NULL, 0, USB_CTRL_SET_TIMEOUT);
++ if (ret < 0 && cp) {
++ /*
++ * All the old state is gone, so what else can we do?
++ * The device is probably useless now anyway.
++ */
++ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
++ for (i = 0; i < nintf; ++i) {
++ usb_disable_interface(dev, cp->interface[i], true);
++ put_device(&cp->interface[i]->dev);
++ cp->interface[i] = NULL;
++ }
++ cp = NULL;
++ }
++
++ dev->actconfig = cp;
++ mutex_unlock(hcd->bandwidth_mutex);
++
++ if (!cp) {
++ usb_set_device_state(dev, USB_STATE_ADDRESS);
++
++ /* Leave LPM disabled while the device is unconfigured. */
++ usb_autosuspend_device(dev);
++ return ret;
++ }
++ usb_set_device_state(dev, USB_STATE_CONFIGURED);
++
+ if (cp->string == NULL &&
+ !(dev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS))
+ cp->string = usb_cache_string(dev, cp->desc.iConfiguration);
+diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
+index 11c07cb..72fd355 100644
+--- a/drivers/usb/gadget/f_ecm.c
++++ b/drivers/usb/gadget/f_ecm.c
+@@ -790,9 +790,9 @@ fail:
+ /* we might as well release our claims on endpoints */
+ if (ecm->notify)
+ ecm->notify->driver_data = NULL;
+- if (ecm->port.out_ep->desc)
++ if (ecm->port.out_ep)
+ ecm->port.out_ep->driver_data = NULL;
+- if (ecm->port.in_ep->desc)
++ if (ecm->port.in_ep)
+ ecm->port.in_ep->driver_data = NULL;
+
+ ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
+index 1a7b2dd..a9cf2052 100644
+--- a/drivers/usb/gadget/f_eem.c
++++ b/drivers/usb/gadget/f_eem.c
+@@ -319,10 +319,9 @@ fail:
+ if (f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+
+- /* we might as well release our claims on endpoints */
+- if (eem->port.out_ep->desc)
++ if (eem->port.out_ep)
+ eem->port.out_ep->driver_data = NULL;
+- if (eem->port.in_ep->desc)
++ if (eem->port.in_ep)
+ eem->port.in_ep->driver_data = NULL;
+
+ ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+diff --git a/drivers/usb/gadget/f_midi.c b/drivers/usb/gadget/f_midi.c
+index 3797b3d..dfd7b98 100644
+--- a/drivers/usb/gadget/f_midi.c
++++ b/drivers/usb/gadget/f_midi.c
+@@ -416,6 +416,7 @@ static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f)
+ midi->id = NULL;
+
+ usb_free_descriptors(f->descriptors);
++ usb_free_descriptors(f->hs_descriptors);
+ kfree(midi);
+ }
+
+diff --git a/drivers/usb/gadget/f_ncm.c b/drivers/usb/gadget/f_ncm.c
+index aab8ede..d7811ae 100644
+--- a/drivers/usb/gadget/f_ncm.c
++++ b/drivers/usb/gadget/f_ncm.c
+@@ -1259,9 +1259,9 @@ fail:
+ /* we might as well release our claims on endpoints */
+ if (ncm->notify)
+ ncm->notify->driver_data = NULL;
+- if (ncm->port.out_ep->desc)
++ if (ncm->port.out_ep)
+ ncm->port.out_ep->driver_data = NULL;
+- if (ncm->port.in_ep->desc)
++ if (ncm->port.in_ep)
+ ncm->port.in_ep->driver_data = NULL;
+
+ ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
+index 16a509a..5431493 100644
+--- a/drivers/usb/gadget/f_phonet.c
++++ b/drivers/usb/gadget/f_phonet.c
+@@ -532,7 +532,7 @@ int pn_bind(struct usb_configuration *c, struct usb_function *f)
+
+ req = usb_ep_alloc_request(fp->out_ep, GFP_KERNEL);
+ if (!req)
+- goto err;
++ goto err_req;
+
+ req->complete = pn_rx_complete;
+ fp->out_reqv[i] = req;
+@@ -541,14 +541,18 @@ int pn_bind(struct usb_configuration *c, struct usb_function *f)
+ /* Outgoing USB requests */
+ fp->in_req = usb_ep_alloc_request(fp->in_ep, GFP_KERNEL);
+ if (!fp->in_req)
+- goto err;
++ goto err_req;
+
+ INFO(cdev, "USB CDC Phonet function\n");
+ INFO(cdev, "using %s, OUT %s, IN %s\n", cdev->gadget->name,
+ fp->out_ep->name, fp->in_ep->name);
+ return 0;
+
++err_req:
++ for (i = 0; i < phonet_rxq_size && fp->out_reqv[i]; i++)
++ usb_ep_free_request(fp->out_ep, fp->out_reqv[i]);
+ err:
++
+ if (fp->out_ep)
+ fp->out_ep->driver_data = NULL;
+ if (fp->in_ep)
+diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
+index 704d1d9..817d611 100644
+--- a/drivers/usb/gadget/f_rndis.c
++++ b/drivers/usb/gadget/f_rndis.c
+@@ -802,9 +802,9 @@ fail:
+ /* we might as well release our claims on endpoints */
+ if (rndis->notify)
+ rndis->notify->driver_data = NULL;
+- if (rndis->port.out_ep->desc)
++ if (rndis->port.out_ep)
+ rndis->port.out_ep->driver_data = NULL;
+- if (rndis->port.in_ep->desc)
++ if (rndis->port.in_ep)
+ rndis->port.in_ep->driver_data = NULL;
+
+ ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
+index 21ab474..e5bb966 100644
+--- a/drivers/usb/gadget/f_subset.c
++++ b/drivers/usb/gadget/f_subset.c
+@@ -370,9 +370,9 @@ fail:
+ usb_free_descriptors(f->hs_descriptors);
+
+ /* we might as well release our claims on endpoints */
+- if (geth->port.out_ep->desc)
++ if (geth->port.out_ep)
+ geth->port.out_ep->driver_data = NULL;
+- if (geth->port.in_ep->desc)
++ if (geth->port.in_ep)
+ geth->port.in_ep->driver_data = NULL;
+
+ ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+diff --git a/drivers/usb/gadget/f_uvc.c b/drivers/usb/gadget/f_uvc.c
+index 2022fe49..a0abc65 100644
+--- a/drivers/usb/gadget/f_uvc.c
++++ b/drivers/usb/gadget/f_uvc.c
+@@ -335,7 +335,6 @@ uvc_register_video(struct uvc_device *uvc)
+ return -ENOMEM;
+
+ video->parent = &cdev->gadget->dev;
+- video->minor = -1;
+ video->fops = &uvc_v4l2_fops;
+ video->release = video_device_release;
+ strncpy(video->name, cdev->gadget->name, sizeof(video->name));
+@@ -462,23 +461,12 @@ uvc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+
+ INFO(cdev, "uvc_function_unbind\n");
+
+- if (uvc->vdev) {
+- if (uvc->vdev->minor == -1)
+- video_device_release(uvc->vdev);
+- else
+- video_unregister_device(uvc->vdev);
+- uvc->vdev = NULL;
+- }
+-
+- if (uvc->control_ep)
+- uvc->control_ep->driver_data = NULL;
+- if (uvc->video.ep)
+- uvc->video.ep->driver_data = NULL;
++ video_unregister_device(uvc->vdev);
++ uvc->control_ep->driver_data = NULL;
++ uvc->video.ep->driver_data = NULL;
+
+- if (uvc->control_req) {
+- usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
+- kfree(uvc->control_buf);
+- }
++ usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
++ kfree(uvc->control_buf);
+
+ kfree(f->descriptors);
+ kfree(f->hs_descriptors);
+@@ -563,7 +551,22 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
+ return 0;
+
+ error:
+- uvc_function_unbind(c, f);
++ if (uvc->vdev)
++ video_device_release(uvc->vdev);
++
++ if (uvc->control_ep)
++ uvc->control_ep->driver_data = NULL;
++ if (uvc->video.ep)
++ uvc->video.ep->driver_data = NULL;
++
++ if (uvc->control_req) {
++ usb_ep_free_request(cdev->gadget->ep0, uvc->control_req);
++ kfree(uvc->control_buf);
++ }
++
++ kfree(f->descriptors);
++ kfree(f->hs_descriptors);
++ kfree(f->ss_descriptors);
+ return ret;
+ }
+
+diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
+index a79e64b..b71e22e 100644
+--- a/drivers/usb/host/ehci-pci.c
++++ b/drivers/usb/host/ehci-pci.c
+@@ -359,7 +359,8 @@ static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev)
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ (pdev->device == 0x1E26 ||
+ pdev->device == 0x8C2D ||
+- pdev->device == 0x8C26);
++ pdev->device == 0x8C26 ||
++ pdev->device == 0x9C26);
+ }
+
+ static void ehci_enable_xhci_companion(void)
+diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
+index 2023733..5bb2dcb 100644
+--- a/drivers/usb/host/ehci-q.c
++++ b/drivers/usb/host/ehci-q.c
+@@ -264,18 +264,14 @@ ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
+ __releases(ehci->lock)
+ __acquires(ehci->lock)
+ {
+- if (likely (urb->hcpriv != NULL)) {
+- struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
+-
+- /* S-mask in a QH means it's an interrupt urb */
+- if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {
+-
+- /* ... update hc-wide periodic stats (for usbfs) */
+- ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
+- }
+- qh_put (qh);
++ if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
++ /* ... update hc-wide periodic stats */
++ ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
+ }
+
++ if (usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS)
++ qh_put((struct ehci_qh *) urb->hcpriv);
++
+ if (unlikely(urb->unlinked)) {
+ COUNT(ehci->stats.unlink);
+ } else {
+diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
+index a60679c..34655d0 100644
+--- a/drivers/usb/host/ehci-sched.c
++++ b/drivers/usb/host/ehci-sched.c
+@@ -1684,7 +1684,7 @@ itd_link_urb (
+
+ /* don't need that schedule data any more */
+ iso_sched_free (stream, iso_sched);
+- urb->hcpriv = NULL;
++ urb->hcpriv = stream;
+
+ timer_action (ehci, TIMER_IO_WATCHDOG);
+ return enable_periodic(ehci);
+@@ -2094,7 +2094,7 @@ sitd_link_urb (
+
+ /* don't need that schedule data any more */
+ iso_sched_free (stream, sched);
+- urb->hcpriv = NULL;
++ urb->hcpriv = stream;
+
+ timer_action (ehci, TIMER_IO_WATCHDOG);
+ return enable_periodic(ehci);
+diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
+index 15dc51d..e0ae777 100644
+--- a/drivers/usb/host/ohci-q.c
++++ b/drivers/usb/host/ohci-q.c
+@@ -1130,6 +1130,25 @@ dl_done_list (struct ohci_hcd *ohci)
+
+ while (td) {
+ struct td *td_next = td->next_dl_td;
++ struct ed *ed = td->ed;
++
++ /*
++ * Some OHCI controllers (NVIDIA for sure, maybe others)
++ * occasionally forget to add TDs to the done queue. Since
++ * TDs for a given endpoint are always processed in order,
++ * if we find a TD on the donelist then all of its
++ * predecessors must be finished as well.
++ */
++ for (;;) {
++ struct td *td2;
++
++ td2 = list_first_entry(&ed->td_list, struct td,
++ td_list);
++ if (td2 == td)
++ break;
++ takeback_td(ohci, td2);
++ }
++
+ takeback_td(ohci, td);
+ td = td_next;
+ }
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index c2815a5..5cc401b 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -723,6 +723,7 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done,
+ }
+
+ #define PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI 0x8C31
++#define PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI 0x9C31
+
+ bool usb_is_intel_ppt_switchable_xhci(struct pci_dev *pdev)
+ {
+@@ -736,7 +737,8 @@ bool usb_is_intel_lpt_switchable_xhci(struct pci_dev *pdev)
+ {
+ return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+- pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI;
++ (pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI);
+ }
+
+ bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 4cddbfc..5719c4d 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -178,8 +178,15 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+ struct xhci_segment *next;
+
+ next = xhci_segment_alloc(xhci, flags);
+- if (!next)
++ if (!next) {
++ prev = ring->first_seg;
++ while (prev) {
++ next = prev->next;
++ xhci_segment_free(xhci, prev);
++ prev = next;
++ }
+ goto fail;
++ }
+ xhci_link_segments(xhci, prev, next, link_trbs, isoc);
+
+ prev = next;
+@@ -199,7 +206,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+ return ring;
+
+ fail:
+- xhci_ring_free(xhci, ring);
++ kfree(ring);
+ return NULL;
+ }
+
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 4ed7572..aca647a 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -29,6 +29,7 @@
+ /* Device for a quirk */
+ #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
+ #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
++#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400
+
+ #define PCI_VENDOR_ID_ETRON 0x1b6f
+ #define PCI_DEVICE_ID_ASROCK_P67 0x7023
+@@ -58,8 +59,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+
+ /* Look for vendor-specific quirks */
+ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+- pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK) {
+- if (pdev->revision == 0x0) {
++ (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK ||
++ pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) {
++ if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
++ pdev->revision == 0x0) {
+ xhci->quirks |= XHCI_RESET_EP_QUIRK;
+ xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure"
+ " endpoint cmd after reset endpoint\n");
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 7de9993..1ba98f5 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2995,11 +2995,11 @@ static u32 xhci_td_remainder(unsigned int remainder)
+ }
+
+ /*
+- * For xHCI 1.0 host controllers, TD size is the number of packets remaining in
+- * the TD (*not* including this TRB).
++ * For xHCI 1.0 host controllers, TD size is the number of max packet sized
++ * packets remaining in the TD (*not* including this TRB).
+ *
+ * Total TD packet count = total_packet_count =
+- * roundup(TD size in bytes / wMaxPacketSize)
++ * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
+ *
+ * Packets transferred up to and including this TRB = packets_transferred =
+ * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
+@@ -3007,15 +3007,16 @@ static u32 xhci_td_remainder(unsigned int remainder)
+ * TD size = total_packet_count - packets_transferred
+ *
+ * It must fit in bits 21:17, so it can't be bigger than 31.
++ * The last TRB in a TD must have the TD size set to zero.
+ */
+-
+ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
+- unsigned int total_packet_count, struct urb *urb)
++ unsigned int total_packet_count, struct urb *urb,
++ unsigned int num_trbs_left)
+ {
+ int packets_transferred;
+
+ /* One TRB with a zero-length data packet. */
+- if (running_total == 0 && trb_buff_len == 0)
++ if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
+ return 0;
+
+ /* All the TRB queueing functions don't count the current TRB in
+@@ -3024,7 +3025,9 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
+ packets_transferred = (running_total + trb_buff_len) /
+ usb_endpoint_maxp(&urb->ep->desc);
+
+- return xhci_td_remainder(total_packet_count - packets_transferred);
++ if ((total_packet_count - packets_transferred) > 31)
++ return 31 << 17;
++ return (total_packet_count - packets_transferred) << 17;
+ }
+
+ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+@@ -3051,7 +3054,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+
+ num_trbs = count_sg_trbs_needed(xhci, urb);
+ num_sgs = urb->num_mapped_sgs;
+- total_packet_count = roundup(urb->transfer_buffer_length,
++ total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
+ usb_endpoint_maxp(&urb->ep->desc));
+
+ trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
+@@ -3141,7 +3144,8 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ running_total);
+ } else {
+ remainder = xhci_v1_0_td_remainder(running_total,
+- trb_buff_len, total_packet_count, urb);
++ trb_buff_len, total_packet_count, urb,
++ num_trbs - 1);
+ }
+ length_field = TRB_LEN(trb_buff_len) |
+ remainder |
+@@ -3258,7 +3262,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ start_cycle = ep_ring->cycle_state;
+
+ running_total = 0;
+- total_packet_count = roundup(urb->transfer_buffer_length,
++ total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
+ usb_endpoint_maxp(&urb->ep->desc));
+ /* How much data is in the first TRB? */
+ addr = (u64) urb->transfer_dma;
+@@ -3304,7 +3308,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ running_total);
+ } else {
+ remainder = xhci_v1_0_td_remainder(running_total,
+- trb_buff_len, total_packet_count, urb);
++ trb_buff_len, total_packet_count, urb,
++ num_trbs - 1);
+ }
+ length_field = TRB_LEN(trb_buff_len) |
+ remainder |
+@@ -3579,7 +3584,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ addr = start_addr + urb->iso_frame_desc[i].offset;
+ td_len = urb->iso_frame_desc[i].length;
+ td_remain_len = td_len;
+- total_packet_count = roundup(td_len,
++ total_packet_count = DIV_ROUND_UP(td_len,
+ usb_endpoint_maxp(&urb->ep->desc));
+ /* A zero-length transfer still involves at least one packet. */
+ if (total_packet_count == 0)
+@@ -3659,7 +3664,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ } else {
+ remainder = xhci_v1_0_td_remainder(
+ running_total, trb_buff_len,
+- total_packet_count, urb);
++ total_packet_count, urb,
++ (trbs_per_td - j - 1));
+ }
+ length_field = TRB_LEN(trb_buff_len) |
+ remainder |
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index dab05d1..9dc5870 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -472,7 +472,7 @@ static bool compliance_mode_recovery_timer_quirk_check(void)
+ if (strstr(dmi_product_name, "Z420") ||
+ strstr(dmi_product_name, "Z620") ||
+ strstr(dmi_product_name, "Z820") ||
+- strstr(dmi_product_name, "Z1"))
++ strstr(dmi_product_name, "Z1 Workstation"))
+ return true;
+
+ return false;
+@@ -2241,7 +2241,7 @@ static bool xhci_is_async_ep(unsigned int ep_type)
+
+ static bool xhci_is_sync_in_ep(unsigned int ep_type)
+ {
+- return (ep_type == ISOC_IN_EP || ep_type != INT_IN_EP);
++ return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
+ }
+
+ static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
+diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
+index 318fb4e..64d7b38 100644
+--- a/drivers/usb/musb/cppi_dma.c
++++ b/drivers/usb/musb/cppi_dma.c
+@@ -1313,6 +1313,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
+
+ return IRQ_HANDLED;
+ }
++EXPORT_SYMBOL_GPL(cppi_interrupt);
+
+ /* Instantiate a software object representing a DMA controller. */
+ struct dma_controller *__init
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 35e6b5f..381d00d 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
+ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
+ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
++ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
+ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index e29a664..3f989d6 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -197,6 +197,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
++ { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+@@ -1807,7 +1808,7 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
+ dbg("%s", __func__);
+
+ if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) ||
+- (udev->product && !strcmp(udev->product, "BeagleBone/XDS100")))
++ (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2")))
+ return ftdi_jtag_probe(serial);
+
+ return 0;
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 7b5eb74..aedf65f 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -752,6 +752,12 @@
+ #define TTI_VID 0x103E /* Vendor Id */
+ #define TTI_QL355P_PID 0x03E8 /* TTi QL355P power supply */
+
++/*
++ * Newport Cooperation (www.newport.com)
++ */
++#define NEWPORT_VID 0x104D
++#define NEWPORT_AGILIS_PID 0x3000
++
+ /* Interbiometrics USB I/O Board */
+ /* Developed for Interbiometrics by Rudolf Gugler */
+ #define INTERBIOMETRICS_VID 0x1209
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index a5f875d..872807b 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -80,6 +80,7 @@ static void option_instat_callback(struct urb *urb);
+ #define OPTION_PRODUCT_GTM380_MODEM 0x7201
+
+ #define HUAWEI_VENDOR_ID 0x12D1
++#define HUAWEI_PRODUCT_E173 0x140C
+ #define HUAWEI_PRODUCT_K4505 0x1464
+ #define HUAWEI_PRODUCT_K3765 0x1465
+ #define HUAWEI_PRODUCT_K4605 0x14C6
+@@ -552,6 +553,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
+@@ -883,6 +886,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0137, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0139, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) },
+@@ -903,20 +910,34 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
+@@ -1096,6 +1117,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1301, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1302, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
+diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
+index fe2d803..303c34b 100644
+--- a/drivers/usb/storage/Kconfig
++++ b/drivers/usb/storage/Kconfig
+@@ -203,7 +203,7 @@ config USB_STORAGE_ENE_UB6250
+
+ config USB_UAS
+ tristate "USB Attached SCSI"
+- depends on USB && SCSI
++ depends on USB && SCSI && BROKEN
+ help
+ The USB Attached SCSI protocol is supported by some USB
+ storage devices. It permits higher performance by supporting
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index c7a2c20..dc2eed1 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -121,6 +121,13 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
+ unsigned head;
+ int i;
+
++ /*
++ * We require lowmem mappings for the descriptors because
++ * otherwise virt_to_phys will give us bogus addresses in the
++ * virtqueue.
++ */
++ gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);
++
+ desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
+ if (!desc)
+ return -ENOMEM;
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index 1e9edbd..ca52e92 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -175,7 +175,10 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ goto _error;
+ bprm->argc ++;
+
+- bprm->interp = iname; /* for binfmt_script */
++ /* Update interp in case binfmt_script needs it. */
++ retval = bprm_change_interp(iname, bprm);
++ if (retval < 0)
++ goto _error;
+
+ interp_file = open_exec (iname);
+ retval = PTR_ERR (interp_file);
+diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
+index 396a988..e39c18a 100644
+--- a/fs/binfmt_script.c
++++ b/fs/binfmt_script.c
+@@ -82,7 +82,9 @@ static int load_script(struct linux_binprm *bprm,struct pt_regs *regs)
+ retval = copy_strings_kernel(1, &i_name, bprm);
+ if (retval) return retval;
+ bprm->argc++;
+- bprm->interp = interp;
++ retval = bprm_change_interp(interp, bprm);
++ if (retval < 0)
++ return retval;
+
+ /*
+ * OK, now restart the process with the interpreter's dentry.
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 63c0c6b..bb7f4cc 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1492,7 +1492,7 @@ static struct dentry * d_find_any_alias(struct inode *inode)
+ */
+ struct dentry *d_obtain_alias(struct inode *inode)
+ {
+- static const struct qstr anonstring = { .name = "" };
++ static const struct qstr anonstring = { .name = "/", .len = 1 };
+ struct dentry *tmp;
+ struct dentry *res;
+
+diff --git a/fs/exec.c b/fs/exec.c
+index 121ccae..c27fa0d 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1095,7 +1095,8 @@ int flush_old_exec(struct linux_binprm * bprm)
+ bprm->mm = NULL; /* We're using it now */
+
+ set_fs(USER_DS);
+- current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD | PF_NOFREEZE);
++ current->flags &=
++ ~(PF_RANDOMIZE | PF_KTHREAD | PF_NOFREEZE | PF_FREEZER_NOSIG);
+ flush_thread();
+ current->personality &= ~bprm->per_clear;
+
+@@ -1201,9 +1202,24 @@ void free_bprm(struct linux_binprm *bprm)
+ mutex_unlock(&current->signal->cred_guard_mutex);
+ abort_creds(bprm->cred);
+ }
++ /* If a binfmt changed the interp, free it. */
++ if (bprm->interp != bprm->filename)
++ kfree(bprm->interp);
+ kfree(bprm);
+ }
+
++int bprm_change_interp(char *interp, struct linux_binprm *bprm)
++{
++ /* If a binfmt changed the interp, free it first. */
++ if (bprm->interp != bprm->filename)
++ kfree(bprm->interp);
++ bprm->interp = kstrdup(interp, GFP_KERNEL);
++ if (!bprm->interp)
++ return -ENOMEM;
++ return 0;
++}
++EXPORT_SYMBOL(bprm_change_interp);
++
+ /*
+ * install the new credentials for this executable
+ */
+diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
+index a5c29bb..8535c45 100644
+--- a/fs/ext4/acl.c
++++ b/fs/ext4/acl.c
+@@ -410,8 +410,10 @@ ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
+
+ retry:
+ handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
+- if (IS_ERR(handle))
+- return PTR_ERR(handle);
++ if (IS_ERR(handle)) {
++ error = PTR_ERR(handle);
++ goto release_and_out;
++ }
+ error = ext4_set_acl(handle, inode, type, acl);
+ ext4_journal_stop(handle);
+ if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index bac2330..8424dda 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1422,6 +1422,7 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
+
+ index = mpd->first_page;
+ end = mpd->next_page - 1;
++ pagevec_init(&pvec, 0);
+ while (index <= end) {
+ nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
+ if (nr_pages == 0)
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index 873bf00..d03a400 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -672,8 +672,7 @@ static int nfs_create_rpc_client(struct nfs_client *clp,
+ */
+ static void nfs_destroy_server(struct nfs_server *server)
+ {
+- if (!(server->flags & NFS_MOUNT_LOCAL_FLOCK) ||
+- !(server->flags & NFS_MOUNT_LOCAL_FCNTL))
++ if (server->nlm_host)
+ nlmclnt_done(server->nlm_host);
+ }
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 864b831..2f98c53 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5544,13 +5544,26 @@ static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
+ rpc_call_start(task);
+ }
+
++static void nfs41_sequence_prepare_privileged(struct rpc_task *task, void *data)
++{
++ rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
++ nfs41_sequence_prepare(task, data);
++}
++
+ static const struct rpc_call_ops nfs41_sequence_ops = {
+ .rpc_call_done = nfs41_sequence_call_done,
+ .rpc_call_prepare = nfs41_sequence_prepare,
+ .rpc_release = nfs41_sequence_release,
+ };
+
+-static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
++static const struct rpc_call_ops nfs41_sequence_privileged_ops = {
++ .rpc_call_done = nfs41_sequence_call_done,
++ .rpc_call_prepare = nfs41_sequence_prepare_privileged,
++ .rpc_release = nfs41_sequence_release,
++};
++
++static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred,
++ const struct rpc_call_ops *seq_ops)
+ {
+ struct nfs4_sequence_data *calldata;
+ struct rpc_message msg = {
+@@ -5560,7 +5573,7 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = clp->cl_rpcclient,
+ .rpc_message = &msg,
+- .callback_ops = &nfs41_sequence_ops,
++ .callback_ops = seq_ops,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
+ };
+
+@@ -5586,7 +5599,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
+
+ if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
+ return 0;
+- task = _nfs41_proc_sequence(clp, cred);
++ task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_ops);
+ if (IS_ERR(task))
+ ret = PTR_ERR(task);
+ else
+@@ -5600,7 +5613,7 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
+ struct rpc_task *task;
+ int ret;
+
+- task = _nfs41_proc_sequence(clp, cred);
++ task = _nfs41_proc_sequence(clp, cred, &nfs41_sequence_privileged_ops);
+ if (IS_ERR(task)) {
+ ret = PTR_ERR(task);
+ goto out;
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index b8c5538..fe5c5fb 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -193,6 +193,7 @@ static __be32
+ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
+ {
+ struct svc_fh resfh;
++ int accmode;
+ __be32 status;
+
+ fh_init(&resfh, NFS4_FHSIZE);
+@@ -252,9 +253,10 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
+ /* set reply cache */
+ fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh,
+ &resfh.fh_handle);
+- if (!open->op_created)
+- status = do_open_permission(rqstp, current_fh, open,
+- NFSD_MAY_NOP);
++ accmode = NFSD_MAY_NOP;
++ if (open->op_created)
++ accmode |= NFSD_MAY_OWNER_OVERRIDE;
++ status = do_open_permission(rqstp, current_fh, open, accmode);
+
+ out:
+ fh_put(&resfh);
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index d225b51..8b197d2 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -2309,7 +2309,7 @@ nfsd4_init_slabs(void)
+ if (openowner_slab == NULL)
+ goto out_nomem;
+ lockowner_slab = kmem_cache_create("nfsd4_lockowners",
+- sizeof(struct nfs4_openowner), 0, 0, NULL);
++ sizeof(struct nfs4_lockowner), 0, 0, NULL);
+ if (lockowner_slab == NULL)
+ goto out_nomem;
+ file_slab = kmem_cache_create("nfsd4_files",
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 87a1746..800c215 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2909,11 +2909,16 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
+ len = maxcount;
+ v = 0;
+ while (len > 0) {
+- pn = resp->rqstp->rq_resused++;
++ pn = resp->rqstp->rq_resused;
++ if (!resp->rqstp->rq_respages[pn]) { /* ran out of pages */
++ maxcount -= len;
++ break;
++ }
+ resp->rqstp->rq_vec[v].iov_base =
+ page_address(resp->rqstp->rq_respages[pn]);
+ resp->rqstp->rq_vec[v].iov_len =
+ len < PAGE_SIZE ? len : PAGE_SIZE;
++ resp->rqstp->rq_resused++;
+ v++;
+ len -= PAGE_SIZE;
+ }
+@@ -2959,6 +2964,8 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd
+ return nfserr;
+ if (resp->xbuf->page_len)
+ return nfserr_resource;
++ if (!resp->rqstp->rq_respages[resp->rqstp->rq_resused])
++ return nfserr_resource;
+
+ page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused++]);
+
+@@ -3008,6 +3015,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
+ return nfserr;
+ if (resp->xbuf->page_len)
+ return nfserr_resource;
++ if (!resp->rqstp->rq_respages[resp->rqstp->rq_resused])
++ return nfserr_resource;
+
+ RESERVE_SPACE(8); /* verifier */
+ savep = p;
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index eda7d7e..7595582 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -633,7 +633,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
+ }
+
+ /* Store reply in cache. */
+- nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1);
++ nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp + 1);
+ return 1;
+ }
+
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 5c3cd82..1ec1fde 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1458,13 +1458,19 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ case NFS3_CREATE_EXCLUSIVE:
+ if ( dchild->d_inode->i_mtime.tv_sec == v_mtime
+ && dchild->d_inode->i_atime.tv_sec == v_atime
+- && dchild->d_inode->i_size == 0 )
++ && dchild->d_inode->i_size == 0 ) {
++ if (created)
++ *created = 1;
+ break;
++ }
+ case NFS4_CREATE_EXCLUSIVE4_1:
+ if ( dchild->d_inode->i_mtime.tv_sec == v_mtime
+ && dchild->d_inode->i_atime.tv_sec == v_atime
+- && dchild->d_inode->i_size == 0 )
++ && dchild->d_inode->i_size == 0 ) {
++ if (created)
++ *created = 1;
+ goto set_attr;
++ }
+ /* fallthru */
+ case NFS3_CREATE_GUARDED:
+ err = nfserr_exist;
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 3a1dafd..439b5a1 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -204,7 +204,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
+ group_info = cred->group_info;
+ task_unlock(p);
+
+- for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++)
++ for (g = 0; g < group_info->ngroups; g++)
+ seq_printf(m, "%d ", GROUP_AT(group_info, g));
+ put_cred(cred);
+
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index e2787d0..15df1a4 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -744,6 +744,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
+ goal, err);
+ if (!newblocknum) {
+ brelse(prev_epos.bh);
++ brelse(cur_epos.bh);
++ brelse(next_epos.bh);
+ *err = -ENOSPC;
+ return NULL;
+ }
+@@ -774,6 +776,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
+ udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
+
+ brelse(prev_epos.bh);
++ brelse(cur_epos.bh);
++ brelse(next_epos.bh);
+
+ newblock = udf_get_pblock(inode->i_sb, newblocknum,
+ iinfo->i_location.partitionReferenceNum, 0);
+diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
+index fd88a39..f606406 100644
+--- a/include/linux/binfmts.h
++++ b/include/linux/binfmts.h
+@@ -127,6 +127,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
+ unsigned long stack_top,
+ int executable_stack);
+ extern int bprm_mm_init(struct linux_binprm *bprm);
++extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
+ extern int copy_strings_kernel(int argc, const char *const *argv,
+ struct linux_binprm *bprm);
+ extern int prepare_bprm_creds(struct linux_binprm *bprm);
+diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
+index 1b7f9d5..9bab75f 100644
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
+@@ -32,7 +32,6 @@ extern int cgroup_lock_is_held(void);
+ extern bool cgroup_lock_live_group(struct cgroup *cgrp);
+ extern void cgroup_unlock(void);
+ extern void cgroup_fork(struct task_struct *p);
+-extern void cgroup_fork_callbacks(struct task_struct *p);
+ extern void cgroup_post_fork(struct task_struct *p);
+ extern void cgroup_exit(struct task_struct *p, int run_callbacks);
+ extern int cgroupstats_build(struct cgroupstats *stats,
+diff --git a/include/linux/freezer.h b/include/linux/freezer.h
+index b5d6b6a..862e67b 100644
+--- a/include/linux/freezer.h
++++ b/include/linux/freezer.h
+@@ -88,9 +88,16 @@ static inline int cgroup_freezing_or_frozen(struct task_struct *task)
+ * parent.
+ */
+
+-/*
+- * If the current task is a user space one, tell the freezer not to count it as
+- * freezable.
++/**
++ * freezer_do_not_count - tell freezer to ignore %current if a user space task
++ *
++ * Tell freezers to ignore the current task when determining whether the
++ * target frozen state is reached. IOW, the current task will be
++ * considered frozen enough by freezers.
++ *
++ * The caller shouldn't do anything which isn't allowed for a frozen task
++ * until freezer_cont() is called. Usually, freezer[_do_not]_count() pair
++ * wrap a scheduling operation and nothing much else.
+ */
+ static inline void freezer_do_not_count(void)
+ {
+@@ -98,24 +105,48 @@ static inline void freezer_do_not_count(void)
+ current->flags |= PF_FREEZER_SKIP;
+ }
+
+-/*
+- * If the current task is a user space one, tell the freezer to count it as
+- * freezable again and try to freeze it.
++/**
++ * freezer_count - tell freezer to stop ignoring %current if a user space task
++ *
++ * Undo freezer_do_not_count(). It tells freezers that %current should be
++ * considered again and tries to freeze if freezing condition is already in
++ * effect.
+ */
+ static inline void freezer_count(void)
+ {
+ if (current->mm) {
+ current->flags &= ~PF_FREEZER_SKIP;
++ /*
++ * If freezing is in progress, the following paired with smp_mb()
++ * in freezer_should_skip() ensures that either we see %true
++ * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
++ */
++ smp_mb();
+ try_to_freeze();
+ }
+ }
+
+-/*
+- * Check if the task should be counted as freezable by the freezer
++/**
++ * freezer_should_skip - whether to skip a task when determining frozen
++ * state is reached
++ * @p: task in quesion
++ *
++ * This function is used by freezers after establishing %true freezing() to
++ * test whether a task should be skipped when determining the target frozen
++ * state is reached. IOW, if this function returns %true, @p is considered
++ * frozen enough.
+ */
+-static inline int freezer_should_skip(struct task_struct *p)
++static inline bool freezer_should_skip(struct task_struct *p)
+ {
+- return !!(p->flags & PF_FREEZER_SKIP);
++ /*
++ * The following smp_mb() paired with the one in freezer_count()
++ * ensures that either freezer_count() sees %true freezing() or we
++ * see cleared %PF_FREEZER_SKIP and return %false. This makes it
++ * impossible for a task to slip frozen state testing after
++ * clearing %PF_FREEZER_SKIP.
++ */
++ smp_mb();
++ return p->flags & PF_FREEZER_SKIP;
+ }
+
+ /*
+diff --git a/include/linux/highmem.h b/include/linux/highmem.h
+index 3a93f73..52e9620 100644
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -38,10 +38,17 @@ extern unsigned long totalhigh_pages;
+
+ void kmap_flush_unused(void);
+
++struct page *kmap_to_page(void *addr);
++
+ #else /* CONFIG_HIGHMEM */
+
+ static inline unsigned int nr_free_highpages(void) { return 0; }
+
++static inline struct page *kmap_to_page(void *addr)
++{
++ return virt_to_page(addr);
++}
++
+ #define totalhigh_pages 0UL
+
+ #ifndef ARCH_HAS_KMAP
+diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
+index 3e8f2f7..f85c5ab 100644
+--- a/include/linux/mempolicy.h
++++ b/include/linux/mempolicy.h
+@@ -137,16 +137,6 @@ static inline void mpol_cond_put(struct mempolicy *pol)
+ __mpol_put(pol);
+ }
+
+-extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
+- struct mempolicy *frompol);
+-static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
+- struct mempolicy *frompol)
+-{
+- if (!frompol)
+- return frompol;
+- return __mpol_cond_copy(tompol, frompol);
+-}
+-
+ extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
+ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
+ {
+@@ -270,12 +260,6 @@ static inline void mpol_cond_put(struct mempolicy *pol)
+ {
+ }
+
+-static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
+- struct mempolicy *from)
+-{
+- return from;
+-}
+-
+ static inline void mpol_get(struct mempolicy *pol)
+ {
+ }
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 1874c5e..5776609 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -752,6 +752,7 @@
+ #define PCI_DEVICE_ID_HP_CISSD 0x3238
+ #define PCI_DEVICE_ID_HP_CISSE 0x323a
+ #define PCI_DEVICE_ID_HP_CISSF 0x323b
++#define PCI_DEVICE_ID_HP_CISSH 0x323c
+ #define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031
+
+ #define PCI_VENDOR_ID_PCTECH 0x1042
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 6337535..b6cacf1 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2636,9 +2636,7 @@ static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
+ dentry->d_fsdata = cgrp;
+ inc_nlink(parent->d_inode);
+ rcu_assign_pointer(cgrp->dentry, dentry);
+- dget(dentry);
+ }
+- dput(dentry);
+
+ return error;
+ }
+@@ -4508,41 +4506,19 @@ void cgroup_fork(struct task_struct *child)
+ }
+
+ /**
+- * cgroup_fork_callbacks - run fork callbacks
+- * @child: the new task
+- *
+- * Called on a new task very soon before adding it to the
+- * tasklist. No need to take any locks since no-one can
+- * be operating on this task.
+- */
+-void cgroup_fork_callbacks(struct task_struct *child)
+-{
+- if (need_forkexit_callback) {
+- int i;
+- /*
+- * forkexit callbacks are only supported for builtin
+- * subsystems, and the builtin section of the subsys array is
+- * immutable, so we don't need to lock the subsys array here.
+- */
+- for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
+- struct cgroup_subsys *ss = subsys[i];
+- if (ss->fork)
+- ss->fork(ss, child);
+- }
+- }
+-}
+-
+-/**
+ * cgroup_post_fork - called on a new task after adding it to the task list
+ * @child: the task in question
+ *
+- * Adds the task to the list running through its css_set if necessary.
+- * Has to be after the task is visible on the task list in case we race
+- * with the first call to cgroup_iter_start() - to guarantee that the
+- * new task ends up on its list.
++ * Adds the task to the list running through its css_set if necessary and
++ * call the subsystem fork() callbacks. Has to be after the task is
++ * visible on the task list in case we race with the first call to
++ * cgroup_iter_start() - to guarantee that the new task ends up on its
++ * list.
+ */
+ void cgroup_post_fork(struct task_struct *child)
+ {
++ int i;
++
+ if (use_task_css_set_links) {
+ write_lock(&css_set_lock);
+ task_lock(child);
+@@ -4551,7 +4527,21 @@ void cgroup_post_fork(struct task_struct *child)
+ task_unlock(child);
+ write_unlock(&css_set_lock);
+ }
++
++ /*
++ * Call ss->fork(). This must happen after @child is linked on
++ * css_set; otherwise, @child might change state between ->fork()
++ * and addition to css_set.
++ */
++ if (need_forkexit_callback) {
++ for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
++ struct cgroup_subsys *ss = subsys[i];
++ if (ss->fork)
++ ss->fork(ss, child);
++ }
++ }
+ }
++
+ /**
+ * cgroup_exit - detach cgroup from exiting task
+ * @tsk: pointer to task_struct of exiting process
+diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
+index 213c035..6c132a4 100644
+--- a/kernel/cgroup_freezer.c
++++ b/kernel/cgroup_freezer.c
+@@ -197,23 +197,15 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
+ {
+ struct freezer *freezer;
+
+- /*
+- * No lock is needed, since the task isn't on tasklist yet,
+- * so it can't be moved to another cgroup, which means the
+- * freezer won't be removed and will be valid during this
+- * function call. Nevertheless, apply RCU read-side critical
+- * section to suppress RCU lockdep false positives.
+- */
+ rcu_read_lock();
+ freezer = task_freezer(task);
+- rcu_read_unlock();
+
+ /*
+ * The root cgroup is non-freezable, so we can skip the
+ * following check.
+ */
+ if (!freezer->css.cgroup->parent)
+- return;
++ goto out;
+
+ spin_lock_irq(&freezer->lock);
+ BUG_ON(freezer->state == CGROUP_FROZEN);
+@@ -221,7 +213,10 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
+ /* Locking avoids race with FREEZING -> THAWED transitions. */
+ if (freezer->state == CGROUP_FREEZING)
+ freeze_task(task, true);
++
+ spin_unlock_irq(&freezer->lock);
++out:
++ rcu_read_unlock();
+ }
+
+ /*
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 222457a..ce0c182 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1057,7 +1057,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ {
+ int retval;
+ struct task_struct *p;
+- int cgroup_callbacks_done = 0;
+
+ if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
+ return ERR_PTR(-EINVAL);
+@@ -1312,12 +1311,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ p->group_leader = p;
+ INIT_LIST_HEAD(&p->thread_group);
+
+- /* Now that the task is set up, run cgroup callbacks if
+- * necessary. We need to run them before the task is visible
+- * on the tasklist. */
+- cgroup_fork_callbacks(p);
+- cgroup_callbacks_done = 1;
+-
+ /* Need tasklist lock for parent etc handling! */
+ write_lock_irq(&tasklist_lock);
+
+@@ -1419,7 +1412,7 @@ bad_fork_cleanup_cgroup:
+ #endif
+ if (clone_flags & CLONE_THREAD)
+ threadgroup_fork_read_unlock(current);
+- cgroup_exit(p, cgroup_callbacks_done);
++ cgroup_exit(p, 0);
+ delayacct_tsk_free(p);
+ module_put(task_thread_info(p)->exec_domain->module);
+ bad_fork_cleanup_count:
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 7600092..382a6bd 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -701,6 +701,7 @@ static void
+ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+ {
+ cpumask_var_t mask;
++ bool valid = true;
+
+ if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+ return;
+@@ -715,10 +716,18 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+ }
+
+ raw_spin_lock_irq(&desc->lock);
+- cpumask_copy(mask, desc->irq_data.affinity);
++ /*
++ * This code is triggered unconditionally. Check the affinity
++ * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
++ */
++ if (desc->irq_data.affinity)
++ cpumask_copy(mask, desc->irq_data.affinity);
++ else
++ valid = false;
+ raw_spin_unlock_irq(&desc->lock);
+
+- set_cpus_allowed_ptr(current, mask);
++ if (valid)
++ set_cpus_allowed_ptr(current, mask);
+ free_cpumask_var(mask);
+ }
+ #else
+@@ -950,6 +959,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
+ */
+ get_task_struct(t);
+ new->thread = t;
++ /*
++ * Tell the thread to set its affinity. This is
++ * important for shared interrupt handlers as we do
++ * not invoke setup_affinity() for the secondary
++ * handlers as everything is already set up. Even for
++ * interrupts marked with IRQF_NO_BALANCE this is
++ * correct as we want the thread to move to the cpu(s)
++ * on which the requesting code placed the interrupt.
++ */
++ set_bit(IRQTF_AFFINITY, &new->thread_flags);
+ }
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+diff --git a/kernel/rcutree.c b/kernel/rcutree.c
+index a122196..1aa52af 100644
+--- a/kernel/rcutree.c
++++ b/kernel/rcutree.c
+@@ -202,13 +202,13 @@ DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
+ };
+ #endif /* #ifdef CONFIG_NO_HZ */
+
+-static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */
+-static int qhimark = 10000; /* If this many pending, ignore blimit. */
+-static int qlowmark = 100; /* Once only this many pending, use blimit. */
++static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
++static long qhimark = 10000; /* If this many pending, ignore blimit. */
++static long qlowmark = 100; /* Once only this many pending, use blimit. */
+
+-module_param(blimit, int, 0);
+-module_param(qhimark, int, 0);
+-module_param(qlowmark, int, 0);
++module_param(blimit, long, 0);
++module_param(qhimark, long, 0);
++module_param(qlowmark, long, 0);
+
+ int rcu_cpu_stall_suppress __read_mostly;
+ module_param(rcu_cpu_stall_suppress, int, 0644);
+@@ -1260,7 +1260,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
+ {
+ unsigned long flags;
+ struct rcu_head *next, *list, **tail;
+- int bl, count;
++ long bl, count;
+
+ /* If no callbacks are ready, just return.*/
+ if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
+diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
+index 429242f..f280df1 100644
+--- a/kernel/sched_autogroup.c
++++ b/kernel/sched_autogroup.c
+@@ -160,15 +160,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
+
+ p->signal->autogroup = autogroup_kref_get(ag);
+
+- if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
+- goto out;
+-
+ t = p;
+ do {
+ sched_move_task(t);
+ } while_each_thread(p, t);
+
+-out:
+ unlock_task_sighand(p, &flags);
+ autogroup_kref_put(prev);
+ }
+diff --git a/kernel/sched_autogroup.h b/kernel/sched_autogroup.h
+index c2f0e72..3d7a50e 100644
+--- a/kernel/sched_autogroup.h
++++ b/kernel/sched_autogroup.h
+@@ -1,11 +1,6 @@
+ #ifdef CONFIG_SCHED_AUTOGROUP
+
+ struct autogroup {
+- /*
+- * reference doesn't mean how many thread attach to this
+- * autogroup now. It just stands for the number of task
+- * could use this autogroup.
+- */
+ struct kref kref;
+ struct task_group *tg;
+ struct rw_semaphore lock;
+diff --git a/kernel/sys.c b/kernel/sys.c
+index d7c4ab0..f5939c2 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1190,7 +1190,7 @@ static int override_release(char __user *release, size_t len)
+ rest++;
+ }
+ v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
+- copy = min(sizeof(buf), max_t(size_t, 1, len));
++ copy = clamp_t(size_t, len, 1, sizeof(buf));
+ copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
+ ret = copy_to_user(release, buf, copy + 1);
+ }
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 25b4f4d..54dba59 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -2074,7 +2074,7 @@ static void reset_iter_read(struct ftrace_iterator *iter)
+ {
+ iter->pos = 0;
+ iter->func_pos = 0;
+- iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
++ iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
+ }
+
+ static void *t_start(struct seq_file *m, loff_t *pos)
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index f5b7b5c..6fdc629 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -2683,7 +2683,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+ unsigned long flags;
+ struct ring_buffer_per_cpu *cpu_buffer;
+ struct buffer_page *bpage;
+- unsigned long ret;
++ unsigned long ret = 0;
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return 0;
+@@ -2698,7 +2698,8 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+ bpage = cpu_buffer->reader_page;
+ else
+ bpage = rb_set_head_page(cpu_buffer);
+- ret = bpage->page->time_stamp;
++ if (bpage)
++ ret = bpage->page->time_stamp;
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ return ret;
+@@ -3005,6 +3006,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+ * Splice the empty reader page into the list around the head.
+ */
+ reader = rb_set_head_page(cpu_buffer);
++ if (!reader)
++ goto out;
+ cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
+ cpu_buffer->reader_page->list.prev = reader->list.prev;
+
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index d551d5f..7bf068a 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1146,8 +1146,8 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+ unsigned int lcpu;
+
+- BUG_ON(timer_pending(timer));
+- BUG_ON(!list_empty(&work->entry));
++ WARN_ON_ONCE(timer_pending(timer));
++ WARN_ON_ONCE(!list_empty(&work->entry));
+
+ timer_stats_timer_set_start_info(&dwork->timer);
+
+diff --git a/mm/dmapool.c b/mm/dmapool.c
+index c5ab33b..da1b0f0 100644
+--- a/mm/dmapool.c
++++ b/mm/dmapool.c
+@@ -50,7 +50,6 @@ struct dma_pool { /* the pool */
+ size_t allocation;
+ size_t boundary;
+ char name[32];
+- wait_queue_head_t waitq;
+ struct list_head pools;
+ };
+
+@@ -62,8 +61,6 @@ struct dma_page { /* cacheable header for 'allocation' bytes */
+ unsigned int offset;
+ };
+
+-#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
+-
+ static DEFINE_MUTEX(pools_lock);
+
+ static ssize_t
+@@ -172,7 +169,6 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
+ retval->size = size;
+ retval->boundary = boundary;
+ retval->allocation = allocation;
+- init_waitqueue_head(&retval->waitq);
+
+ if (dev) {
+ int ret;
+@@ -227,7 +223,6 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
+ memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
+ #endif
+ pool_initialise_page(pool, page);
+- list_add(&page->page_list, &pool->page_list);
+ page->in_use = 0;
+ page->offset = 0;
+ } else {
+@@ -315,30 +310,21 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+ might_sleep_if(mem_flags & __GFP_WAIT);
+
+ spin_lock_irqsave(&pool->lock, flags);
+- restart:
+ list_for_each_entry(page, &pool->page_list, page_list) {
+ if (page->offset < pool->allocation)
+ goto ready;
+ }
+- page = pool_alloc_page(pool, GFP_ATOMIC);
+- if (!page) {
+- if (mem_flags & __GFP_WAIT) {
+- DECLARE_WAITQUEUE(wait, current);
+
+- __set_current_state(TASK_UNINTERRUPTIBLE);
+- __add_wait_queue(&pool->waitq, &wait);
+- spin_unlock_irqrestore(&pool->lock, flags);
++ /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
++ spin_unlock_irqrestore(&pool->lock, flags);
+
+- schedule_timeout(POOL_TIMEOUT_JIFFIES);
++ page = pool_alloc_page(pool, mem_flags);
++ if (!page)
++ return NULL;
+
+- spin_lock_irqsave(&pool->lock, flags);
+- __remove_wait_queue(&pool->waitq, &wait);
+- goto restart;
+- }
+- retval = NULL;
+- goto done;
+- }
++ spin_lock_irqsave(&pool->lock, flags);
+
++ list_add(&page->page_list, &pool->page_list);
+ ready:
+ page->in_use++;
+ offset = page->offset;
+@@ -348,7 +334,6 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+ #ifdef DMAPOOL_DEBUG
+ memset(retval, POOL_POISON_ALLOCATED, pool->size);
+ #endif
+- done:
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return retval;
+ }
+@@ -435,8 +420,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
+ page->in_use--;
+ *(int *)vaddr = page->offset;
+ page->offset = offset;
+- if (waitqueue_active(&pool->waitq))
+- wake_up_locked(&pool->waitq);
+ /*
+ * Resist a temptation to do
+ * if (!is_page_busy(page)) pool_free_page(pool, page);
+diff --git a/mm/highmem.c b/mm/highmem.c
+index 57d82c6..2a07f97 100644
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -94,6 +94,19 @@ static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
+ do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
+ #endif
+
++struct page *kmap_to_page(void *vaddr)
++{
++ unsigned long addr = (unsigned long)vaddr;
++
++ if (addr >= PKMAP_ADDR(0) && addr <= PKMAP_ADDR(LAST_PKMAP)) {
++ int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT;
++ return pte_page(pkmap_page_table[i]);
++ }
++
++ return virt_to_page(addr);
++}
++EXPORT_SYMBOL(kmap_to_page);
++
+ static void flush_all_zero_pkmaps(void)
+ {
+ int i;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 8f005e9..470cbb4 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -921,6 +921,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ count_vm_event(THP_FAULT_FALLBACK);
+ ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
+ pmd, orig_pmd, page, haddr);
++ if (ret & VM_FAULT_OOM)
++ split_huge_page(page);
+ put_page(page);
+ goto out;
+ }
+@@ -928,6 +930,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+
+ if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+ put_page(new_page);
++ split_huge_page(page);
+ put_page(page);
+ ret |= VM_FAULT_OOM;
+ goto out;
+diff --git a/mm/memory.c b/mm/memory.c
+index 70f5daf..15e686a 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3469,6 +3469,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ if (unlikely(is_vm_hugetlb_page(vma)))
+ return hugetlb_fault(mm, vma, address, flags);
+
++retry:
+ pgd = pgd_offset(mm, address);
+ pud = pud_alloc(mm, pgd, address);
+ if (!pud)
+@@ -3482,13 +3483,24 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ pmd, flags);
+ } else {
+ pmd_t orig_pmd = *pmd;
++ int ret;
++
+ barrier();
+ if (pmd_trans_huge(orig_pmd)) {
+ if (flags & FAULT_FLAG_WRITE &&
+ !pmd_write(orig_pmd) &&
+- !pmd_trans_splitting(orig_pmd))
+- return do_huge_pmd_wp_page(mm, vma, address,
+- pmd, orig_pmd);
++ !pmd_trans_splitting(orig_pmd)) {
++ ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
++ orig_pmd);
++ /*
++ * If COW results in an oom, the huge pmd will
++ * have been split, so retry the fault on the
++ * pte for a smaller charge.
++ */
++ if (unlikely(ret & VM_FAULT_OOM))
++ goto retry;
++ return ret;
++ }
+ return 0;
+ }
+ }
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 4c82c21..c59d44b 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1999,28 +1999,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
+ return new;
+ }
+
+-/*
+- * If *frompol needs [has] an extra ref, copy *frompol to *tompol ,
+- * eliminate the * MPOL_F_* flags that require conditional ref and
+- * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly
+- * after return. Use the returned value.
+- *
+- * Allows use of a mempolicy for, e.g., multiple allocations with a single
+- * policy lookup, even if the policy needs/has extra ref on lookup.
+- * shmem_readahead needs this.
+- */
+-struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
+- struct mempolicy *frompol)
+-{
+- if (!mpol_needs_cond_ref(frompol))
+- return frompol;
+-
+- *tompol = *frompol;
+- tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */
+- __mpol_put(frompol);
+- return tompol;
+-}
+-
+ /* Slow path of a mempolicy comparison */
+ int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
+ {
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 2d46e23..12b9e80 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -798,24 +798,28 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
+ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
+ struct shmem_inode_info *info, pgoff_t index)
+ {
+- struct mempolicy mpol, *spol;
+ struct vm_area_struct pvma;
+-
+- spol = mpol_cond_copy(&mpol,
+- mpol_shared_policy_lookup(&info->policy, index));
++ struct page *page;
+
+ /* Create a pseudo vma that just contains the policy */
+ pvma.vm_start = 0;
+ pvma.vm_pgoff = index;
+ pvma.vm_ops = NULL;
+- pvma.vm_policy = spol;
+- return swapin_readahead(swap, gfp, &pvma, 0);
++ pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
++
++ page = swapin_readahead(swap, gfp, &pvma, 0);
++
++ /* Drop reference taken by mpol_shared_policy_lookup() */
++ mpol_cond_put(pvma.vm_policy);
++
++ return page;
+ }
+
+ static struct page *shmem_alloc_page(gfp_t gfp,
+ struct shmem_inode_info *info, pgoff_t index)
+ {
+ struct vm_area_struct pvma;
++ struct page *page;
+
+ /* Create a pseudo vma that just contains the policy */
+ pvma.vm_start = 0;
+@@ -823,10 +827,12 @@ static struct page *shmem_alloc_page(gfp_t gfp,
+ pvma.vm_ops = NULL;
+ pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
+
+- /*
+- * alloc_page_vma() will drop the shared policy reference
+- */
+- return alloc_page_vma(gfp, &pvma, 0);
++ page = alloc_page_vma(gfp, &pvma, 0);
++
++ /* Drop reference taken by mpol_shared_policy_lookup() */
++ mpol_cond_put(pvma.vm_policy);
++
++ return page;
+ }
+ #else /* !CONFIG_NUMA */
+ #ifdef CONFIG_TMPFS
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 1e4ee1a..313381c 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2492,19 +2492,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
+ }
+ #endif
+
+-static bool zone_balanced(struct zone *zone, int order,
+- unsigned long balance_gap, int classzone_idx)
+-{
+- if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
+- balance_gap, classzone_idx, 0))
+- return false;
+-
+- if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
+- return false;
+-
+- return true;
+-}
+-
+ /*
+ * pgdat_balanced is used when checking if a node is balanced for high-order
+ * allocations. Only zones that meet watermarks and are in a zone allowed
+@@ -2564,7 +2551,8 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
+ continue;
+ }
+
+- if (!zone_balanced(zone, order, 0, i))
++ if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
++ i, 0))
+ all_zones_ok = false;
+ else
+ balanced += zone->present_pages;
+@@ -2667,7 +2655,8 @@ loop_again:
+ shrink_active_list(SWAP_CLUSTER_MAX, zone,
+ &sc, priority, 0);
+
+- if (!zone_balanced(zone, order, 0, 0)) {
++ if (!zone_watermark_ok_safe(zone, order,
++ high_wmark_pages(zone), 0, 0)) {
+ end_zone = i;
+ break;
+ } else {
+@@ -2728,8 +2717,9 @@ loop_again:
+ (zone->present_pages +
+ KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+ KSWAPD_ZONE_BALANCE_GAP_RATIO);
+- if (!zone_balanced(zone, order,
+- balance_gap, end_zone)) {
++ if (!zone_watermark_ok_safe(zone, order,
++ high_wmark_pages(zone) + balance_gap,
++ end_zone, 0)) {
+ shrink_zone(priority, zone, &sc);
+
+ reclaim_state->reclaimed_slab = 0;
+@@ -2756,7 +2746,8 @@ loop_again:
+ continue;
+ }
+
+- if (!zone_balanced(zone, order, 0, end_zone)) {
++ if (!zone_watermark_ok_safe(zone, order,
++ high_wmark_pages(zone), end_zone, 0)) {
+ all_zones_ok = 0;
+ /*
+ * We are still under min water mark. This
+diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
+index 32aa983..55f0c09 100644
+--- a/net/9p/trans_virtio.c
++++ b/net/9p/trans_virtio.c
+@@ -37,6 +37,7 @@
+ #include <linux/inet.h>
+ #include <linux/idr.h>
+ #include <linux/file.h>
++#include <linux/highmem.h>
+ #include <linux/slab.h>
+ #include <net/9p/9p.h>
+ #include <linux/parser.h>
+@@ -323,7 +324,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
+ int count = nr_pages;
+ while (nr_pages) {
+ s = rest_of_page(data);
+- pages[index++] = virt_to_page(data);
++ pages[index++] = kmap_to_page(data);
+ data += s;
+ nr_pages--;
+ }
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 7ee4ead..14c4864 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -486,7 +486,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
+ long timeo;
+ int err = 0;
+
+- lock_sock(sk);
++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+ if (sk->sk_type != SOCK_STREAM) {
+ err = -EINVAL;
+@@ -523,7 +523,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+- lock_sock(sk);
++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ }
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk_sleep(sk), &wait);
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index fdaabf2..a4e7131 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -684,28 +684,27 @@ EXPORT_SYMBOL(ip_defrag);
+
+ struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+ {
+- const struct iphdr *iph;
++ struct iphdr iph;
+ u32 len;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return skb;
+
+- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
++ if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
+ return skb;
+
+- iph = ip_hdr(skb);
+- if (iph->ihl < 5 || iph->version != 4)
++ if (iph.ihl < 5 || iph.version != 4)
+ return skb;
+- if (!pskb_may_pull(skb, iph->ihl*4))
+- return skb;
+- iph = ip_hdr(skb);
+- len = ntohs(iph->tot_len);
+- if (skb->len < len || len < (iph->ihl * 4))
++
++ len = ntohs(iph.tot_len);
++ if (skb->len < len || len < (iph.ihl * 4))
+ return skb;
+
+- if (ip_is_fragment(ip_hdr(skb))) {
++ if (ip_is_fragment(&iph)) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (skb) {
++ if (!pskb_may_pull(skb, iph.ihl*4))
++ return skb;
+ if (pskb_trim_rcsum(skb, len))
+ return skb;
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
+index 6c85564..0018b65 100644
+--- a/net/sctp/chunk.c
++++ b/net/sctp/chunk.c
+@@ -183,7 +183,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
+
+ msg = sctp_datamsg_new(GFP_KERNEL);
+ if (!msg)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ /* Note: Calculate this outside of the loop, so that all fragments
+ * have the same expiration.
+@@ -280,11 +280,14 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
+
+ chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0);
+
+- if (!chunk)
++ if (!chunk) {
++ err = -ENOMEM;
+ goto errout;
++ }
++
+ err = sctp_user_addto_chunk(chunk, offset, len, msgh->msg_iov);
+ if (err < 0)
+- goto errout;
++ goto errout_chunk_free;
+
+ offset += len;
+
+@@ -315,8 +318,10 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
+
+ chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0);
+
+- if (!chunk)
++ if (!chunk) {
++ err = -ENOMEM;
+ goto errout;
++ }
+
+ err = sctp_user_addto_chunk(chunk, offset, over,msgh->msg_iov);
+
+@@ -324,7 +329,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
+ __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
+ - (__u8 *)chunk->skb->data);
+ if (err < 0)
+- goto errout;
++ goto errout_chunk_free;
+
+ sctp_datamsg_assign(msg, chunk);
+ list_add_tail(&chunk->frag_list, &msg->chunks);
+@@ -332,6 +337,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
+
+ return msg;
+
++errout_chunk_free:
++ sctp_chunk_free(chunk);
++
+ errout:
+ list_for_each_safe(pos, temp, &msg->chunks) {
+ list_del_init(pos);
+@@ -339,7 +347,7 @@ errout:
+ sctp_chunk_free(chunk);
+ }
+ sctp_datamsg_put(msg);
+- return NULL;
++ return ERR_PTR(err);
+ }
+
+ /* Check whether this message has expired. */
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 8e49d76..fa8333b 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1908,8 +1908,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
+
+ /* Break the message into multiple chunks of maximum size. */
+ datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len);
+- if (!datamsg) {
+- err = -ENOMEM;
++ if (IS_ERR(datamsg)) {
++ err = PTR_ERR(datamsg);
+ goto out_free;
+ }
+
+diff --git a/security/device_cgroup.c b/security/device_cgroup.c
+index 92e24bb..4450fbe 100644
+--- a/security/device_cgroup.c
++++ b/security/device_cgroup.c
+@@ -202,8 +202,8 @@ static void devcgroup_destroy(struct cgroup_subsys *ss,
+
+ dev_cgroup = cgroup_to_devcgroup(cgroup);
+ list_for_each_entry_safe(wh, tmp, &dev_cgroup->whitelist, list) {
+- list_del_rcu(&wh->list);
+- kfree_rcu(wh, rcu);
++ list_del(&wh->list);
++ kfree(wh);
+ }
+ kfree(dev_cgroup);
+ }
+@@ -278,7 +278,7 @@ static int may_access_whitelist(struct dev_cgroup *c,
+ {
+ struct dev_whitelist_item *whitem;
+
+- list_for_each_entry_rcu(whitem, &c->whitelist, list) {
++ list_for_each_entry(whitem, &c->whitelist, list) {
+ if (whitem->type & DEV_ALL)
+ return 1;
+ if ((refwh->type & DEV_BLOCK) && !(whitem->type & DEV_BLOCK))
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 53345bc..a1e312f 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -148,6 +148,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
+ "{Intel, PCH},"
+ "{Intel, CPT},"
+ "{Intel, PPT},"
++ "{Intel, LPT},"
+ "{Intel, PBG},"
+ "{Intel, SCH},"
+ "{ATI, SB450},"
+@@ -2973,6 +2974,10 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
+ { PCI_DEVICE(0x8086, 0x1e20),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
+ AZX_DCAPS_BUFSIZE},
++ /* Lynx Point */
++ { PCI_DEVICE(0x8086, 0x8c20),
++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
++ AZX_DCAPS_BUFSIZE},
+ /* SCH */
+ { PCI_DEVICE(0x8086, 0x811b),
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 58c287b..498b62e 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -4505,6 +4505,7 @@ static const struct snd_pci_quirk cxt5051_fixups[] = {
+ };
+
+ static const struct snd_pci_quirk cxt5066_fixups[] = {
++ SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
+ SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
+ SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index c119f33..bde2615 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -424,9 +424,11 @@ static void hdmi_init_pin(struct hda_codec *codec, hda_nid_t pin_nid)
+ if (get_wcaps(codec, pin_nid) & AC_WCAP_OUT_AMP)
+ snd_hda_codec_write(codec, pin_nid, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
+- /* Disable pin out until stream is active*/
++ /* Enable pin out: some machines with GM965 gets broken output when
++ * the pin is disabled or changed while using with HDMI
++ */
+ snd_hda_codec_write(codec, pin_nid, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
++ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ }
+
+ static int hdmi_get_channel_count(struct hda_codec *codec, hda_nid_t cvt_nid)
+@@ -1141,17 +1143,11 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
+ struct hdmi_spec *spec = codec->spec;
+ int pin_idx = hinfo_to_pin_index(spec, hinfo);
+ hda_nid_t pin_nid = spec->pins[pin_idx].pin_nid;
+- int pinctl;
+
+ hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
+
+ hdmi_setup_audio_infoframe(codec, pin_idx, substream);
+
+- pinctl = snd_hda_codec_read(codec, pin_nid, 0,
+- AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
+- snd_hda_codec_write(codec, pin_nid, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl | PIN_OUT);
+-
+ return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
+ }
+
+@@ -1163,7 +1159,6 @@ static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
+ int cvt_idx, pin_idx;
+ struct hdmi_spec_per_cvt *per_cvt;
+ struct hdmi_spec_per_pin *per_pin;
+- int pinctl;
+
+ snd_hda_codec_cleanup_stream(codec, hinfo->nid);
+
+@@ -1182,11 +1177,6 @@ static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
+ return -EINVAL;
+ per_pin = &spec->pins[pin_idx];
+
+- pinctl = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
+- AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
+- snd_hda_codec_write(codec, per_pin->pin_nid, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL,
+- pinctl & ~PIN_OUT);
+ snd_hda_spdif_ctls_unassign(codec, pin_idx);
+ }
+
+@@ -1911,6 +1901,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
+ { .id = 0x80862804, .name = "IbexPeak HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x80862805, .name = "CougarPoint HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi },
++{ .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
+ { .id = 0x808629fb, .name = "Crestline HDMI", .patch = patch_generic_hdmi },
+ {} /* terminator */
+ };
+@@ -1957,6 +1948,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862803");
+ MODULE_ALIAS("snd-hda-codec-id:80862804");
+ MODULE_ALIAS("snd-hda-codec-id:80862805");
+ MODULE_ALIAS("snd-hda-codec-id:80862806");
++MODULE_ALIAS("snd-hda-codec-id:80862880");
+ MODULE_ALIAS("snd-hda-codec-id:808629fb");
+
+ MODULE_LICENSE("GPL");
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1a09fbf..f3e0b24 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3854,6 +3854,7 @@ static void alc_auto_init_std(struct hda_codec *codec)
+ ((spec)->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 3, idx, dir))
+
+ static const struct snd_pci_quirk beep_white_list[] = {
++ SND_PCI_QUIRK(0x1043, 0x103c, "ASUS", 1),
+ SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
+ SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
+ SND_PCI_QUIRK(0x1043, 0x831a, "EeePC", 1),
+@@ -5468,8 +5469,8 @@ static void alc861vd_fixup_dallas(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+ {
+ if (action == ALC_FIXUP_ACT_PRE_PROBE) {
+- snd_hda_override_pin_caps(codec, 0x18, 0x00001714);
+- snd_hda_override_pin_caps(codec, 0x19, 0x0000171c);
++ snd_hda_override_pin_caps(codec, 0x18, 0x00000734);
++ snd_hda_override_pin_caps(codec, 0x19, 0x0000073c);
+ }
+ }
+
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 2b973f5..467a73b 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -1716,7 +1716,7 @@ static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1658,
+ "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1659,
+- "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
++ "HP Pavilion dv7", STAC_HP_DV7_4000),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165A,
+ "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x165B,
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index eeefbce..34b9bb7 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -116,6 +116,7 @@ struct snd_usb_midi {
+ struct list_head list;
+ struct timer_list error_timer;
+ spinlock_t disc_lock;
++ struct rw_semaphore disc_rwsem;
+ struct mutex mutex;
+ u32 usb_id;
+ int next_midi_device;
+@@ -125,8 +126,10 @@ struct snd_usb_midi {
+ struct snd_usb_midi_in_endpoint *in;
+ } endpoints[MIDI_MAX_ENDPOINTS];
+ unsigned long input_triggered;
+- unsigned int opened;
++ bool autopm_reference;
++ unsigned int opened[2];
+ unsigned char disconnected;
++ unsigned char input_running;
+
+ struct snd_kcontrol *roland_load_ctl;
+ };
+@@ -148,7 +151,6 @@ struct snd_usb_midi_out_endpoint {
+ struct snd_usb_midi_out_endpoint* ep;
+ struct snd_rawmidi_substream *substream;
+ int active;
+- bool autopm_reference;
+ uint8_t cable; /* cable number << 4 */
+ uint8_t state;
+ #define STATE_UNKNOWN 0
+@@ -1033,29 +1035,58 @@ static void update_roland_altsetting(struct snd_usb_midi* umidi)
+ snd_usbmidi_input_start(&umidi->list);
+ }
+
+-static void substream_open(struct snd_rawmidi_substream *substream, int open)
++static int substream_open(struct snd_rawmidi_substream *substream, int dir,
++ int open)
+ {
+ struct snd_usb_midi* umidi = substream->rmidi->private_data;
+ struct snd_kcontrol *ctl;
++ int err;
++
++ down_read(&umidi->disc_rwsem);
++ if (umidi->disconnected) {
++ up_read(&umidi->disc_rwsem);
++ return open ? -ENODEV : 0;
++ }
+
+ mutex_lock(&umidi->mutex);
+ if (open) {
+- if (umidi->opened++ == 0 && umidi->roland_load_ctl) {
+- ctl = umidi->roland_load_ctl;
+- ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+- snd_ctl_notify(umidi->card,
++ if (!umidi->opened[0] && !umidi->opened[1]) {
++ err = usb_autopm_get_interface(umidi->iface);
++ umidi->autopm_reference = err >= 0;
++ if (err < 0 && err != -EACCES) {
++ mutex_unlock(&umidi->mutex);
++ up_read(&umidi->disc_rwsem);
++ return -EIO;
++ }
++ if (umidi->roland_load_ctl) {
++ ctl = umidi->roland_load_ctl;
++ ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
++ snd_ctl_notify(umidi->card,
+ SNDRV_CTL_EVENT_MASK_INFO, &ctl->id);
+- update_roland_altsetting(umidi);
++ update_roland_altsetting(umidi);
++ }
+ }
++ umidi->opened[dir]++;
++ if (umidi->opened[1])
++ snd_usbmidi_input_start(&umidi->list);
+ } else {
+- if (--umidi->opened == 0 && umidi->roland_load_ctl) {
+- ctl = umidi->roland_load_ctl;
+- ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+- snd_ctl_notify(umidi->card,
++ umidi->opened[dir]--;
++ if (!umidi->opened[1])
++ snd_usbmidi_input_stop(&umidi->list);
++ if (!umidi->opened[0] && !umidi->opened[1]) {
++ if (umidi->roland_load_ctl) {
++ ctl = umidi->roland_load_ctl;
++ ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
++ snd_ctl_notify(umidi->card,
+ SNDRV_CTL_EVENT_MASK_INFO, &ctl->id);
++ }
++ if (umidi->autopm_reference)
++ usb_autopm_put_interface(umidi->iface);
+ }
+ }
+ mutex_unlock(&umidi->mutex);
++ up_read(&umidi->disc_rwsem);
++ return 0;
+ }
+
+ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
+@@ -1063,7 +1094,6 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
+ struct snd_usb_midi* umidi = substream->rmidi->private_data;
+ struct usbmidi_out_port* port = NULL;
+ int i, j;
+- int err;
+
+ for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i)
+ if (umidi->endpoints[i].out)
+@@ -1076,25 +1106,15 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
+ snd_BUG();
+ return -ENXIO;
+ }
+- err = usb_autopm_get_interface(umidi->iface);
+- port->autopm_reference = err >= 0;
+- if (err < 0 && err != -EACCES)
+- return -EIO;
++
+ substream->runtime->private_data = port;
+ port->state = STATE_UNKNOWN;
+- substream_open(substream, 1);
+- return 0;
++ return substream_open(substream, 0, 1);
+ }
+
+ static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
+ {
+- struct snd_usb_midi* umidi = substream->rmidi->private_data;
+- struct usbmidi_out_port *port = substream->runtime->private_data;
+-
+- substream_open(substream, 0);
+- if (port->autopm_reference)
+- usb_autopm_put_interface(umidi->iface);
+- return 0;
++ return substream_open(substream, 0, 0);
+ }
+
+ static void snd_usbmidi_output_trigger(struct snd_rawmidi_substream *substream, int up)
+@@ -1147,14 +1167,12 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
+
+ static int snd_usbmidi_input_open(struct snd_rawmidi_substream *substream)
+ {
+- substream_open(substream, 1);
+- return 0;
++ return substream_open(substream, 1, 1);
+ }
+
+ static int snd_usbmidi_input_close(struct snd_rawmidi_substream *substream)
+ {
+- substream_open(substream, 0);
+- return 0;
++ return substream_open(substream, 1, 0);
+ }
+
+ static void snd_usbmidi_input_trigger(struct snd_rawmidi_substream *substream, int up)
+@@ -1403,9 +1421,12 @@ void snd_usbmidi_disconnect(struct list_head* p)
+ * a timer may submit an URB. To reliably break the cycle
+ * a flag under lock must be used
+ */
++ down_write(&umidi->disc_rwsem);
+ spin_lock_irq(&umidi->disc_lock);
+ umidi->disconnected = 1;
+ spin_unlock_irq(&umidi->disc_lock);
++ up_write(&umidi->disc_rwsem);
++
+ for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
+ struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i];
+ if (ep->out)
+@@ -2060,12 +2081,15 @@ void snd_usbmidi_input_stop(struct list_head* p)
+ unsigned int i, j;
+
+ umidi = list_entry(p, struct snd_usb_midi, list);
++ if (!umidi->input_running)
++ return;
+ for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
+ struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i];
+ if (ep->in)
+ for (j = 0; j < INPUT_URBS; ++j)
+ usb_kill_urb(ep->in->urbs[j]);
+ }
++ umidi->input_running = 0;
+ }
+
+ static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint* ep)
+@@ -2090,8 +2114,11 @@ void snd_usbmidi_input_start(struct list_head* p)
+ int i;
+
+ umidi = list_entry(p, struct snd_usb_midi, list);
++ if (umidi->input_running || !umidi->opened[1])
++ return;
+ for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i)
+ snd_usbmidi_input_start_ep(umidi->endpoints[i].in);
++ umidi->input_running = 1;
+ }
+
+ /*
+@@ -2117,6 +2144,7 @@ int snd_usbmidi_create(struct snd_card *card,
+ umidi->usb_protocol_ops = &snd_usbmidi_standard_ops;
+ init_timer(&umidi->error_timer);
+ spin_lock_init(&umidi->disc_lock);
++ init_rwsem(&umidi->disc_rwsem);
+ mutex_init(&umidi->mutex);
+ umidi->usb_id = USB_ID(le16_to_cpu(umidi->dev->descriptor.idVendor),
+ le16_to_cpu(umidi->dev->descriptor.idProduct));
+@@ -2229,9 +2257,6 @@ int snd_usbmidi_create(struct snd_card *card,
+ }
+
+ list_add_tail(&umidi->list, midi_list);
+-
+- for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i)
+- snd_usbmidi_input_start_ep(umidi->endpoints[i].in);
+ return 0;
+ }
+
diff --git a/1036_linux-3.2.37.patch b/1036_linux-3.2.37.patch
new file mode 100644
index 00000000..ad132518
--- /dev/null
+++ b/1036_linux-3.2.37.patch
@@ -0,0 +1,1689 @@
+diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
+index a4399f5..3b979c6 100644
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -524,6 +524,11 @@ tcp_thin_dupack - BOOLEAN
+ Documentation/networking/tcp-thin.txt
+ Default: 0
+
++tcp_challenge_ack_limit - INTEGER
++ Limits number of Challenge ACK sent per second, as recommended
++ in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks)
++ Default: 100
++
+ UDP variables:
+
+ udp_mem - vector of 3 INTEGERs: min, pressure, max
+diff --git a/Makefile b/Makefile
+index 2052c29..21c77e2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 36
++SUBLEVEL = 37
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index ec8affe..e74f86e 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -859,13 +859,8 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
+
+ void update_vsyscall_tz(void)
+ {
+- /* Make userspace gettimeofday spin until we're done. */
+- ++vdso_data->tb_update_count;
+- smp_mb();
+ vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
+ vdso_data->tz_dsttime = sys_tz.tz_dsttime;
+- smp_mb();
+- ++vdso_data->tb_update_count;
+ }
+
+ static void __init clocksource_init(void)
+diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
+index e8dd5c5..d10c123 100644
+--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
++++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
+@@ -55,7 +55,8 @@ static const char *board[] __initdata = {
+ "amcc,haleakala",
+ "amcc,kilauea",
+ "amcc,makalu",
+- "est,hotfoot"
++ "est,hotfoot",
++ NULL
+ };
+
+ static int __init ppc40x_probe(void)
+diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
+index 1770610..f368cef 100644
+--- a/arch/sparc/include/asm/hugetlb.h
++++ b/arch/sparc/include/asm/hugetlb.h
+@@ -58,14 +58,20 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
+ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+ {
+- ptep_set_wrprotect(mm, addr, ptep);
++ pte_t old_pte = *ptep;
++ set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+ }
+
+ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+ {
+- return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
++ int changed = !pte_same(*ptep, pte);
++ if (changed) {
++ set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
++ flush_tlb_page(vma, addr);
++ }
++ return changed;
+ }
+
+ static inline pte_t huge_ptep_get(pte_t *ptep)
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 8ab80ba..792b66f 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -789,8 +789,8 @@ acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
+ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
+ {
+ struct acpi_device_id button_device_ids[] = {
+- {"PNP0C0D", 0},
+ {"PNP0C0C", 0},
++ {"PNP0C0D", 0},
+ {"PNP0C0E", 0},
+ {"", 0},
+ };
+@@ -802,6 +802,11 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
+ /* Power button, Lid switch always enable wakeup */
+ if (!acpi_match_device_ids(device, button_device_ids)) {
+ device->wakeup.flags.run_wake = 1;
++ if (!acpi_match_device_ids(device, &button_device_ids[1])) {
++ /* Do not use Lid/sleep button for S5 wakeup */
++ if (device->wakeup.sleep_state == ACPI_STATE_S5)
++ device->wakeup.sleep_state = ACPI_STATE_S4;
++ }
+ device_set_wakeup_capable(&device->dev, true);
+ return;
+ }
+@@ -1152,7 +1157,7 @@ static void acpi_device_set_id(struct acpi_device *device)
+ acpi_add_id(device, ACPI_DOCK_HID);
+ else if (!acpi_ibm_smbus_match(device))
+ acpi_add_id(device, ACPI_SMBUS_IBM_HID);
+- else if (!acpi_device_hid(device) &&
++ else if (list_empty(&device->pnp.ids) &&
+ ACPI_IS_ROOT_DEVICE(device->parent)) {
+ acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
+ strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
+diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
+index db195ab..e49ddd0 100644
+--- a/drivers/block/aoe/aoe.h
++++ b/drivers/block/aoe/aoe.h
+@@ -1,5 +1,5 @@
+ /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */
+-#define VERSION "47"
++#define VERSION "47q"
+ #define AOE_MAJOR 152
+ #define DEVICE_NAME "aoe"
+
+diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
+index 321de7b..7eca463 100644
+--- a/drivers/block/aoe/aoeblk.c
++++ b/drivers/block/aoe/aoeblk.c
+@@ -276,8 +276,6 @@ aoeblk_gdalloc(void *vp)
+ goto err_mempool;
+ blk_queue_make_request(d->blkq, aoeblk_make_request);
+ d->blkq->backing_dev_info.name = "aoe";
+- if (bdi_init(&d->blkq->backing_dev_info))
+- goto err_blkq;
+ spin_lock_irqsave(&d->lock, flags);
+ gd->major = AOE_MAJOR;
+ gd->first_minor = d->sysminor * AOE_PARTITIONS;
+@@ -298,9 +296,6 @@ aoeblk_gdalloc(void *vp)
+ aoedisk_add_sysfs(d);
+ return;
+
+-err_blkq:
+- blk_cleanup_queue(d->blkq);
+- d->blkq = NULL;
+ err_mempool:
+ mempool_destroy(d->bufpool);
+ err_disk:
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 791df46..012a9d2 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1305,6 +1305,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
+ #endif /* CONFIG_ACPI */
+
+ /* modesetting */
++extern void i915_redisable_vga(struct drm_device *dev);
+ extern void intel_modeset_init(struct drm_device *dev);
+ extern void intel_modeset_gem_init(struct drm_device *dev);
+ extern void intel_modeset_cleanup(struct drm_device *dev);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 54acad3..fa9639b 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8898,6 +8898,23 @@ static void i915_disable_vga(struct drm_device *dev)
+ POSTING_READ(vga_reg);
+ }
+
++void i915_redisable_vga(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ u32 vga_reg;
++
++ if (HAS_PCH_SPLIT(dev))
++ vga_reg = CPU_VGACNTRL;
++ else
++ vga_reg = VGACNTRL;
++
++ if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
++ DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
++ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
++ POSTING_READ(vga_reg);
++ }
++}
++
+ void intel_modeset_init(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index cf5ea3d..c6d0966 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -535,6 +535,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_helper_resume_force_mode(dev);
++ i915_redisable_vga(dev);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return NOTIFY_OK;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index d5af089..2bb29c9 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -940,7 +940,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = dev_priv->gart_info.aper_base;
+- mem->bus.is_iomem = true;
++ mem->bus.is_iomem = !dev->agp->cant_use_aperture;
+ }
+ #endif
+ break;
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index 81fc100..1b98338 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -1536,6 +1536,9 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ of_machine_is_compatible("PowerBook6,7")) {
+ /* ibook */
+ rdev->mode_info.connector_table = CT_IBOOK;
++ } else if (of_machine_is_compatible("PowerMac3,5")) {
++ /* PowerMac G4 Silver radeon 7500 */
++ rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
+ } else if (of_machine_is_compatible("PowerMac4,4")) {
+ /* emac */
+ rdev->mode_info.connector_table = CT_EMAC;
+@@ -1561,6 +1564,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ (rdev->pdev->subsystem_device == 0x4150)) {
+ /* Mac G5 tower 9600 */
+ rdev->mode_info.connector_table = CT_MAC_G5_9600;
++ } else if ((rdev->pdev->device == 0x4c66) &&
++ (rdev->pdev->subsystem_vendor == 0x1002) &&
++ (rdev->pdev->subsystem_device == 0x4c66)) {
++ /* SAM440ep RV250 embedded board */
++ rdev->mode_info.connector_table = CT_SAM440EP;
+ } else
+ #endif /* CONFIG_PPC_PMAC */
+ #ifdef CONFIG_PPC64
+@@ -2134,6 +2142,115 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ CONNECTOR_OBJECT_ID_SVIDEO,
+ &hpd);
+ break;
++ case CT_SAM440EP:
++ DRM_INFO("Connector Table: %d (SAM440ep embedded board)\n",
++ rdev->mode_info.connector_table);
++ /* LVDS */
++ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
++ hpd.hpd = RADEON_HPD_NONE;
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_LCD1_SUPPORT,
++ 0),
++ ATOM_DEVICE_LCD1_SUPPORT);
++ radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
++ DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
++ CONNECTOR_OBJECT_ID_LVDS,
++ &hpd);
++ /* DVI-I - secondary dac, int tmds */
++ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
++ hpd.hpd = RADEON_HPD_1; /* ??? */
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_DFP1_SUPPORT,
++ 0),
++ ATOM_DEVICE_DFP1_SUPPORT);
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_CRT2_SUPPORT,
++ 2),
++ ATOM_DEVICE_CRT2_SUPPORT);
++ radeon_add_legacy_connector(dev, 1,
++ ATOM_DEVICE_DFP1_SUPPORT |
++ ATOM_DEVICE_CRT2_SUPPORT,
++ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
++ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
++ &hpd);
++ /* VGA - primary dac */
++ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
++ hpd.hpd = RADEON_HPD_NONE;
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_CRT1_SUPPORT,
++ 1),
++ ATOM_DEVICE_CRT1_SUPPORT);
++ radeon_add_legacy_connector(dev, 2,
++ ATOM_DEVICE_CRT1_SUPPORT,
++ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
++ CONNECTOR_OBJECT_ID_VGA,
++ &hpd);
++ /* TV - TV DAC */
++ ddc_i2c.valid = false;
++ hpd.hpd = RADEON_HPD_NONE;
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_TV1_SUPPORT,
++ 2),
++ ATOM_DEVICE_TV1_SUPPORT);
++ radeon_add_legacy_connector(dev, 3, ATOM_DEVICE_TV1_SUPPORT,
++ DRM_MODE_CONNECTOR_SVIDEO,
++ &ddc_i2c,
++ CONNECTOR_OBJECT_ID_SVIDEO,
++ &hpd);
++ break;
++ case CT_MAC_G4_SILVER:
++ DRM_INFO("Connector Table: %d (mac g4 silver)\n",
++ rdev->mode_info.connector_table);
++ /* DVI-I - tv dac, int tmds */
++ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
++ hpd.hpd = RADEON_HPD_1; /* ??? */
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_DFP1_SUPPORT,
++ 0),
++ ATOM_DEVICE_DFP1_SUPPORT);
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_CRT2_SUPPORT,
++ 2),
++ ATOM_DEVICE_CRT2_SUPPORT);
++ radeon_add_legacy_connector(dev, 0,
++ ATOM_DEVICE_DFP1_SUPPORT |
++ ATOM_DEVICE_CRT2_SUPPORT,
++ DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
++ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
++ &hpd);
++ /* VGA - primary dac */
++ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
++ hpd.hpd = RADEON_HPD_NONE;
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_CRT1_SUPPORT,
++ 1),
++ ATOM_DEVICE_CRT1_SUPPORT);
++ radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
++ DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
++ CONNECTOR_OBJECT_ID_VGA,
++ &hpd);
++ /* TV - TV DAC */
++ ddc_i2c.valid = false;
++ hpd.hpd = RADEON_HPD_NONE;
++ radeon_add_legacy_encoder(dev,
++ radeon_get_encoder_enum(dev,
++ ATOM_DEVICE_TV1_SUPPORT,
++ 2),
++ ATOM_DEVICE_TV1_SUPPORT);
++ radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
++ DRM_MODE_CONNECTOR_SVIDEO,
++ &ddc_i2c,
++ CONNECTOR_OBJECT_ID_SVIDEO,
++ &hpd);
++ break;
+ default:
+ DRM_INFO("Connector table: %d (invalid)\n",
+ rdev->mode_info.connector_table);
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 87d494d..6fd53b6 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -689,7 +689,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
+ ret = connector_status_disconnected;
+
+ if (radeon_connector->ddc_bus)
+- dret = radeon_ddc_probe(radeon_connector);
++ dret = radeon_ddc_probe(radeon_connector, false);
+ if (dret) {
+ radeon_connector->detected_by_load = false;
+ if (radeon_connector->edid) {
+@@ -871,7 +871,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
+ bool dret = false;
+
+ if (radeon_connector->ddc_bus)
+- dret = radeon_ddc_probe(radeon_connector);
++ dret = radeon_ddc_probe(radeon_connector, false);
+ if (dret) {
+ radeon_connector->detected_by_load = false;
+ if (radeon_connector->edid) {
+@@ -1299,7 +1299,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
+ if (encoder) {
+ /* setup ddc on the bridge */
+ radeon_atom_ext_encoder_setup_ddc(encoder);
+- if (radeon_ddc_probe(radeon_connector)) /* try DDC */
++ /* bridge chips are always aux */
++ if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
+ ret = connector_status_connected;
+ else if (radeon_connector->dac_load_detect) { /* try load detection */
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+@@ -1317,7 +1318,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
+ if (radeon_dp_getdpcd(radeon_connector))
+ ret = connector_status_connected;
+ } else {
+- if (radeon_ddc_probe(radeon_connector))
++ /* try non-aux ddc (DP to DVI/HMDI/etc. adapter) */
++ if (radeon_ddc_probe(radeon_connector, false))
+ ret = connector_status_connected;
+ }
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index a22d6e6..aec8e0c 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -701,10 +701,15 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
+
+- if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
+- (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
+- ENCODER_OBJECT_ID_NONE)) {
++ if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
++ ENCODER_OBJECT_ID_NONE) {
++ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
++
++ if (dig->dp_i2c_bus)
++ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
++ &dig->dp_i2c_bus->adapter);
++ } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
++ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
+index 1441b00..cf20351 100644
+--- a/drivers/gpu/drm/radeon/radeon_i2c.c
++++ b/drivers/gpu/drm/radeon/radeon_i2c.c
+@@ -34,7 +34,7 @@
+ * radeon_ddc_probe
+ *
+ */
+-bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
++bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
+ {
+ u8 out = 0x0;
+ u8 buf[8];
+@@ -58,7 +58,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
+
+- ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
++ if (use_aux) {
++ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
++ ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2);
++ } else {
++ ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
++ }
++
+ if (ret != 2)
+ /* Couldn't find an accessible DDC on this connector */
+ return false;
+diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
+index 8254d5a..bb42df4 100644
+--- a/drivers/gpu/drm/radeon/radeon_mode.h
++++ b/drivers/gpu/drm/radeon/radeon_mode.h
+@@ -210,6 +210,8 @@ enum radeon_connector_table {
+ CT_RN50_POWER,
+ CT_MAC_X800,
+ CT_MAC_G5_9600,
++ CT_SAM440EP,
++ CT_MAC_G4_SILVER
+ };
+
+ enum radeon_dvo_chip {
+@@ -521,7 +523,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
+ u8 val);
+ extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
+ extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
+-extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
++extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
+ extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
+
+ extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
+diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
+index 9e64d96..376d9d9 100644
+--- a/drivers/hwmon/lm73.c
++++ b/drivers/hwmon/lm73.c
+@@ -49,6 +49,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
+ struct i2c_client *client = to_i2c_client(dev);
+ long temp;
+ short value;
++ s32 err;
+
+ int status = strict_strtol(buf, 10, &temp);
+ if (status < 0)
+@@ -57,8 +58,8 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
+ /* Write value */
+ value = (short) SENSORS_LIMIT(temp/250, (LM73_TEMP_MIN*4),
+ (LM73_TEMP_MAX*4)) << 5;
+- i2c_smbus_write_word_swapped(client, attr->index, value);
+- return count;
++ err = i2c_smbus_write_word_swapped(client, attr->index, value);
++ return (err < 0) ? err : count;
+ }
+
+ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
+@@ -66,11 +67,16 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
+ {
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct i2c_client *client = to_i2c_client(dev);
++ int temp;
++
++ s32 err = i2c_smbus_read_word_swapped(client, attr->index);
++ if (err < 0)
++ return err;
++
+ /* use integer division instead of equivalent right shift to
+ guarantee arithmetic shift and preserve the sign */
+- int temp = ((s16) (i2c_smbus_read_word_swapped(client,
+- attr->index))*250) / 32;
+- return sprintf(buf, "%d\n", temp);
++ temp = (((s16) err) * 250) / 32;
++ return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
+ }
+
+
+diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
+index 568b4f1..3ade373 100644
+--- a/drivers/infiniband/hw/nes/nes.h
++++ b/drivers/infiniband/hw/nes/nes.h
+@@ -524,6 +524,7 @@ void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
+ int nes_destroy_cqp(struct nes_device *);
+ int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
+ void nes_recheck_link_status(struct work_struct *work);
++void nes_terminate_timeout(unsigned long context);
+
+ /* nes_nic.c */
+ struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
+diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
+index 7c0ff19..4cd1bf7 100644
+--- a/drivers/infiniband/hw/nes/nes_hw.c
++++ b/drivers/infiniband/hw/nes/nes_hw.c
+@@ -75,7 +75,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
+ static void process_critical_error(struct nes_device *nesdev);
+ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
+ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
+-static void nes_terminate_timeout(unsigned long context);
+ static void nes_terminate_start_timer(struct nes_qp *nesqp);
+
+ #ifdef CONFIG_INFINIBAND_NES_DEBUG
+@@ -3522,7 +3521,7 @@ static void nes_terminate_received(struct nes_device *nesdev,
+ }
+
+ /* Timeout routine in case terminate fails to complete */
+-static void nes_terminate_timeout(unsigned long context)
++void nes_terminate_timeout(unsigned long context)
+ {
+ struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
+
+@@ -3532,11 +3531,7 @@ static void nes_terminate_timeout(unsigned long context)
+ /* Set a timer in case hw cannot complete the terminate sequence */
+ static void nes_terminate_start_timer(struct nes_qp *nesqp)
+ {
+- init_timer(&nesqp->terminate_timer);
+- nesqp->terminate_timer.function = nes_terminate_timeout;
+- nesqp->terminate_timer.expires = jiffies + HZ;
+- nesqp->terminate_timer.data = (unsigned long)nesqp;
+- add_timer(&nesqp->terminate_timer);
++ mod_timer(&nesqp->terminate_timer, (jiffies + HZ));
+ }
+
+ /**
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
+index 5095bc4..b0471b4 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.c
++++ b/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -1404,6 +1404,9 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
+ }
+
+ nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
++ init_timer(&nesqp->terminate_timer);
++ nesqp->terminate_timer.function = nes_terminate_timeout;
++ nesqp->terminate_timer.data = (unsigned long)nesqp;
+
+ /* update the QP table */
+ nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
+@@ -1413,7 +1416,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
+ return &nesqp->ibqp;
+ }
+
+-
+ /**
+ * nes_clean_cq
+ */
+@@ -2559,6 +2561,11 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ return ibmr;
+ case IWNES_MEMREG_TYPE_QP:
+ case IWNES_MEMREG_TYPE_CQ:
++ if (!region->length) {
++ nes_debug(NES_DBG_MR, "Unable to register zero length region for CQ\n");
++ ib_umem_release(region);
++ return ERR_PTR(-EINVAL);
++ }
+ nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
+ if (!nespbl) {
+ nes_debug(NES_DBG_MR, "Unable to allocate PBL\n");
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+index e9d73e7..979d225 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+@@ -701,8 +701,8 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
+ 2);
+ else if (AR_SREV_9485_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+- ar9485Common_wo_xlna_rx_gain_1_1,
+- ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
++ ar9485_common_rx_gain_1_1,
++ ARRAY_SIZE(ar9485_common_rx_gain_1_1),
+ 2);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
+index 8a009bc..7ca84c3 100644
+--- a/drivers/net/wireless/p54/p54usb.c
++++ b/drivers/net/wireless/p54/p54usb.c
+@@ -47,6 +47,7 @@ static struct usb_device_id p54u_table[] = {
+ {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */
+ {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */
+ {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
++ {USB_DEVICE(0x0675, 0x0530)}, /* DrayTek Vigor 530 */
+ {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */
+ {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */
+ {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */
+@@ -82,6 +83,8 @@ static struct usb_device_id p54u_table[] = {
+ {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */
+ {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
+ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
++ {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */
++ {USB_DEVICE(0x083a, 0x4503)}, /* T-Com Sinus 154 data II */
+ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
+ {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */
+ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */
+@@ -101,6 +104,7 @@ static struct usb_device_id p54u_table[] = {
+ {USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */
+ {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
+ {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
++ /* {USB_DEVICE(0x15a9, 0x0002)}, * Also SparkLAN WL-682 with 3887 */
+ {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
+ {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */
+ {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
+diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
+index fb19447..67cbe5a 100644
+--- a/drivers/net/wireless/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/rt2x00/rt2800lib.c
+@@ -4208,7 +4208,8 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+- IEEE80211_HW_AMPDU_AGGREGATION;
++ IEEE80211_HW_AMPDU_AGGREGATION |
++ IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL;
+ /*
+ * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices
+ * unless we are capable of sending the buffered frames out after the
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 78fda9c..cab24f7 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2747,7 +2747,7 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
+ if (PCI_FUNC(dev->devfn))
+ return;
+ /*
+- * RICOH 0xe823 SD/MMC card reader fails to recognize
++ * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize
+ * certain types of SD/MMC cards. Lowering the SD base
+ * clock frequency from 200Mhz to 50Mhz fixes this issue.
+ *
+@@ -2758,7 +2758,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
+ * 0xf9 - Key register for 0x150
+ * 0xfc - key register for 0xe1
+ */
+- if (dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
++ if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
++ dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
+ pci_write_config_byte(dev, 0xf9, 0xfc);
+ pci_write_config_byte(dev, 0x150, 0x10);
+ pci_write_config_byte(dev, 0xf9, 0x00);
+@@ -2785,6 +2786,8 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
+ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
++DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
+ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
+ #endif /*CONFIG_MMC_RICOH_MMC*/
+diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
+index f93f412..f818dae 100644
+--- a/drivers/rtc/rtc-vt8500.c
++++ b/drivers/rtc/rtc-vt8500.c
+@@ -69,7 +69,7 @@
+ | ALARM_SEC_BIT)
+
+ #define VT8500_RTC_CR_ENABLE (1 << 0) /* Enable RTC */
+-#define VT8500_RTC_CR_24H (1 << 1) /* 24h time format */
++#define VT8500_RTC_CR_12H (1 << 1) /* 12h time format */
+ #define VT8500_RTC_CR_SM_ENABLE (1 << 2) /* Enable periodic irqs */
+ #define VT8500_RTC_CR_SM_SEC (1 << 3) /* 0: 1Hz/60, 1: 1Hz */
+ #define VT8500_RTC_CR_CALIB (1 << 4) /* Enable calibration */
+@@ -118,7 +118,7 @@ static int vt8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ tm->tm_min = bcd2bin((time & TIME_MIN_MASK) >> TIME_MIN_S);
+ tm->tm_hour = bcd2bin((time & TIME_HOUR_MASK) >> TIME_HOUR_S);
+ tm->tm_mday = bcd2bin(date & DATE_DAY_MASK);
+- tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S);
++ tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S) - 1;
+ tm->tm_year = bcd2bin((date & DATE_YEAR_MASK) >> DATE_YEAR_S)
+ + ((date >> DATE_CENTURY_S) & 1 ? 200 : 100);
+ tm->tm_wday = (time & TIME_DOW_MASK) >> TIME_DOW_S;
+@@ -137,8 +137,9 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
+ }
+
+ writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S)
+- | (bin2bcd(tm->tm_mon) << DATE_MONTH_S)
+- | (bin2bcd(tm->tm_mday)),
++ | (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S)
++ | (bin2bcd(tm->tm_mday))
++ | ((tm->tm_year >= 200) << DATE_CENTURY_S),
+ vt8500_rtc->regbase + VT8500_RTC_DS);
+ writel((bin2bcd(tm->tm_wday) << TIME_DOW_S)
+ | (bin2bcd(tm->tm_hour) << TIME_HOUR_S)
+@@ -248,7 +249,7 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
+ }
+
+ /* Enable RTC and set it to 24-hour mode */
+- writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H,
++ writel(VT8500_RTC_CR_ENABLE,
+ vt8500_rtc->regbase + VT8500_RTC_CR);
+
+ vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev,
+diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
+index a4884a5..c6ad694 100644
+--- a/drivers/scsi/mvsas/mv_sas.c
++++ b/drivers/scsi/mvsas/mv_sas.c
+@@ -1635,7 +1635,7 @@ int mvs_abort_task(struct sas_task *task)
+ mv_dprintk("mvs_abort_task() mvi=%p task=%p "
+ "slot=%p slot_idx=x%x\n",
+ mvi, task, slot, slot_idx);
+- mvs_tmf_timedout((unsigned long)task);
++ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ mvs_slot_task_free(mvi, task, slot, slot_idx);
+ rc = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
+index d837d63..03cb95a 100644
+--- a/drivers/video/mxsfb.c
++++ b/drivers/video/mxsfb.c
+@@ -366,7 +366,8 @@ static void mxsfb_disable_controller(struct fb_info *fb_info)
+ loop--;
+ }
+
+- writel(VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4 + REG_CLR);
++ reg = readl(host->base + LCDC_VDCTRL4);
++ writel(reg & ~VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4);
+
+ clk_disable(host->clk);
+
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 99a27cf..4e5dfb7 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -485,6 +485,13 @@ send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
+ mutex_unlock(&server->srv_mutex);
+ return rc;
+ }
++
++ /*
++ * The response to this call was already factored into the sequence
++ * number when the call went out, so we must adjust it back downward
++ * after signing here.
++ */
++ --server->sequence_number;
+ rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
+ mutex_unlock(&server->srv_mutex);
+
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index a6f3763..451b9b8 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1197,10 +1197,30 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
+ * otherwise we might miss an event that happens between the
+ * f_op->poll() call and the new event set registering.
+ */
+- epi->event.events = event->events;
++ epi->event.events = event->events; /* need barrier below */
+ epi->event.data = event->data; /* protected by mtx */
+
+ /*
++ * The following barrier has two effects:
++ *
++ * 1) Flush epi changes above to other CPUs. This ensures
++ * we do not miss events from ep_poll_callback if an
++ * event occurs immediately after we call f_op->poll().
++ * We need this because we did not take ep->lock while
++ * changing epi above (but ep_poll_callback does take
++ * ep->lock).
++ *
++ * 2) We also need to ensure we do not miss _past_ events
++ * when calling f_op->poll(). This barrier also
++ * pairs with the barrier in wq_has_sleeper (see
++ * comments for wq_has_sleeper).
++ *
++ * This barrier will now guarantee ep_poll_callback or f_op->poll
++ * (or both) will notice the readiness of an item.
++ */
++ smp_mb();
++
++ /*
+ * Get current event bits. We can safely use the file* here because
+ * its usage count has been increased by the caller of this function.
+ */
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 191580a..fbb92e6 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2093,13 +2093,14 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
+ * removes index from the index block.
+ */
+ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
+- struct ext4_ext_path *path)
++ struct ext4_ext_path *path, int depth)
+ {
+ int err;
+ ext4_fsblk_t leaf;
+
+ /* free index block */
+- path--;
++ depth--;
++ path = path + depth;
+ leaf = ext4_idx_pblock(path->p_idx);
+ if (unlikely(path->p_hdr->eh_entries == 0)) {
+ EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
+@@ -2124,6 +2125,19 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
+
+ ext4_free_blocks(handle, inode, NULL, leaf, 1,
+ EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
++
++ while (--depth >= 0) {
++ if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
++ break;
++ path--;
++ err = ext4_ext_get_access(handle, inode, path);
++ if (err)
++ break;
++ path->p_idx->ei_block = (path+1)->p_idx->ei_block;
++ err = ext4_ext_dirty(handle, inode, path);
++ if (err)
++ break;
++ }
+ return err;
+ }
+
+@@ -2454,7 +2468,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
+ /* if this leaf is free, then we should
+ * remove it from index block above */
+ if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
+- err = ext4_ext_rm_idx(handle, inode, path + depth);
++ err = ext4_ext_rm_idx(handle, inode, path, depth);
+
+ out:
+ return err;
+@@ -2587,7 +2601,7 @@ again:
+ /* index is empty, remove it;
+ * handle must be already prepared by the
+ * truncatei_leaf() */
+- err = ext4_ext_rm_idx(handle, inode, path + i);
++ err = ext4_ext_rm_idx(handle, inode, path, i);
+ }
+ /* root level has p_bh == NULL, brelse() eats this */
+ brelse(path[i].p_bh);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index f8d5fce..24ac7a2 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1942,6 +1942,16 @@ set_qf_format:
+ }
+ }
+ #endif
++ if (test_opt(sb, DIOREAD_NOLOCK)) {
++ int blocksize =
++ BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
++
++ if (blocksize < PAGE_CACHE_SIZE) {
++ ext4_msg(sb, KERN_ERR, "can't mount with "
++ "dioread_nolock if block size != PAGE_SIZE");
++ return 0;
++ }
++ }
+ return 1;
+ }
+
+@@ -3367,15 +3377,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ clear_opt(sb, DELALLOC);
+ }
+
+- blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+- if (test_opt(sb, DIOREAD_NOLOCK)) {
+- if (blocksize < PAGE_SIZE) {
+- ext4_msg(sb, KERN_ERR, "can't mount with "
+- "dioread_nolock if block size != PAGE_SIZE");
+- goto failed_mount;
+- }
+- }
+-
+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+
+@@ -3417,6 +3418,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
+ goto failed_mount;
+
++ blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+ if (blocksize < EXT4_MIN_BLOCK_SIZE ||
+ blocksize > EXT4_MAX_BLOCK_SIZE) {
+ ext4_msg(sb, KERN_ERR,
+@@ -4652,7 +4654,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
+ }
+
+ ext4_setup_system_zone(sb);
+- if (sbi->s_journal == NULL)
++ if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
+ ext4_commit_super(sb, 1);
+
+ #ifdef CONFIG_QUOTA
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 8267de5..d7dd774 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -179,7 +179,8 @@ repeat:
+ if (!new_transaction)
+ goto alloc_transaction;
+ write_lock(&journal->j_state_lock);
+- if (!journal->j_running_transaction) {
++ if (!journal->j_running_transaction &&
++ !journal->j_barrier_count) {
+ jbd2_get_transaction(journal, new_transaction);
+ new_transaction = NULL;
+ }
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index 8150344..1943898 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -1057,7 +1057,7 @@ static int nfs_get_option_str(substring_t args[], char **option)
+ {
+ kfree(*option);
+ *option = match_strdup(args);
+- return !option;
++ return !*option;
+ }
+
+ static int nfs_get_option_ul(substring_t args[], unsigned long *option)
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 15df1a4..af37ce3 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -581,6 +581,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
+ int lastblock = 0;
++ bool isBeyondEOF;
+
+ prev_epos.offset = udf_file_entry_alloc_offset(inode);
+ prev_epos.block = iinfo->i_location;
+@@ -659,7 +660,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
+ /* Are we beyond EOF? */
+ if (etype == -1) {
+ int ret;
+-
++ isBeyondEOF = 1;
+ if (count) {
+ if (c)
+ laarr[0] = laarr[1];
+@@ -702,6 +703,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
+ endnum = c + 1;
+ lastblock = 1;
+ } else {
++ isBeyondEOF = 0;
+ endnum = startnum = ((count > 2) ? 2 : count);
+
+ /* if the current extent is in position 0,
+@@ -749,7 +751,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
+ *err = -ENOSPC;
+ return NULL;
+ }
+- iinfo->i_lenExtents += inode->i_sb->s_blocksize;
++ if (isBeyondEOF)
++ iinfo->i_lenExtents += inode->i_sb->s_blocksize;
+ }
+
+ /* if the extent the requsted block is located in contains multiple
+diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
+index e58fa77..34e0274 100644
+--- a/include/asm-generic/tlb.h
++++ b/include/asm-generic/tlb.h
+@@ -78,6 +78,14 @@ struct mmu_gather_batch {
+ #define MAX_GATHER_BATCH \
+ ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
+
++/*
++ * Limit the maximum number of mmu_gather batches to reduce a risk of soft
++ * lockups for non-preemptible kernels on huge machines when a lot of memory
++ * is zapped during unmapping.
++ * 10K pages freed at once should be safe even without a preemption point.
++ */
++#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
++
+ /* struct mmu_gather is an opaque type used by the mm code for passing around
+ * any data needed by arch specific code for tlb_remove_page.
+ */
+@@ -94,6 +102,7 @@ struct mmu_gather {
+ struct mmu_gather_batch *active;
+ struct mmu_gather_batch local;
+ struct page *__pages[MMU_GATHER_BUNDLE];
++ unsigned int batch_count;
+ };
+
+ #define HAVE_GENERIC_MMU_GATHER
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 59e4028..3fd17c2 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -50,6 +50,11 @@
+ # define inline inline __attribute__((always_inline))
+ # define __inline__ __inline__ __attribute__((always_inline))
+ # define __inline __inline __attribute__((always_inline))
++#else
++/* A lot of inline functions can cause havoc with function tracing */
++# define inline inline notrace
++# define __inline__ __inline__ notrace
++# define __inline __inline notrace
+ #endif
+
+ #define __deprecated __attribute__((deprecated))
+diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
+index e90a673..8d9b903 100644
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -360,7 +360,7 @@ static inline void ClearPageCompound(struct page *page)
+ * pages on the LRU and/or pagecache.
+ */
+ TESTPAGEFLAG(Compound, compound)
+-__PAGEFLAG(Head, compound)
++__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound)
+
+ /*
+ * PG_reclaim is used in combination with PG_compound to mark the
+@@ -372,8 +372,14 @@ __PAGEFLAG(Head, compound)
+ * PG_compound & PG_reclaim => Tail page
+ * PG_compound & ~PG_reclaim => Head page
+ */
++#define PG_head_mask ((1L << PG_compound))
+ #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
+
++static inline int PageHead(struct page *page)
++{
++ return ((page->flags & PG_head_tail_mask) == PG_head_mask);
++}
++
+ static inline int PageTail(struct page *page)
+ {
+ return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 5776609..3db3da1 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -1543,6 +1543,7 @@
+ #define PCI_DEVICE_ID_RICOH_RL5C476 0x0476
+ #define PCI_DEVICE_ID_RICOH_RL5C478 0x0478
+ #define PCI_DEVICE_ID_RICOH_R5C822 0x0822
++#define PCI_DEVICE_ID_RICOH_R5CE822 0xe822
+ #define PCI_DEVICE_ID_RICOH_R5CE823 0xe823
+ #define PCI_DEVICE_ID_RICOH_R5C832 0x0832
+ #define PCI_DEVICE_ID_RICOH_R5C843 0x0843
+diff --git a/include/linux/snmp.h b/include/linux/snmp.h
+index e16557a..64f5ca7 100644
+--- a/include/linux/snmp.h
++++ b/include/linux/snmp.h
+@@ -209,7 +209,6 @@ enum
+ LINUX_MIB_TCPDSACKOFOSENT, /* TCPDSACKOfoSent */
+ LINUX_MIB_TCPDSACKRECV, /* TCPDSACKRecv */
+ LINUX_MIB_TCPDSACKOFORECV, /* TCPDSACKOfoRecv */
+- LINUX_MIB_TCPABORTONSYN, /* TCPAbortOnSyn */
+ LINUX_MIB_TCPABORTONDATA, /* TCPAbortOnData */
+ LINUX_MIB_TCPABORTONCLOSE, /* TCPAbortOnClose */
+ LINUX_MIB_TCPABORTONMEMORY, /* TCPAbortOnMemory */
+@@ -233,6 +232,8 @@ enum
+ LINUX_MIB_TCPTIMEWAITOVERFLOW, /* TCPTimeWaitOverflow */
+ LINUX_MIB_TCPREQQFULLDOCOOKIES, /* TCPReqQFullDoCookies */
+ LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */
++ LINUX_MIB_TCPCHALLENGEACK, /* TCPChallengeACK */
++ LINUX_MIB_TCPSYNCHALLENGE, /* TCPSYNChallenge */
+ __LINUX_MIB_MAX
+ };
+
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index e6db62e..ca2755f 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -317,6 +317,7 @@ extern void inet_csk_reqsk_queue_prune(struct sock *parent,
+ const unsigned long max_rto);
+
+ extern void inet_csk_destroy_sock(struct sock *sk);
++extern void inet_csk_prepare_forced_close(struct sock *sk);
+
+ /*
+ * LISTEN is a special case for poll..
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 72eddd1..1a6201a 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -1128,6 +1128,10 @@ enum sta_notify_cmd {
+ * @IEEE80211_HW_TX_AMPDU_SETUP_IN_HW: The device handles TX A-MPDU session
+ * setup strictly in HW. mac80211 should not attempt to do this in
+ * software.
++ *
++ * @IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL: On this hardware TX BA session
++ * should be tear down once BAR frame will not be acked.
++ *
+ */
+ enum ieee80211_hw_flags {
+ IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
+@@ -1154,6 +1158,7 @@ enum ieee80211_hw_flags {
+ IEEE80211_HW_SUPPORTS_PER_STA_GTK = 1<<21,
+ IEEE80211_HW_AP_LINK_PS = 1<<22,
+ IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23,
++ IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL = 1<<26,
+ };
+
+ /**
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index bb18c4d..0768715 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -251,6 +251,7 @@ extern int sysctl_tcp_max_ssthresh;
+ extern int sysctl_tcp_cookie_size;
+ extern int sysctl_tcp_thin_linear_timeouts;
+ extern int sysctl_tcp_thin_dupack;
++extern int sysctl_tcp_challenge_ack_limit;
+
+ extern atomic_long_t tcp_memory_allocated;
+ extern struct percpu_counter tcp_sockets_allocated;
+diff --git a/mm/memory.c b/mm/memory.c
+index 15e686a..4f2add1 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -205,10 +205,14 @@ static int tlb_next_batch(struct mmu_gather *tlb)
+ return 1;
+ }
+
++ if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
++ return 0;
++
+ batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+ if (!batch)
+ return 0;
+
++ tlb->batch_count++;
+ batch->next = NULL;
+ batch->nr = 0;
+ batch->max = MAX_GATHER_BATCH;
+@@ -235,6 +239,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+ tlb->local.nr = 0;
+ tlb->local.max = ARRAY_SIZE(tlb->__pages);
+ tlb->active = &tlb->local;
++ tlb->batch_count = 0;
+
+ #ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ tlb->batch = NULL;
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index c59d44b..4d1e637 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -2334,8 +2334,7 @@ void numa_default_policy(void)
+ */
+
+ /*
+- * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag
+- * Used only for mpol_parse_str() and mpol_to_str()
++ * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
+ */
+ #define MPOL_LOCAL MPOL_MAX
+ static const char * const policy_modes[] =
+@@ -2350,28 +2349,21 @@ static const char * const policy_modes[] =
+
+ #ifdef CONFIG_TMPFS
+ /**
+- * mpol_parse_str - parse string to mempolicy
++ * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
+ * @str: string containing mempolicy to parse
+ * @mpol: pointer to struct mempolicy pointer, returned on success.
+- * @no_context: flag whether to "contextualize" the mempolicy
++ * @unused: redundant argument, to be removed later.
+ *
+ * Format of input:
+ * <mode>[=<flags>][:<nodelist>]
+ *
+- * if @no_context is true, save the input nodemask in w.user_nodemask in
+- * the returned mempolicy. This will be used to "clone" the mempolicy in
+- * a specific context [cpuset] at a later time. Used to parse tmpfs mpol
+- * mount option. Note that if 'static' or 'relative' mode flags were
+- * specified, the input nodemask will already have been saved. Saving
+- * it again is redundant, but safe.
+- *
+ * On success, returns 0, else 1
+ */
+-int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
++int mpol_parse_str(char *str, struct mempolicy **mpol, int unused)
+ {
+ struct mempolicy *new = NULL;
+ unsigned short mode;
+- unsigned short uninitialized_var(mode_flags);
++ unsigned short mode_flags;
+ nodemask_t nodes;
+ char *nodelist = strchr(str, ':');
+ char *flags = strchr(str, '=');
+@@ -2459,24 +2451,23 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
+ if (IS_ERR(new))
+ goto out;
+
+- if (no_context) {
+- /* save for contextualization */
+- new->w.user_nodemask = nodes;
+- } else {
+- int ret;
+- NODEMASK_SCRATCH(scratch);
+- if (scratch) {
+- task_lock(current);
+- ret = mpol_set_nodemask(new, &nodes, scratch);
+- task_unlock(current);
+- } else
+- ret = -ENOMEM;
+- NODEMASK_SCRATCH_FREE(scratch);
+- if (ret) {
+- mpol_put(new);
+- goto out;
+- }
+- }
++ /*
++ * Save nodes for mpol_to_str() to show the tmpfs mount options
++ * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
++ */
++ if (mode != MPOL_PREFERRED)
++ new->v.nodes = nodes;
++ else if (nodelist)
++ new->v.preferred_node = first_node(nodes);
++ else
++ new->flags |= MPOL_F_LOCAL;
++
++ /*
++ * Save nodes for contextualization: this will be used to "clone"
++ * the mempolicy in a specific context [cpuset] at a later time.
++ */
++ new->w.user_nodemask = nodes;
++
+ err = 0;
+
+ out:
+@@ -2496,13 +2487,13 @@ out:
+ * @buffer: to contain formatted mempolicy string
+ * @maxlen: length of @buffer
+ * @pol: pointer to mempolicy to be formatted
+- * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask
++ * @unused: redundant argument, to be removed later.
+ *
+ * Convert a mempolicy into a string.
+ * Returns the number of characters in buffer (if positive)
+ * or an error (negative)
+ */
+-int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
++int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int unused)
+ {
+ char *p = buffer;
+ int l;
+@@ -2528,7 +2519,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
+ case MPOL_PREFERRED:
+ nodes_clear(nodes);
+ if (flags & MPOL_F_LOCAL)
+- mode = MPOL_LOCAL; /* pseudo-policy */
++ mode = MPOL_LOCAL;
+ else
+ node_set(pol->v.preferred_node, nodes);
+ break;
+@@ -2536,10 +2527,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
+ case MPOL_BIND:
+ /* Fall through */
+ case MPOL_INTERLEAVE:
+- if (no_context)
+- nodes = pol->w.user_nodemask;
+- else
+- nodes = pol->v.nodes;
++ nodes = pol->v.nodes;
+ break;
+
+ default:
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 3f4e541..72416c8 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -434,8 +434,8 @@ exit:
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+ return NULL;
+ put_and_exit:
+- bh_unlock_sock(newsk);
+- sock_put(newsk);
++ inet_csk_prepare_forced_close(newsk);
++ dccp_done(newsk);
+ goto exit;
+ }
+
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 17ee85c..592b78c 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -609,7 +609,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
+ newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
+
+ if (__inet_inherit_port(sk, newsk) < 0) {
+- sock_put(newsk);
++ inet_csk_prepare_forced_close(newsk);
++ dccp_done(newsk);
+ goto out;
+ }
+ __inet6_hash(newsk, NULL);
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index c14d88a..907ef2c 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -647,6 +647,22 @@ void inet_csk_destroy_sock(struct sock *sk)
+ }
+ EXPORT_SYMBOL(inet_csk_destroy_sock);
+
++/* This function allows to force a closure of a socket after the call to
++ * tcp/dccp_create_openreq_child().
++ */
++void inet_csk_prepare_forced_close(struct sock *sk)
++{
++ /* sk_clone_lock locked the socket and set refcnt to 2 */
++ bh_unlock_sock(sk);
++ sock_put(sk);
++
++ /* The below has to be done to allow calling inet_csk_destroy_sock */
++ sock_set_flag(sk, SOCK_DEAD);
++ percpu_counter_inc(sk->sk_prot->orphan_count);
++ inet_sk(sk)->inet_num = 0;
++}
++EXPORT_SYMBOL(inet_csk_prepare_forced_close);
++
+ int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
+ {
+ struct inet_sock *inet = inet_sk(sk);
+diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
+index 466ea8b..f7fdbe9 100644
+--- a/net/ipv4/proc.c
++++ b/net/ipv4/proc.c
+@@ -233,7 +233,6 @@ static const struct snmp_mib snmp4_net_list[] = {
+ SNMP_MIB_ITEM("TCPDSACKOfoSent", LINUX_MIB_TCPDSACKOFOSENT),
+ SNMP_MIB_ITEM("TCPDSACKRecv", LINUX_MIB_TCPDSACKRECV),
+ SNMP_MIB_ITEM("TCPDSACKOfoRecv", LINUX_MIB_TCPDSACKOFORECV),
+- SNMP_MIB_ITEM("TCPAbortOnSyn", LINUX_MIB_TCPABORTONSYN),
+ SNMP_MIB_ITEM("TCPAbortOnData", LINUX_MIB_TCPABORTONDATA),
+ SNMP_MIB_ITEM("TCPAbortOnClose", LINUX_MIB_TCPABORTONCLOSE),
+ SNMP_MIB_ITEM("TCPAbortOnMemory", LINUX_MIB_TCPABORTONMEMORY),
+@@ -257,6 +256,8 @@ static const struct snmp_mib snmp4_net_list[] = {
+ SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
+ SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES),
+ SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
++ SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK),
++ SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE),
+ SNMP_MIB_SENTINEL
+ };
+
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 69fd720..5485077 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -552,6 +552,13 @@ static struct ctl_table ipv4_table[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
++ {
++ .procname = "tcp_challenge_ack_limit",
++ .data = &sysctl_tcp_challenge_ack_limit,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec
++ },
+ #ifdef CONFIG_NET_DMA
+ {
+ .procname = "tcp_dma_copybreak",
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index a08a621..aab8f08 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -86,6 +86,9 @@ int sysctl_tcp_app_win __read_mostly = 31;
+ int sysctl_tcp_adv_win_scale __read_mostly = 1;
+ EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
+
++/* rfc5961 challenge ack rate limiting */
++int sysctl_tcp_challenge_ack_limit = 100;
++
+ int sysctl_tcp_stdurg __read_mostly;
+ int sysctl_tcp_rfc1337 __read_mostly;
+ int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
+@@ -3700,6 +3703,24 @@ static int tcp_process_frto(struct sock *sk, int flag)
+ return 0;
+ }
+
++/* RFC 5961 7 [ACK Throttling] */
++static void tcp_send_challenge_ack(struct sock *sk)
++{
++ /* unprotected vars, we dont care of overwrites */
++ static u32 challenge_timestamp;
++ static unsigned int challenge_count;
++ u32 now = jiffies / HZ;
++
++ if (now != challenge_timestamp) {
++ challenge_timestamp = now;
++ challenge_count = 0;
++ }
++ if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
++ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
++ tcp_send_ack(sk);
++ }
++}
++
+ /* This routine deals with incoming acks, but not outgoing ones. */
+ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ {
+@@ -3718,8 +3739,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ /* If the ack is older than previous acks
+ * then we can probably ignore it.
+ */
+- if (before(ack, prior_snd_una))
++ if (before(ack, prior_snd_una)) {
++ /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
++ if (before(ack, prior_snd_una - tp->max_window)) {
++ tcp_send_challenge_ack(sk);
++ return -1;
++ }
+ goto old_ack;
++ }
+
+ /* If the ack includes data we haven't sent yet, discard
+ * this segment (RFC793 Section 3.9).
+@@ -5243,8 +5270,8 @@ out:
+ /* Does PAWS and seqno based validation of an incoming segment, flags will
+ * play significant role here.
+ */
+-static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+- const struct tcphdr *th, int syn_inerr)
++static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
++ const struct tcphdr *th, int syn_inerr)
+ {
+ const u8 *hash_location;
+ struct tcp_sock *tp = tcp_sk(sk);
+@@ -5269,38 +5296,48 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+ * an acknowledgment should be sent in reply (unless the RST
+ * bit is set, if so drop the segment and return)".
+ */
+- if (!th->rst)
++ if (!th->rst) {
++ if (th->syn)
++ goto syn_challenge;
+ tcp_send_dupack(sk, skb);
++ }
+ goto discard;
+ }
+
+ /* Step 2: check RST bit */
+ if (th->rst) {
+- tcp_reset(sk);
++ /* RFC 5961 3.2 :
++ * If sequence number exactly matches RCV.NXT, then
++ * RESET the connection
++ * else
++ * Send a challenge ACK
++ */
++ if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt)
++ tcp_reset(sk);
++ else
++ tcp_send_challenge_ack(sk);
+ goto discard;
+ }
+
+- /* ts_recent update must be made after we are sure that the packet
+- * is in window.
+- */
+- tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+-
+ /* step 3: check security and precedence [ignored] */
+
+- /* step 4: Check for a SYN in window. */
+- if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
++ /* step 4: Check for a SYN
++ * RFC 5691 4.2 : Send a challenge ack
++ */
++ if (th->syn) {
++syn_challenge:
+ if (syn_inerr)
+ TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
+- tcp_reset(sk);
+- return -1;
++ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
++ tcp_send_challenge_ack(sk);
++ goto discard;
+ }
+
+- return 1;
++ return true;
+
+ discard:
+ __kfree_skb(skb);
+- return 0;
++ return false;
+ }
+
+ /*
+@@ -5330,7 +5367,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ const struct tcphdr *th, unsigned int len)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+- int res;
+
+ /*
+ * Header prediction.
+@@ -5510,14 +5546,18 @@ slow_path:
+ * Standard slow path.
+ */
+
+- res = tcp_validate_incoming(sk, skb, th, 1);
+- if (res <= 0)
+- return -res;
++ if (!tcp_validate_incoming(sk, skb, th, 1))
++ return 0;
+
+ step5:
+ if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
+ goto discard;
+
++ /* ts_recent update must be made after we are sure that the packet
++ * is in window.
++ */
++ tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
++
+ tcp_rcv_rtt_measure_ts(sk, skb);
+
+ /* Process urgent data. */
+@@ -5822,7 +5862,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ int queued = 0;
+- int res;
+
+ tp->rx_opt.saw_tstamp = 0;
+
+@@ -5877,9 +5916,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ return 0;
+ }
+
+- res = tcp_validate_incoming(sk, skb, th, 0);
+- if (res <= 0)
+- return -res;
++ if (!tcp_validate_incoming(sk, skb, th, 0))
++ return 0;
+
+ /* step 5: check the ACK field */
+ if (th->ack) {
+@@ -5990,6 +6028,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ } else
+ goto discard;
+
++ /* ts_recent update must be made after we are sure that the packet
++ * is in window.
++ */
++ tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
++
+ /* step 6: check the URG bit */
+ tcp_urg(sk, skb, th);
+
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 58c09a0..a97c9ad 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1520,9 +1520,8 @@ exit:
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+ return NULL;
+ put_and_exit:
+- tcp_clear_xmit_timers(newsk);
+- bh_unlock_sock(newsk);
+- sock_put(newsk);
++ inet_csk_prepare_forced_close(newsk);
++ tcp_done(newsk);
+ goto exit;
+ }
+ EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index ccab3c8..db10805 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1524,7 +1524,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+ #endif
+
+ if (__inet_inherit_port(sk, newsk) < 0) {
+- sock_put(newsk);
++ inet_csk_prepare_forced_close(newsk);
++ tcp_done(newsk);
+ goto out;
+ }
+ __inet6_hash(newsk, NULL);
+diff --git a/net/mac80211/status.c b/net/mac80211/status.c
+index 16518f3..67df50e 100644
+--- a/net/mac80211/status.c
++++ b/net/mac80211/status.c
+@@ -429,7 +429,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
+ IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+ IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
+
+- ieee80211_set_bar_pending(sta, tid, ssn);
++ if (local->hw.flags &
++ IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL)
++ ieee80211_stop_tx_ba_session(&sta->sta, tid);
++ else
++ ieee80211_set_bar_pending(sta, tid, ssn);
+ }
+ }
+
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 29b942c..f08b9166 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -876,7 +876,7 @@ ok:
+ q->now = psched_get_time();
+ start_at = jiffies;
+
+- next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
++ next_event = q->now + 5LLU * PSCHED_TICKS_PER_SEC;
+
+ for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
+ /* common case optimization - skip event handler quickly */
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index c90b832..56c3f85 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -880,16 +880,35 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
+ return task;
+ }
+
++/*
++ * rpc_free_task - release rpc task and perform cleanups
++ *
++ * Note that we free up the rpc_task _after_ rpc_release_calldata()
++ * in order to work around a workqueue dependency issue.
++ *
++ * Tejun Heo states:
++ * "Workqueue currently considers two work items to be the same if they're
++ * on the same address and won't execute them concurrently - ie. it
++ * makes a work item which is queued again while being executed wait
++ * for the previous execution to complete.
++ *
++ * If a work function frees the work item, and then waits for an event
++ * which should be performed by another work item and *that* work item
++ * recycles the freed work item, it can create a false dependency loop.
++ * There really is no reliable way to detect this short of verifying
++ * every memory free."
++ *
++ */
+ static void rpc_free_task(struct rpc_task *task)
+ {
+- const struct rpc_call_ops *tk_ops = task->tk_ops;
+- void *calldata = task->tk_calldata;
++ unsigned short tk_flags = task->tk_flags;
++
++ rpc_release_calldata(task->tk_ops, task->tk_calldata);
+
+- if (task->tk_flags & RPC_TASK_DYNAMIC) {
++ if (tk_flags & RPC_TASK_DYNAMIC) {
+ dprintk("RPC: %5u freeing task\n", task->tk_pid);
+ mempool_free(task, rpc_task_mempool);
+ }
+- rpc_release_calldata(tk_ops, calldata);
+ }
+
+ static void rpc_async_release(struct work_struct *work)
diff --git a/1037_linux-3.2.38.patch b/1037_linux-3.2.38.patch
new file mode 100644
index 00000000..a3c106f5
--- /dev/null
+++ b/1037_linux-3.2.38.patch
@@ -0,0 +1,4587 @@
+diff --git a/Makefile b/Makefile
+index 21c77e2..c8c9d02 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 37
++SUBLEVEL = 38
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
+index 08c82fd..3606e85 100644
+--- a/arch/arm/kernel/head.S
++++ b/arch/arm/kernel/head.S
+@@ -221,6 +221,7 @@ __create_page_tables:
+ /*
+ * Then map boot params address in r2 or
+ * the first 1MB of ram if boot params address is not specified.
++ * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
+ */
+ mov r0, r2, lsr #SECTION_SHIFT
+ movs r0, r0, lsl #SECTION_SHIFT
+@@ -229,6 +230,8 @@ __create_page_tables:
+ add r3, r3, #PAGE_OFFSET
+ add r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
+ orr r6, r7, r0
++ str r6, [r3], #1 << PMD_ORDER
++ add r6, r6, #1 << SECTION_SHIFT
+ str r6, [r3]
+
+ #ifdef CONFIG_DEBUG_LL
+diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h b/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
+index ec0f0b0..18c083e 100644
+--- a/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
++++ b/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
+@@ -460,6 +460,9 @@
+ GPIO76_LCD_PCLK, \
+ GPIO77_LCD_BIAS
+
++/* these enable a work-around for a hw bug in pxa27x during ac97 warm reset */
++#define GPIO113_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO113, AF0, DEFAULT)
++#define GPIO95_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO95, AF0, DEFAULT)
+
+ extern int keypad_set_wake(unsigned int on);
+ #endif /* __ASM_ARCH_MFP_PXA27X_H */
+diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
+index bc5a98e..a9447f9 100644
+--- a/arch/arm/mach-pxa/pxa27x.c
++++ b/arch/arm/mach-pxa/pxa27x.c
+@@ -47,9 +47,9 @@ void pxa27x_clear_otgph(void)
+ EXPORT_SYMBOL(pxa27x_clear_otgph);
+
+ static unsigned long ac97_reset_config[] = {
+- GPIO113_GPIO,
++ GPIO113_AC97_nRESET_GPIO_HIGH,
+ GPIO113_AC97_nRESET,
+- GPIO95_GPIO,
++ GPIO95_AC97_nRESET_GPIO_HIGH,
+ GPIO95_AC97_nRESET,
+ };
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 1aa664a..e1dd92c 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -500,25 +500,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction dir,
+ void (*op)(const void *, size_t, int))
+ {
++ unsigned long pfn;
++ size_t left = size;
++
++ pfn = page_to_pfn(page) + offset / PAGE_SIZE;
++ offset %= PAGE_SIZE;
++
+ /*
+ * A single sg entry may refer to multiple physically contiguous
+ * pages. But we still need to process highmem pages individually.
+ * If highmem is not configured then the bulk of this loop gets
+ * optimized out.
+ */
+- size_t left = size;
+ do {
+ size_t len = left;
+ void *vaddr;
+
++ page = pfn_to_page(pfn);
++
+ if (PageHighMem(page)) {
+- if (len + offset > PAGE_SIZE) {
+- if (offset >= PAGE_SIZE) {
+- page += offset / PAGE_SIZE;
+- offset %= PAGE_SIZE;
+- }
++ if (len + offset > PAGE_SIZE)
+ len = PAGE_SIZE - offset;
+- }
+ vaddr = kmap_high_get(page);
+ if (vaddr) {
+ vaddr += offset;
+@@ -535,7 +537,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
+ op(vaddr, len, dir);
+ }
+ offset = 0;
+- page++;
++ pfn++;
+ left -= len;
+ } while (left);
+ }
+diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
+index cc926c9..323ce1a 100644
+--- a/arch/arm/vfp/entry.S
++++ b/arch/arm/vfp/entry.S
+@@ -22,7 +22,7 @@
+ @ IRQs disabled.
+ @
+ ENTRY(do_vfp)
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPT_COUNT
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ add r11, r4, #1 @ increment it
+ str r11, [r10, #TI_PREEMPT]
+@@ -35,7 +35,7 @@ ENTRY(do_vfp)
+ ENDPROC(do_vfp)
+
+ ENTRY(vfp_null_entry)
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPT_COUNT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ sub r11, r4, #1 @ decrement it
+@@ -53,7 +53,7 @@ ENDPROC(vfp_null_entry)
+
+ __INIT
+ ENTRY(vfp_testing_entry)
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPT_COUNT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ sub r11, r4, #1 @ decrement it
+diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
+index 3a0efaa..6ff903e 100644
+--- a/arch/arm/vfp/vfphw.S
++++ b/arch/arm/vfp/vfphw.S
+@@ -167,7 +167,7 @@ vfp_hw_state_valid:
+ @ else it's one 32-bit instruction, so
+ @ always subtract 4 from the following
+ @ instruction address.
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPT_COUNT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ sub r11, r4, #1 @ decrement it
+@@ -191,7 +191,7 @@ look_for_VFP_exceptions:
+ @ not recognised by VFP
+
+ DBGSTR "not VFP"
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPT_COUNT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+ sub r11, r4, #1 @ decrement it
+diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
+index 141dce3..2a73d82 100644
+--- a/arch/powerpc/kvm/emulate.c
++++ b/arch/powerpc/kvm/emulate.c
+@@ -35,6 +35,7 @@
+ #define OP_TRAP_64 2
+
+ #define OP_31_XOP_LWZX 23
++#define OP_31_XOP_DCBF 86
+ #define OP_31_XOP_LBZX 87
+ #define OP_31_XOP_STWX 151
+ #define OP_31_XOP_STBX 215
+@@ -370,6 +371,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
+ break;
+
++ case OP_31_XOP_DCBF:
+ case OP_31_XOP_DCBI:
+ /* Do nothing. The guest is performing dcbi because
+ * hardware DMA is not snooped by the dcache, but
+diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
+index c447a27..945b7cd 100644
+--- a/arch/s390/include/asm/timex.h
++++ b/arch/s390/include/asm/timex.h
+@@ -137,4 +137,32 @@ static inline unsigned long long get_clock_monotonic(void)
+ return get_clock_xt() - sched_clock_base_cc;
+ }
+
++/**
++ * tod_to_ns - convert a TOD format value to nanoseconds
++ * @todval: to be converted TOD format value
++ * Returns: number of nanoseconds that correspond to the TOD format value
++ *
++ * Converting a 64 Bit TOD format value to nanoseconds means that the value
++ * must be divided by 4.096. In order to achieve that we multiply with 125
++ * and divide by 512:
++ *
++ * ns = (todval * 125) >> 9;
++ *
++ * In order to avoid an overflow with the multiplication we can rewrite this.
++ * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits)
++ * we end up with
++ *
++ * ns = ((2^32 * th + tl) * 125 ) >> 9;
++ * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9);
++ *
++ */
++static inline unsigned long long tod_to_ns(unsigned long long todval)
++{
++ unsigned long long ns;
++
++ ns = ((todval >> 32) << 23) * 125;
++ ns += ((todval & 0xffffffff) * 125) >> 9;
++ return ns;
++}
++
+ #endif
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index e03c555..8644366 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -64,7 +64,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
+ */
+ unsigned long long notrace __kprobes sched_clock(void)
+ {
+- return (get_clock_monotonic() * 125) >> 9;
++ return tod_to_ns(get_clock_monotonic());
+ }
+
+ /*
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 278ee00..5482d1e 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -391,7 +391,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
+ return 0;
+ }
+
+- sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;
++ sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+
+ hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
+ VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
+diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
+index f38112b..978b7fd 100644
+--- a/arch/sh/include/asm/elf.h
++++ b/arch/sh/include/asm/elf.h
+@@ -202,9 +202,9 @@ extern void __kernel_vsyscall;
+ if (vdso_enabled) \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
+ else \
+- NEW_AUX_ENT(AT_IGNORE, 0);
++ NEW_AUX_ENT(AT_IGNORE, 0)
+ #else
+-#define VSYSCALL_AUX_ENT
++#define VSYSCALL_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0)
+ #endif /* CONFIG_VSYSCALL */
+
+ #ifdef CONFIG_SH_FPU
+diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
+index 7093e4a..035cd81 100644
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -90,6 +90,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
+ #endif /* CONFIG_X86_32 */
+
+ extern int add_efi_memmap;
++extern unsigned long x86_efi_facility;
+ extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
+ extern void efi_memblock_x86_reserve_range(void);
+ extern void efi_call_phys_prelog(void);
+diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
+index 0012d09..88eae2a 100644
+--- a/arch/x86/include/asm/traps.h
++++ b/arch/x86/include/asm/traps.h
+@@ -89,4 +89,29 @@ asmlinkage void smp_thermal_interrupt(void);
+ asmlinkage void mce_threshold_interrupt(void);
+ #endif
+
++/* Interrupts/Exceptions */
++enum {
++ X86_TRAP_DE = 0, /* 0, Divide-by-zero */
++ X86_TRAP_DB, /* 1, Debug */
++ X86_TRAP_NMI, /* 2, Non-maskable Interrupt */
++ X86_TRAP_BP, /* 3, Breakpoint */
++ X86_TRAP_OF, /* 4, Overflow */
++ X86_TRAP_BR, /* 5, Bound Range Exceeded */
++ X86_TRAP_UD, /* 6, Invalid Opcode */
++ X86_TRAP_NM, /* 7, Device Not Available */
++ X86_TRAP_DF, /* 8, Double Fault */
++ X86_TRAP_OLD_MF, /* 9, Coprocessor Segment Overrun */
++ X86_TRAP_TS, /* 10, Invalid TSS */
++ X86_TRAP_NP, /* 11, Segment Not Present */
++ X86_TRAP_SS, /* 12, Stack Segment Fault */
++ X86_TRAP_GP, /* 13, General Protection Fault */
++ X86_TRAP_PF, /* 14, Page Fault */
++ X86_TRAP_SPURIOUS, /* 15, Spurious Interrupt */
++ X86_TRAP_MF, /* 16, x87 Floating-Point Exception */
++ X86_TRAP_AC, /* 17, Alignment Check */
++ X86_TRAP_MC, /* 18, Machine Check */
++ X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */
++ X86_TRAP_IRET = 32, /* 32, IRET Exception */
++};
++
+ #endif /* _ASM_X86_TRAPS_H */
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index 4893d58..d2d488b8 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -1074,7 +1074,6 @@ ENTRY(xen_failsafe_callback)
+ lea 16(%esp),%esp
+ CFI_ADJUST_CFA_OFFSET -16
+ jz 5f
+- addl $16,%esp
+ jmp iret_exc
+ 5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+ SAVE_ALL
+diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
+index b3300e6..e328f69 100644
+--- a/arch/x86/kernel/irqinit.c
++++ b/arch/x86/kernel/irqinit.c
+@@ -61,7 +61,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
+ outb(0, 0xF0);
+ if (ignore_fpu_irq || !boot_cpu_data.hard_math)
+ return IRQ_NONE;
+- math_error(get_irq_regs(), 0, 16);
++ math_error(get_irq_regs(), 0, X86_TRAP_MF);
+ return IRQ_HANDLED;
+ }
+
+diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
+index 12fcbe2..f7d1a64 100644
+--- a/arch/x86/kernel/msr.c
++++ b/arch/x86/kernel/msr.c
+@@ -175,6 +175,9 @@ static int msr_open(struct inode *inode, struct file *file)
+ unsigned int cpu;
+ struct cpuinfo_x86 *c;
+
++ if (!capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
+ cpu = iminor(file->f_path.dentry->d_inode);
+ if (cpu >= nr_cpu_ids || !cpu_online(cpu))
+ return -ENXIO; /* No such CPU */
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index e61f79c..47f4e5f 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -603,7 +603,7 @@ static void native_machine_emergency_restart(void)
+ break;
+
+ case BOOT_EFI:
+- if (efi_enabled)
++ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ efi.reset_system(reboot_mode ?
+ EFI_RESET_WARM :
+ EFI_RESET_COLD,
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 0d403aa..b506f41 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -631,6 +631,83 @@ static __init void reserve_ibft_region(void)
+
+ static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
+
++static bool __init snb_gfx_workaround_needed(void)
++{
++#ifdef CONFIG_PCI
++ int i;
++ u16 vendor, devid;
++ static const __initconst u16 snb_ids[] = {
++ 0x0102,
++ 0x0112,
++ 0x0122,
++ 0x0106,
++ 0x0116,
++ 0x0126,
++ 0x010a,
++ };
++
++ /* Assume no if something weird is going on with PCI */
++ if (!early_pci_allowed())
++ return false;
++
++ vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
++ if (vendor != 0x8086)
++ return false;
++
++ devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
++ for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
++ if (devid == snb_ids[i])
++ return true;
++#endif
++
++ return false;
++}
++
++/*
++ * Sandy Bridge graphics has trouble with certain ranges, exclude
++ * them from allocation.
++ */
++static void __init trim_snb_memory(void)
++{
++ static const __initconst unsigned long bad_pages[] = {
++ 0x20050000,
++ 0x20110000,
++ 0x20130000,
++ 0x20138000,
++ 0x40004000,
++ };
++ int i;
++
++ if (!snb_gfx_workaround_needed())
++ return;
++
++ printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
++
++ /*
++ * Reserve all memory below the 1 MB mark that has not
++ * already been reserved.
++ */
++ memblock_reserve(0, 1<<20);
++
++ for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
++ if (memblock_reserve(bad_pages[i], PAGE_SIZE))
++ printk(KERN_WARNING "failed to reserve 0x%08lx\n",
++ bad_pages[i]);
++ }
++}
++
++/*
++ * Here we put platform-specific memory range workarounds, i.e.
++ * memory known to be corrupt or otherwise in need to be reserved on
++ * specific platforms.
++ *
++ * If this gets used more widely it could use a real dispatch mechanism.
++ */
++static void __init trim_platform_memory_ranges(void)
++{
++ trim_snb_memory();
++}
++
+ static void __init trim_bios_range(void)
+ {
+ /*
+@@ -651,6 +728,7 @@ static void __init trim_bios_range(void)
+ * take them out.
+ */
+ e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
++
+ sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ }
+
+@@ -750,15 +828,16 @@ void __init setup_arch(char **cmdline_p)
+ #endif
+ #ifdef CONFIG_EFI
+ if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
+-#ifdef CONFIG_X86_32
+- "EL32",
+-#else
+- "EL64",
+-#endif
+- 4)) {
+- efi_enabled = 1;
+- efi_memblock_x86_reserve_range();
++ "EL32", 4)) {
++ set_bit(EFI_BOOT, &x86_efi_facility);
++ } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
++ "EL64", 4)) {
++ set_bit(EFI_BOOT, &x86_efi_facility);
++ set_bit(EFI_64BIT, &x86_efi_facility);
+ }
++
++ if (efi_enabled(EFI_BOOT))
++ efi_memblock_x86_reserve_range();
+ #endif
+
+ x86_init.oem.arch_setup();
+@@ -831,7 +910,7 @@ void __init setup_arch(char **cmdline_p)
+
+ finish_e820_parsing();
+
+- if (efi_enabled)
++ if (efi_enabled(EFI_BOOT))
+ efi_init();
+
+ dmi_scan_machine();
+@@ -914,7 +993,7 @@ void __init setup_arch(char **cmdline_p)
+ * The EFI specification says that boot service code won't be called
+ * after ExitBootServices(). This is, in fact, a lie.
+ */
+- if (efi_enabled)
++ if (efi_enabled(EFI_MEMMAP))
+ efi_reserve_boot_services();
+
+ /* preallocate 4k for mptable mpc */
+@@ -929,6 +1008,8 @@ void __init setup_arch(char **cmdline_p)
+
+ setup_trampolines();
+
++ trim_platform_memory_ranges();
++
+ init_gbpages();
+
+ /* max_pfn_mapped is updated here */
+@@ -1048,7 +1129,7 @@ void __init setup_arch(char **cmdline_p)
+
+ #ifdef CONFIG_VT
+ #if defined(CONFIG_VGA_CONSOLE)
+- if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
++ if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
+ conswitchp = &vga_con;
+ #elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 31d9d0f..e6fbb94 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -119,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
+ * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
+ * On nmi (interrupt 2), do_trap should not be called.
+ */
+- if (trapnr < 6)
++ if (trapnr < X86_TRAP_UD)
+ goto vm86_trap;
+ goto trap_signal;
+ }
+@@ -203,27 +203,31 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
+ do_trap(trapnr, signr, str, regs, error_code, &info); \
+ }
+
+-DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
+-DO_ERROR(4, SIGSEGV, "overflow", overflow)
+-DO_ERROR(5, SIGSEGV, "bounds", bounds)
+-DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
+-DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
+-DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
+-DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
++DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
++ regs->ip)
++DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
++DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
++DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
++ regs->ip)
++DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
++ coprocessor_segment_overrun)
++DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
++DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
+ #ifdef CONFIG_X86_32
+-DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
++DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
+ #endif
+-DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
++DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
++ BUS_ADRALN, 0)
+
+ #ifdef CONFIG_X86_64
+ /* Runs on IST stack */
+ dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
+ {
+ if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
+- 12, SIGBUS) == NOTIFY_STOP)
++ X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
+ return;
+ preempt_conditional_sti(regs);
+- do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
++ do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
+ preempt_conditional_cli(regs);
+ }
+
+@@ -233,10 +237,10 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
+ struct task_struct *tsk = current;
+
+ /* Return not checked because double check cannot be ignored */
+- notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
++ notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
+
+ tsk->thread.error_code = error_code;
+- tsk->thread.trap_no = 8;
++ tsk->thread.trap_no = X86_TRAP_DF;
+
+ /*
+ * This is always a kernel trap and never fixable (and thus must
+@@ -264,7 +268,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
+ goto gp_in_kernel;
+
+ tsk->thread.error_code = error_code;
+- tsk->thread.trap_no = 13;
++ tsk->thread.trap_no = X86_TRAP_GP;
+
+ if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
+ printk_ratelimit()) {
+@@ -291,9 +295,9 @@ gp_in_kernel:
+ return;
+
+ tsk->thread.error_code = error_code;
+- tsk->thread.trap_no = 13;
+- if (notify_die(DIE_GPF, "general protection fault", regs,
+- error_code, 13, SIGSEGV) == NOTIFY_STOP)
++ tsk->thread.trap_no = X86_TRAP_GP;
++ if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
++ X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
+ return;
+ die("general protection fault", regs, error_code);
+ }
+@@ -302,13 +306,14 @@ gp_in_kernel:
+ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
+ {
+ #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+- if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
+- == NOTIFY_STOP)
++ if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
++ SIGTRAP) == NOTIFY_STOP)
+ return;
+ #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+ #ifdef CONFIG_KPROBES
+- if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
+- == NOTIFY_STOP)
++
++ if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
++ SIGTRAP) == NOTIFY_STOP)
+ return;
+ #else
+ if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP)
+@@ -317,7 +322,7 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
+ #endif
+
+ preempt_conditional_sti(regs);
+- do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
++ do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
+ preempt_conditional_cli(regs);
+ }
+
+@@ -415,8 +420,8 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
+ preempt_conditional_sti(regs);
+
+ if (regs->flags & X86_VM_MASK) {
+- handle_vm86_trap((struct kernel_vm86_regs *) regs,
+- error_code, 1);
++ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
++ X86_TRAP_DB);
+ preempt_conditional_cli(regs);
+ return;
+ }
+@@ -451,7 +456,8 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
+ struct task_struct *task = current;
+ siginfo_t info;
+ unsigned short err;
+- char *str = (trapnr == 16) ? "fpu exception" : "simd exception";
++ char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
++ "simd exception";
+
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
+ return;
+@@ -476,7 +482,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_addr = (void __user *)regs->ip;
+- if (trapnr == 16) {
++ if (trapnr == X86_TRAP_MF) {
+ unsigned short cwd, swd;
+ /*
+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
+@@ -520,10 +526,11 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
+ info.si_code = FPE_FLTRES;
+ } else {
+ /*
+- * If we're using IRQ 13, or supposedly even some trap 16
+- * implementations, it's possible we get a spurious trap...
++ * If we're using IRQ 13, or supposedly even some trap
++ * X86_TRAP_MF implementations, it's possible
++ * we get a spurious trap, which is not an error.
+ */
+- return; /* Spurious trap, no error */
++ return;
+ }
+ force_sig_info(SIGFPE, &info, task);
+ }
+@@ -534,13 +541,13 @@ dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
+ ignore_fpu_irq = 1;
+ #endif
+
+- math_error(regs, error_code, 16);
++ math_error(regs, error_code, X86_TRAP_MF);
+ }
+
+ dotraplinkage void
+ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
+ {
+- math_error(regs, error_code, 19);
++ math_error(regs, error_code, X86_TRAP_XF);
+ }
+
+ dotraplinkage void
+@@ -658,20 +665,21 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
+ info.si_errno = 0;
+ info.si_code = ILL_BADSTK;
+ info.si_addr = NULL;
+- if (notify_die(DIE_TRAP, "iret exception",
+- regs, error_code, 32, SIGILL) == NOTIFY_STOP)
++ if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
++ X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
+ return;
+- do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
++ do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
++ &info);
+ }
+ #endif
+
+ /* Set of traps needed for early debugging. */
+ void __init early_trap_init(void)
+ {
+- set_intr_gate_ist(1, &debug, DEBUG_STACK);
++ set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
+ /* int3 can be called from all */
+- set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
+- set_intr_gate(14, &page_fault);
++ set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
++ set_intr_gate(X86_TRAP_PF, &page_fault);
+ load_idt(&idt_descr);
+ }
+
+@@ -687,30 +695,30 @@ void __init trap_init(void)
+ early_iounmap(p, 4);
+ #endif
+
+- set_intr_gate(0, &divide_error);
+- set_intr_gate_ist(2, &nmi, NMI_STACK);
++ set_intr_gate(X86_TRAP_DE, &divide_error);
++ set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
+ /* int4 can be called from all */
+- set_system_intr_gate(4, &overflow);
+- set_intr_gate(5, &bounds);
+- set_intr_gate(6, &invalid_op);
+- set_intr_gate(7, &device_not_available);
++ set_system_intr_gate(X86_TRAP_OF, &overflow);
++ set_intr_gate(X86_TRAP_BR, &bounds);
++ set_intr_gate(X86_TRAP_UD, &invalid_op);
++ set_intr_gate(X86_TRAP_NM, &device_not_available);
+ #ifdef CONFIG_X86_32
+- set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
++ set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
+ #else
+- set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
++ set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
+ #endif
+- set_intr_gate(9, &coprocessor_segment_overrun);
+- set_intr_gate(10, &invalid_TSS);
+- set_intr_gate(11, &segment_not_present);
+- set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
+- set_intr_gate(13, &general_protection);
+- set_intr_gate(15, &spurious_interrupt_bug);
+- set_intr_gate(16, &coprocessor_error);
+- set_intr_gate(17, &alignment_check);
++ set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
++ set_intr_gate(X86_TRAP_TS, &invalid_TSS);
++ set_intr_gate(X86_TRAP_NP, &segment_not_present);
++ set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
++ set_intr_gate(X86_TRAP_GP, &general_protection);
++ set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
++ set_intr_gate(X86_TRAP_MF, &coprocessor_error);
++ set_intr_gate(X86_TRAP_AC, &alignment_check);
+ #ifdef CONFIG_X86_MCE
+- set_intr_gate_ist(18, &machine_check, MCE_STACK);
++ set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
+ #endif
+- set_intr_gate(19, &simd_coprocessor_error);
++ set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);
+
+ /* Reserve all the builtin and the syscall vector: */
+ for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 4d320b2..bef9991 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -49,9 +49,6 @@
+ #define EFI_DEBUG 1
+ #define PFX "EFI: "
+
+-int efi_enabled;
+-EXPORT_SYMBOL(efi_enabled);
+-
+ struct efi __read_mostly efi = {
+ .mps = EFI_INVALID_TABLE_ADDR,
+ .acpi = EFI_INVALID_TABLE_ADDR,
+@@ -70,9 +67,25 @@ struct efi_memory_map memmap;
+ static struct efi efi_phys __initdata;
+ static efi_system_table_t efi_systab __initdata;
+
++static inline bool efi_is_native(void)
++{
++ return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
++}
++
++unsigned long x86_efi_facility;
++
++/*
++ * Returns 1 if 'facility' is enabled, 0 otherwise.
++ */
++int efi_enabled(int facility)
++{
++ return test_bit(facility, &x86_efi_facility) != 0;
++}
++EXPORT_SYMBOL(efi_enabled);
++
+ static int __init setup_noefi(char *arg)
+ {
+- efi_enabled = 0;
++ clear_bit(EFI_BOOT, &x86_efi_facility);
+ return 0;
+ }
+ early_param("noefi", setup_noefi);
+@@ -440,6 +453,9 @@ void __init efi_init(void)
+ int i = 0;
+ void *tmp;
+
++ if (!efi_is_native())
++ return;
++
+ #ifdef CONFIG_X86_32
+ efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
+ #else
+@@ -467,6 +483,8 @@ void __init efi_init(void)
+ efi.systab->hdr.revision >> 16,
+ efi.systab->hdr.revision & 0xffff);
+
++ set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
++
+ /*
+ * Show what we know for posterity
+ */
+@@ -529,6 +547,8 @@ void __init efi_init(void)
+ early_iounmap(config_tables,
+ efi.systab->nr_tables * sizeof(efi_config_table_t));
+
++ set_bit(EFI_CONFIG_TABLES, &x86_efi_facility);
++
+ /*
+ * Check out the runtime services table. We need to map
+ * the runtime services table so that we can grab the physical
+@@ -552,6 +572,8 @@ void __init efi_init(void)
+ * virtual mode.
+ */
+ efi.get_time = phys_efi_get_time;
++
++ set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
+ } else
+ printk(KERN_ERR "Could not map the EFI runtime service "
+ "table!\n");
+@@ -571,6 +593,8 @@ void __init efi_init(void)
+ if (add_efi_memmap)
+ do_add_efi_memmap();
+
++ set_bit(EFI_MEMMAP, &x86_efi_facility);
++
+ #ifdef CONFIG_X86_32
+ x86_platform.get_wallclock = efi_get_time;
+ x86_platform.set_wallclock = efi_set_rtc_mmss;
+@@ -731,7 +755,7 @@ void __init efi_enter_virtual_mode(void)
+ *
+ * Call EFI services through wrapper functions.
+ */
+- efi.runtime_version = efi_systab.fw_revision;
++ efi.runtime_version = efi_systab.hdr.revision;
+ efi.get_time = virt_efi_get_time;
+ efi.set_time = virt_efi_set_time;
+ efi.get_wakeup_time = virt_efi_get_wakeup_time;
+@@ -747,6 +771,7 @@ void __init efi_enter_virtual_mode(void)
+ efi.query_capsule_caps = virt_efi_query_capsule_caps;
+ if (__supported_pte_mask & _PAGE_NX)
+ runtime_code_page_mkexec();
++ clear_bit(EFI_MEMMAP, &x86_efi_facility);
+ early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
+ memmap.map = NULL;
+ kfree(new_memmap);
+@@ -760,6 +785,9 @@ u32 efi_mem_type(unsigned long phys_addr)
+ efi_memory_desc_t *md;
+ void *p;
+
++ if (!efi_enabled(EFI_MEMMAP))
++ return 0;
++
+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ md = p;
+ if ((md->phys_addr <= phys_addr) &&
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index ac3aa54..0fba86d 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -38,7 +38,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/fixmap.h>
+
+-static pgd_t save_pgd __initdata;
++static pgd_t *save_pgd __initdata;
+ static unsigned long efi_flags __initdata;
+
+ static void __init early_code_mapping_set_exec(int executable)
+@@ -61,12 +61,20 @@ static void __init early_code_mapping_set_exec(int executable)
+ void __init efi_call_phys_prelog(void)
+ {
+ unsigned long vaddress;
++ int pgd;
++ int n_pgds;
+
+ early_code_mapping_set_exec(1);
+ local_irq_save(efi_flags);
+- vaddress = (unsigned long)__va(0x0UL);
+- save_pgd = *pgd_offset_k(0x0UL);
+- set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress));
++
++ n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
++ save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
++
++ for (pgd = 0; pgd < n_pgds; pgd++) {
++ save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
++ vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
++ set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
++ }
+ __flush_tlb_all();
+ }
+
+@@ -75,7 +83,11 @@ void __init efi_call_phys_epilog(void)
+ /*
+ * After the lock is released, the original page table is restored.
+ */
+- set_pgd(pgd_offset_k(0x0UL), save_pgd);
++ int pgd;
++ int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
++ for (pgd = 0; pgd < n_pgds; pgd++)
++ set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
++ kfree(save_pgd);
+ __flush_tlb_all();
+ local_irq_restore(efi_flags);
+ early_code_mapping_set_exec(0);
+diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
+index f31c5c5..a6664d2 100644
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -255,7 +255,7 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
+ return acpi_rsdp;
+ #endif
+
+- if (efi_enabled) {
++ if (efi_enabled(EFI_CONFIG_TABLES)) {
+ if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
+ return efi.acpi20;
+ else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index 0e8e2de..de0791c 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -989,6 +989,9 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
+ return -EINVAL;
+ }
+
++ if (!dev)
++ return -EINVAL;
++
+ dev->cpu = pr->id;
+
+ if (max_cstate == 0)
+@@ -1175,6 +1178,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+ }
+
+ /* Populate Updated C-state information */
++ acpi_processor_get_power_info(pr);
+ acpi_processor_setup_cpuidle_states(pr);
+
+ /* Enable all cpuidle devices */
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 608257a..b07edc4 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -395,7 +395,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
+
+ /* Asmedia */
+- { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1061 */
++ { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
++ { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */
++ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
++ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+
+ /* Generic, PCI class code for AHCI */
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
+index 4a0f314..be984e0 100644
+--- a/drivers/block/drbd/drbd_req.c
++++ b/drivers/block/drbd/drbd_req.c
+@@ -37,6 +37,7 @@ static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req
+ const int rw = bio_data_dir(bio);
+ int cpu;
+ cpu = part_stat_lock();
++ part_round_stats(cpu, &mdev->vdisk->part0);
+ part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
+ part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
+ part_inc_in_flight(&mdev->vdisk->part0, rw);
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index de9c800..166cb36 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -546,6 +546,7 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
+ {
+ struct virtio_blk *vblk = vdev->priv;
+ int index = vblk->index;
++ int refc;
+
+ /* Prevent config work handler from accessing the device. */
+ mutex_lock(&vblk->config_lock);
+@@ -560,11 +561,15 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
+
+ flush_work(&vblk->config_work);
+
++ refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
+ put_disk(vblk->disk);
+ mempool_destroy(vblk->pool);
+ vdev->config->del_vqs(vdev);
+ kfree(vblk);
+- ida_simple_remove(&vd_index_ida, index);
++
++ /* Only free device id if we don't have any users */
++ if (refc == 1)
++ ida_simple_remove(&vd_index_ida, index);
+ }
+
+ static const struct virtio_device_id id_table[] = {
+diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
+index 2dbf32b..714560f 100644
+--- a/drivers/dma/ioat/dma_v3.c
++++ b/drivers/dma/ioat/dma_v3.c
+@@ -949,7 +949,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
+ goto free_resources;
+ }
+ }
+- dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE);
++ dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* skip validate if the capability is not present */
+ if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
+diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
+index 495198a..8cc8676 100644
+--- a/drivers/edac/edac_pci_sysfs.c
++++ b/drivers/edac/edac_pci_sysfs.c
+@@ -257,7 +257,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
+ struct edac_pci_dev_attribute *edac_pci_dev;
+ edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
+
+- if (edac_pci_dev->show)
++ if (edac_pci_dev->store)
+ return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
+ return -EIO;
+ }
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index b298158..982f1f5 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -16,6 +16,7 @@
+ */
+ static char dmi_empty_string[] = " ";
+
++static u16 __initdata dmi_ver;
+ /*
+ * Catch too early calls to dmi_check_system():
+ */
+@@ -118,12 +119,12 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
+ return 0;
+ }
+
+-static int __init dmi_checksum(const u8 *buf)
++static int __init dmi_checksum(const u8 *buf, u8 len)
+ {
+ u8 sum = 0;
+ int a;
+
+- for (a = 0; a < 15; a++)
++ for (a = 0; a < len; a++)
+ sum += buf[a];
+
+ return sum == 0;
+@@ -161,8 +162,10 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
+ return;
+
+ for (i = 0; i < 16 && (is_ff || is_00); i++) {
+- if(d[i] != 0x00) is_ff = 0;
+- if(d[i] != 0xFF) is_00 = 0;
++ if (d[i] != 0x00)
++ is_00 = 0;
++ if (d[i] != 0xFF)
++ is_ff = 0;
+ }
+
+ if (is_ff || is_00)
+@@ -172,7 +175,15 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int inde
+ if (!s)
+ return;
+
+- sprintf(s, "%pUB", d);
++ /*
++ * As of version 2.6 of the SMBIOS specification, the first 3 fields of
++ * the UUID are supposed to be little-endian encoded. The specification
++ * says that this is the defacto standard.
++ */
++ if (dmi_ver >= 0x0206)
++ sprintf(s, "%pUL", d);
++ else
++ sprintf(s, "%pUB", d);
+
+ dmi_ident[slot] = s;
+ }
+@@ -404,35 +415,63 @@ static int __init dmi_present(const char __iomem *p)
+ u8 buf[15];
+
+ memcpy_fromio(buf, p, 15);
+- if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
++ if (dmi_checksum(buf, 15)) {
+ dmi_num = (buf[13] << 8) | buf[12];
+ dmi_len = (buf[7] << 8) | buf[6];
+ dmi_base = (buf[11] << 24) | (buf[10] << 16) |
+ (buf[9] << 8) | buf[8];
+
+- /*
+- * DMI version 0.0 means that the real version is taken from
+- * the SMBIOS version, which we don't know at this point.
+- */
+- if (buf[14] != 0)
+- printk(KERN_INFO "DMI %d.%d present.\n",
+- buf[14] >> 4, buf[14] & 0xF);
+- else
+- printk(KERN_INFO "DMI present.\n");
+ if (dmi_walk_early(dmi_decode) == 0) {
++ if (dmi_ver)
++ pr_info("SMBIOS %d.%d present.\n",
++ dmi_ver >> 8, dmi_ver & 0xFF);
++ else {
++ dmi_ver = (buf[14] & 0xF0) << 4 |
++ (buf[14] & 0x0F);
++ pr_info("Legacy DMI %d.%d present.\n",
++ dmi_ver >> 8, dmi_ver & 0xFF);
++ }
+ dmi_dump_ids();
+ return 0;
+ }
+ }
++ dmi_ver = 0;
+ return 1;
+ }
+
++static int __init smbios_present(const char __iomem *p)
++{
++ u8 buf[32];
++ int offset = 0;
++
++ memcpy_fromio(buf, p, 32);
++ if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) {
++ dmi_ver = (buf[6] << 8) + buf[7];
++
++ /* Some BIOS report weird SMBIOS version, fix that up */
++ switch (dmi_ver) {
++ case 0x021F:
++ case 0x0221:
++ pr_debug("SMBIOS version fixup(2.%d->2.%d)\n",
++ dmi_ver & 0xFF, 3);
++ dmi_ver = 0x0203;
++ break;
++ case 0x0233:
++ pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", 51, 6);
++ dmi_ver = 0x0206;
++ break;
++ }
++ offset = 16;
++ }
++ return dmi_present(buf + offset);
++}
++
+ void __init dmi_scan_machine(void)
+ {
+ char __iomem *p, *q;
+ int rc;
+
+- if (efi_enabled) {
++ if (efi_enabled(EFI_CONFIG_TABLES)) {
+ if (efi.smbios == EFI_INVALID_TABLE_ADDR)
+ goto error;
+
+@@ -444,7 +483,7 @@ void __init dmi_scan_machine(void)
+ if (p == NULL)
+ goto error;
+
+- rc = dmi_present(p + 0x10); /* offset of _DMI_ string */
++ rc = smbios_present(p);
+ dmi_iounmap(p, 32);
+ if (!rc) {
+ dmi_available = 1;
+@@ -462,7 +501,12 @@ void __init dmi_scan_machine(void)
+ goto error;
+
+ for (q = p; q < p + 0x10000; q += 16) {
+- rc = dmi_present(q);
++ if (memcmp(q, "_SM_", 4) == 0 && q - p <= 0xFFE0)
++ rc = smbios_present(q);
++ else if (memcmp(q, "_DMI_", 5) == 0)
++ rc = dmi_present(q);
++ else
++ continue;
+ if (!rc) {
+ dmi_available = 1;
+ dmi_iounmap(p, 0x10000);
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index 3e60e8d..5d5a868 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -1222,7 +1222,7 @@ efivars_init(void)
+ printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
+ EFIVARS_DATE);
+
+- if (!efi_enabled)
++ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return 0;
+
+ /* For now we'll register the efi directory at /sys/firmware/efi */
+@@ -1260,7 +1260,7 @@ err_put:
+ static void __exit
+ efivars_exit(void)
+ {
+- if (efi_enabled) {
++ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
+ unregister_efivars(&__efivars);
+ kobject_put(efi_kobj);
+ }
+diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
+index 4da4eb9..2224f1d 100644
+--- a/drivers/firmware/iscsi_ibft_find.c
++++ b/drivers/firmware/iscsi_ibft_find.c
+@@ -99,7 +99,7 @@ unsigned long __init find_ibft_region(unsigned long *sizep)
+ /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
+ * only use ACPI for this */
+
+- if (!efi_enabled)
++ if (!efi_enabled(EFI_BOOT))
+ find_ibft_in_mem();
+
+ if (ibft_addr) {
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index d00f905..10fe480 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -30,6 +30,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/slab.h>
+ #include <linux/export.h>
++#include <generated/utsrelease.h>
+ #include "drmP.h"
+ #include "drm.h"
+ #include "intel_drv.h"
+@@ -755,6 +756,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
+
+ seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
+ error->time.tv_usec);
++ seq_printf(m, "Kernel: " UTS_RELEASE);
+ seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+ seq_printf(m, "EIR: 0x%08x\n", error->eir);
+ seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 5950ba3..b0186b8 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -3456,14 +3456,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ goto out;
+ }
+
+- obj->user_pin_count++;
+- obj->pin_filp = file;
+- if (obj->user_pin_count == 1) {
++ if (obj->user_pin_count == 0) {
+ ret = i915_gem_object_pin(obj, args->alignment, true);
+ if (ret)
+ goto out;
+ }
+
++ obj->user_pin_count++;
++ obj->pin_filp = file;
++
+ /* XXX - flush the CPU caches for pinned objects
+ * as the X server doesn't manage domains yet
+ */
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index 1202198..878b989 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -657,6 +657,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ total = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
++ u64 invalid_offset = (u64)-1;
++ int j;
+
+ user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
+
+@@ -667,6 +669,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ goto err;
+ }
+
++ /* As we do not update the known relocation offsets after
++ * relocating (due to the complexities in lock handling),
++ * we need to mark them as invalid now so that we force the
++ * relocation processing next time. Just in case the target
++ * object is evicted and then rebound into its old
++ * presumed_offset before the next execbuffer - if that
++ * happened we would make the mistake of assuming that the
++ * relocations were valid.
++ */
++ for (j = 0; j < exec[i].relocation_count; j++) {
++ if (copy_to_user(&user_relocs[j].presumed_offset,
++ &invalid_offset,
++ sizeof(invalid_offset))) {
++ ret = -EFAULT;
++ mutex_lock(&dev->struct_mutex);
++ goto err;
++ }
++ }
++
+ reloc_offset[i] = total;
+ total += exec[i].relocation_count;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 7a10f5f..124dd87 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -27,6 +27,8 @@
+
+ #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+
++#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
++
+ /*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+@@ -389,6 +391,7 @@
+ * the enables for writing to the corresponding low bit.
+ */
+ #define _3D_CHICKEN 0x02084
++#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
+ #define _3D_CHICKEN2 0x0208c
+ /* Disables pipelining of read flushes past the SF-WIZ interface.
+ * Required on all Ironlake steppings according to the B-Spec, but the
+@@ -399,7 +402,8 @@
+
+ #define MI_MODE 0x0209c
+ # define VS_TIMER_DISPATCH (1 << 6)
+-# define MI_FLUSH_ENABLE (1 << 11)
++# define MI_FLUSH_ENABLE (1 << 12)
++# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
+
+ #define GEN6_GT_MODE 0x20d0
+ #define GEN6_GT_MODE_HI (1 << 9)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index fa9639b..c05e825 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8279,6 +8279,10 @@ static void gen6_init_clock_gating(struct drm_device *dev)
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+
++ /* WaDisableHiZPlanesWhenMSAAEnabled */
++ I915_WRITE(_3D_CHICKEN,
++ _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
++
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index c6d0966..6601d21 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -774,14 +774,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+- .ident = "ZOTAC ZBOXSD-ID12/ID13",
+- .matches = {
+- DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
+- DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
+- },
+- },
+- {
+- .callback = intel_no_lvds_dmi_callback,
+ .ident = "Gigabyte GA-D525TUD",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 19085c0..4fddd21 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -398,15 +398,26 @@ static int init_render_ring(struct intel_ring_buffer *ring)
+
+ if (INTEL_INFO(dev)->gen > 3) {
+ int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+- if (IS_GEN6(dev) || IS_GEN7(dev))
+- mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
+ I915_WRITE(MI_MODE, mode);
+- if (IS_GEN7(dev))
+- I915_WRITE(GFX_MODE_GEN7,
+- GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+- GFX_MODE_ENABLE(GFX_REPLAY_MODE));
+ }
+
++ /* We need to disable the AsyncFlip performance optimisations in order
++ * to use MI_WAIT_FOR_EVENT within the CS. It should already be
++ * programmed to '1' on all products.
++ */
++ if (INTEL_INFO(dev)->gen >= 6)
++ I915_WRITE(MI_MODE, GFX_MODE_ENABLE(ASYNC_FLIP_PERF_DISABLE));
++
++ /* Required for the hardware to program scanline values for waiting */
++ if (INTEL_INFO(dev)->gen == 6)
++ I915_WRITE(GFX_MODE,
++ GFX_MODE_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
++
++ if (IS_GEN7(dev))
++ I915_WRITE(GFX_MODE_GEN7,
++ GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
++ GFX_MODE_ENABLE(GFX_REPLAY_MODE));
++
+ if (INTEL_INFO(dev)->gen >= 5) {
+ ret = init_pipe_control(ring);
+ if (ret)
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index 29afd71..1f32557 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -168,6 +168,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+ if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
+ kfree(p->chunks[i].kpage[0]);
+ kfree(p->chunks[i].kpage[1]);
++ p->chunks[i].kpage[0] = NULL;
++ p->chunks[i].kpage[1] = NULL;
+ return -ENOMEM;
+ }
+ p->chunks[i].kpage_idx[0] = -1;
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index c5762e3..bd959c1 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -354,7 +354,8 @@ bool radeon_card_posted(struct radeon_device *rdev)
+ {
+ uint32_t reg;
+
+- if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
++ if (efi_enabled(EFI_BOOT) &&
++ rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
+ return false;
+
+ /* first check CRTCs */
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+index 8165953..a906803 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+@@ -617,6 +617,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
+ enum drm_connector_status found = connector_status_disconnected;
+ bool color = true;
+
++ /* just don't bother on RN50 those chip are often connected to remoting
++ * console hw and often we get failure to load detect those. So to make
++ * everyone happy report the encoder as always connected.
++ */
++ if (ASIC_IS_RN50(rdev)) {
++ return connector_status_connected;
++ }
++
+ /* save the regs we need */
+ vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index 3f28290..4fa2b11 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -431,10 +431,8 @@ static int intel_idle_probe(void)
+
+ if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
+ lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+- else {
++ else
+ on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
+- register_cpu_notifier(&setup_broadcast_notifier);
+- }
+
+ pr_debug(PREFIX "v" INTEL_IDLE_VERSION
+ " model 0x%X\n", boot_cpu_data.x86_model);
+@@ -597,6 +595,9 @@ static int __init intel_idle_init(void)
+ return retval;
+ }
+
++ if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
++ register_cpu_notifier(&setup_broadcast_notifier);
++
+ return 0;
+ }
+
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index ef2d493..62a4d5c 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -916,6 +916,38 @@ static void __init free_iommu_all(void)
+ }
+
+ /*
++ * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
++ * Workaround:
++ * BIOS should disable L2B micellaneous clock gating by setting
++ * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
++ */
++static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
++{
++ u32 value;
++
++ if ((boot_cpu_data.x86 != 0x15) ||
++ (boot_cpu_data.x86_model < 0x10) ||
++ (boot_cpu_data.x86_model > 0x1f))
++ return;
++
++ pci_write_config_dword(iommu->dev, 0xf0, 0x90);
++ pci_read_config_dword(iommu->dev, 0xf4, &value);
++
++ if (value & BIT(2))
++ return;
++
++ /* Select NB indirect register 0x90 and enable writing */
++ pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
++
++ pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
++ pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
++ dev_name(&iommu->dev->dev));
++
++ /* Clear the enable writing bit */
++ pci_write_config_dword(iommu->dev, 0xf0, 0x90);
++}
++
++/*
+ * This function clues the initialization function for one IOMMU
+ * together and also allocates the command buffer and programs the
+ * hardware. It does NOT enable the IOMMU. This is done afterwards.
+@@ -970,6 +1002,8 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
+ if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
+ amd_iommu_np_cache = true;
+
++ amd_iommu_erratum_746_workaround(iommu);
++
+ return pci_enable_device(iommu->dev);
+ }
+
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 9a6cc92..dffdca8 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -2302,8 +2302,39 @@ static int domain_add_dev_info(struct dmar_domain *domain,
+ return 0;
+ }
+
++static bool device_has_rmrr(struct pci_dev *dev)
++{
++ struct dmar_rmrr_unit *rmrr;
++ int i;
++
++ for_each_rmrr_units(rmrr) {
++ for (i = 0; i < rmrr->devices_cnt; i++) {
++ /*
++ * Return TRUE if this RMRR contains the device that
++ * is passed in.
++ */
++ if (rmrr->devices[i] == dev)
++ return true;
++ }
++ }
++ return false;
++}
++
+ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+ {
++
++ /*
++ * We want to prevent any device associated with an RMRR from
++ * getting placed into the SI Domain. This is done because
++ * problems exist when devices are moved in and out of domains
++ * and their respective RMRR info is lost. We exempt USB devices
++ * from this process due to their usage of RMRRs that are known
++ * to not be needed after BIOS hand-off to OS.
++ */
++ if (device_has_rmrr(pdev) &&
++ (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
++ return 0;
++
+ if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
+ return 1;
+
+@@ -4090,6 +4121,21 @@ static struct iommu_ops intel_iommu_ops = {
+ .domain_has_cap = intel_iommu_domain_has_cap,
+ };
+
++static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
++{
++ /* G4x/GM45 integrated gfx dmar support is totally busted. */
++ printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
++ dmar_map_gfx = 0;
++}
++
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
++
+ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+ {
+ /*
+@@ -4098,12 +4144,6 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+ */
+ printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
+ rwbf_quirk = 1;
+-
+- /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
+- if (dev->revision == 0x07) {
+- printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
+- dmar_map_gfx = 0;
+- }
+ }
+
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 86cd532..21a3d77 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -914,7 +914,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
+ break;
+ case LEC_ACK_ERROR:
+ netdev_dbg(dev, "ack error\n");
+- cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
++ cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
+ CAN_ERR_PROT_LOC_ACK_DEL);
+ break;
+ case LEC_BIT1_ERROR:
+@@ -927,7 +927,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
+ break;
+ case LEC_CRC_ERROR:
+ netdev_dbg(dev, "CRC error\n");
+- cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
++ cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL);
+ break;
+ default:
+diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
+index d11fbb2..b508a63 100644
+--- a/drivers/net/can/pch_can.c
++++ b/drivers/net/can/pch_can.c
+@@ -559,7 +559,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
+ stats->rx_errors++;
+ break;
+ case PCH_CRC_ERR:
+- cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
++ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL;
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
+index 79c70ae..1ef9df8 100644
+--- a/drivers/net/can/ti_hecc.c
++++ b/drivers/net/can/ti_hecc.c
+@@ -735,12 +735,12 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
+ }
+ if (err_status & HECC_CANES_CRCE) {
+ hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
+- cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
++ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL;
+ }
+ if (err_status & HECC_CANES_ACKE) {
+ hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
+- cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
++ cf->data[3] |= CAN_ERR_PROT_LOC_ACK |
+ CAN_ERR_PROT_LOC_ACK_DEL;
+ }
+ }
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 222954d..cf177b8 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -939,17 +939,18 @@ static int igb_request_msix(struct igb_adapter *adapter)
+ {
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+- int i, err = 0, vector = 0;
++ int i, err = 0, vector = 0, free_vector = 0;
+
+ err = request_irq(adapter->msix_entries[vector].vector,
+ igb_msix_other, 0, netdev->name, adapter);
+ if (err)
+- goto out;
+- vector++;
++ goto err_out;
+
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ struct igb_q_vector *q_vector = adapter->q_vector[i];
+
++ vector++;
++
+ q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
+
+ if (q_vector->rx.ring && q_vector->tx.ring)
+@@ -968,13 +969,22 @@ static int igb_request_msix(struct igb_adapter *adapter)
+ igb_msix_ring, 0, q_vector->name,
+ q_vector);
+ if (err)
+- goto out;
+- vector++;
++ goto err_free;
+ }
+
+ igb_configure_msix(adapter);
+ return 0;
+-out:
++
++err_free:
++ /* free already assigned IRQs */
++ free_irq(adapter->msix_entries[free_vector++].vector, adapter);
++
++ vector--;
++ for (i = 0; i < vector; i++) {
++ free_irq(adapter->msix_entries[free_vector++].vector,
++ adapter->q_vector[i]);
++ }
++err_out:
+ return err;
+ }
+
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+index 12a730d..ae750f9 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+@@ -946,6 +946,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
+ AR_PHY_CL_TAB_1,
+ AR_PHY_CL_TAB_2 };
+
++ ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
++
+ if (rtt) {
+ if (!ar9003_hw_rtt_restore(ah, chan))
+ run_rtt_cal = true;
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+index 2330e7e..73be7ff 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+@@ -541,35 +541,22 @@ static void ar9003_hw_init_bb(struct ath_hw *ah,
+ udelay(synthDelay + BASE_ACTIVATE_DELAY);
+ }
+
+-static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
++void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
+ {
+- switch (rx) {
+- case 0x5:
++ if (ah->caps.tx_chainmask == 5 || ah->caps.rx_chainmask == 5)
+ REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+ AR_PHY_SWAP_ALT_CHAIN);
+- case 0x3:
+- case 0x1:
+- case 0x2:
+- case 0x7:
+- REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
+- REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
+- break;
+- default:
+- break;
+- }
++
++ REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
++ REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
+
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7))
+- REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
++ tx = 3;
+ else if (AR_SREV_9462(ah))
+ /* xxx only when MCI support is enabled */
+- REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
+- else
+- REG_WRITE(ah, AR_SELFGEN_MASK, tx);
++ tx = 3;
+
+- if (tx == 0x5) {
+- REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
+- AR_PHY_SWAP_ALT_CHAIN);
+- }
++ REG_WRITE(ah, AR_SELFGEN_MASK, tx);
+ }
+
+ /*
+diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
+index a13cabb..2bbc83e 100644
+--- a/drivers/net/wireless/ath/ath9k/beacon.c
++++ b/drivers/net/wireless/ath/ath9k/beacon.c
+@@ -155,6 +155,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ bf->bf_buf_addr = 0;
++ bf->bf_mpdu = NULL;
+ }
+
+ /* Get a new beacon from mac80211 */
+diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
+index 1b90ed8..4f7843a 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
+@@ -342,6 +342,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
+ endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv,
+ skb, htc_hdr->endpoint_id,
+ txok);
++ } else {
++ kfree_skb(skb);
+ }
+ }
+
+diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
+index a5c4ba8..0c65a09 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.h
++++ b/drivers/net/wireless/ath/ath9k/hw.h
+@@ -1016,6 +1016,7 @@ int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
+ int ar9003_paprd_init_table(struct ath_hw *ah);
+ bool ar9003_paprd_is_done(struct ath_hw *ah);
+ void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains);
++void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
+
+ /* Hardware family op attach helpers */
+ void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
+diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
+index b4cbc82..d171a72 100644
+--- a/drivers/net/wireless/ath/ath9k/recv.c
++++ b/drivers/net/wireless/ath/ath9k/recv.c
+@@ -786,6 +786,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+ return NULL;
+ }
+
++ list_del(&bf->list);
+ if (!bf->bf_mpdu)
+ return bf;
+
+@@ -1966,14 +1967,15 @@ requeue_drop_frag:
+ sc->rx.frag = NULL;
+ }
+ requeue:
++ list_add_tail(&bf->list, &sc->rx.rxbuf);
++ if (flush)
++ continue;
++
+ if (edma) {
+- list_add_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_edma_buf_link(sc, qtype);
+ } else {
+- list_move_tail(&bf->list, &sc->rx.rxbuf);
+ ath_rx_buf_link(sc, bf);
+- if (!flush)
+- ath9k_hw_rxena(ah);
++ ath9k_hw_rxena(ah);
+ }
+ } while (1);
+
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+index 0d8a9cd..78c16eb 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+@@ -1484,9 +1484,10 @@ void brcms_add_timer(struct brcms_timer *t, uint ms, int periodic)
+ #endif
+ t->ms = ms;
+ t->periodic = (bool) periodic;
+- t->set = true;
+-
+- atomic_inc(&t->wl->callbacks);
++ if (!t->set) {
++ t->set = true;
++ atomic_inc(&t->wl->callbacks);
++ }
+
+ ieee80211_queue_delayed_work(hw, &t->dly_wrk, msecs_to_jiffies(ms));
+ }
+diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
+index d34acf0..de94244 100644
+--- a/drivers/net/wireless/mwifiex/pcie.c
++++ b/drivers/net/wireless/mwifiex/pcie.c
+@@ -160,7 +160,7 @@ static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
+
+ if (pdev) {
+ card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+- if (!card || card->adapter) {
++ if (!card || !card->adapter) {
+ pr_err("Card or adapter structure is not valid\n");
+ return 0;
+ }
+diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
+index 1679c25..56e1c4a 100644
+--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
++++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
+@@ -53,8 +53,7 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
+ */
+ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
+ {
+- bool cancel_flag = false;
+- int status = adapter->cmd_wait_q.status;
++ int status;
+ struct cmd_ctrl_node *cmd_queued;
+
+ if (!adapter->cmd_queued)
+@@ -70,15 +69,14 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
+ queue_work(adapter->workqueue, &adapter->main_work);
+
+ /* Wait for completion */
+- wait_event_interruptible(adapter->cmd_wait_q.wait,
+- *(cmd_queued->condition));
+- if (!*(cmd_queued->condition))
+- cancel_flag = true;
+-
+- if (cancel_flag) {
+- mwifiex_cancel_pending_ioctl(adapter);
+- dev_dbg(adapter->dev, "cmd cancel\n");
++ status = wait_event_interruptible(adapter->cmd_wait_q.wait,
++ *(cmd_queued->condition));
++ if (status) {
++ dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
++ return status;
+ }
++
++ status = adapter->cmd_wait_q.status;
+ adapter->cmd_wait_q.status = 0;
+
+ return status;
+@@ -240,6 +238,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
+
+ if (!netif_queue_stopped(priv->netdev))
+ netif_stop_queue(priv->netdev);
++ if (netif_carrier_ok(priv->netdev))
++ netif_carrier_off(priv->netdev);
+
+ /* Clear any past association response stored for
+ * application retrieval */
+@@ -271,6 +271,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
+
+ if (!netif_queue_stopped(priv->netdev))
+ netif_stop_queue(priv->netdev);
++ if (netif_carrier_ok(priv->netdev))
++ netif_carrier_off(priv->netdev);
+
+ if (!ret) {
+ dev_dbg(adapter->dev, "info: network found in scan"
+@@ -421,8 +423,11 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
+ return false;
+ }
+
+- wait_event_interruptible(adapter->hs_activate_wait_q,
+- adapter->hs_activate_wait_q_woken);
++ if (wait_event_interruptible(adapter->hs_activate_wait_q,
++ adapter->hs_activate_wait_q_woken)) {
++ dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
++ return false;
++ }
+
+ return true;
+ }
+diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
+index 838f571..4fff912 100644
+--- a/drivers/pci/hotplug/pciehp.h
++++ b/drivers/pci/hotplug/pciehp.h
+@@ -44,8 +44,6 @@ extern int pciehp_poll_mode;
+ extern int pciehp_poll_time;
+ extern int pciehp_debug;
+ extern int pciehp_force;
+-extern struct workqueue_struct *pciehp_wq;
+-extern struct workqueue_struct *pciehp_ordered_wq;
+
+ #define dbg(format, arg...) \
+ do { \
+@@ -79,6 +77,7 @@ struct slot {
+ struct hotplug_slot *hotplug_slot;
+ struct delayed_work work; /* work for button event */
+ struct mutex lock;
++ struct workqueue_struct *wq;
+ };
+
+ struct event_info {
+diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
+index 7ac8358..9350af9 100644
+--- a/drivers/pci/hotplug/pciehp_core.c
++++ b/drivers/pci/hotplug/pciehp_core.c
+@@ -42,8 +42,6 @@ int pciehp_debug;
+ int pciehp_poll_mode;
+ int pciehp_poll_time;
+ int pciehp_force;
+-struct workqueue_struct *pciehp_wq;
+-struct workqueue_struct *pciehp_ordered_wq;
+
+ #define DRIVER_VERSION "0.4"
+ #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
+@@ -341,33 +339,19 @@ static int __init pcied_init(void)
+ {
+ int retval = 0;
+
+- pciehp_wq = alloc_workqueue("pciehp", 0, 0);
+- if (!pciehp_wq)
+- return -ENOMEM;
+-
+- pciehp_ordered_wq = alloc_ordered_workqueue("pciehp_ordered", 0);
+- if (!pciehp_ordered_wq) {
+- destroy_workqueue(pciehp_wq);
+- return -ENOMEM;
+- }
+-
+ pciehp_firmware_init();
+ retval = pcie_port_service_register(&hpdriver_portdrv);
+ dbg("pcie_port_service_register = %d\n", retval);
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+- if (retval) {
+- destroy_workqueue(pciehp_ordered_wq);
+- destroy_workqueue(pciehp_wq);
++ if (retval)
+ dbg("Failure to register service\n");
+- }
++
+ return retval;
+ }
+
+ static void __exit pcied_cleanup(void)
+ {
+ dbg("unload_pciehpd()\n");
+- destroy_workqueue(pciehp_ordered_wq);
+- destroy_workqueue(pciehp_wq);
+ pcie_port_service_unregister(&hpdriver_portdrv);
+ info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
+ }
+diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
+index 085dbb5..38f0186 100644
+--- a/drivers/pci/hotplug/pciehp_ctrl.c
++++ b/drivers/pci/hotplug/pciehp_ctrl.c
+@@ -49,7 +49,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
+ info->p_slot = p_slot;
+ INIT_WORK(&info->work, interrupt_event_handler);
+
+- queue_work(pciehp_wq, &info->work);
++ queue_work(p_slot->wq, &info->work);
+
+ return 0;
+ }
+@@ -344,7 +344,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
+ kfree(info);
+ goto out;
+ }
+- queue_work(pciehp_ordered_wq, &info->work);
++ queue_work(p_slot->wq, &info->work);
+ out:
+ mutex_unlock(&p_slot->lock);
+ }
+@@ -377,7 +377,7 @@ static void handle_button_press_event(struct slot *p_slot)
+ if (ATTN_LED(ctrl))
+ pciehp_set_attention_status(p_slot, 0);
+
+- queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ);
++ queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
+ break;
+ case BLINKINGOFF_STATE:
+ case BLINKINGON_STATE:
+@@ -439,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot)
+ else
+ p_slot->state = POWERON_STATE;
+
+- queue_work(pciehp_ordered_wq, &info->work);
++ queue_work(p_slot->wq, &info->work);
+ }
+
+ static void interrupt_event_handler(struct work_struct *work)
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 7b14148..fef1748 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -789,24 +789,32 @@ static void pcie_shutdown_notification(struct controller *ctrl)
+ static int pcie_init_slot(struct controller *ctrl)
+ {
+ struct slot *slot;
++ char name[32];
+
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ return -ENOMEM;
+
++ snprintf(name, sizeof(name), "pciehp-%u", PSN(ctrl));
++ slot->wq = alloc_workqueue(name, 0, 0);
++ if (!slot->wq)
++ goto abort;
++
+ slot->ctrl = ctrl;
+ mutex_init(&slot->lock);
+ INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
+ ctrl->slot = slot;
+ return 0;
++abort:
++ kfree(slot);
++ return -ENOMEM;
+ }
+
+ static void pcie_cleanup_slot(struct controller *ctrl)
+ {
+ struct slot *slot = ctrl->slot;
+ cancel_delayed_work(&slot->work);
+- flush_workqueue(pciehp_wq);
+- flush_workqueue(pciehp_ordered_wq);
++ destroy_workqueue(slot->wq);
+ kfree(slot);
+ }
+
+diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
+index e0c90e6..2c2ac80 100644
+--- a/drivers/pci/hotplug/shpchp.h
++++ b/drivers/pci/hotplug/shpchp.h
+@@ -46,8 +46,6 @@
+ extern int shpchp_poll_mode;
+ extern int shpchp_poll_time;
+ extern int shpchp_debug;
+-extern struct workqueue_struct *shpchp_wq;
+-extern struct workqueue_struct *shpchp_ordered_wq;
+
+ #define dbg(format, arg...) \
+ do { \
+@@ -91,6 +89,7 @@ struct slot {
+ struct list_head slot_list;
+ struct delayed_work work; /* work for button event */
+ struct mutex lock;
++ struct workqueue_struct *wq;
+ u8 hp_slot;
+ };
+
+diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
+index dd7e0c5..754a7cd 100644
+--- a/drivers/pci/hotplug/shpchp_core.c
++++ b/drivers/pci/hotplug/shpchp_core.c
+@@ -39,8 +39,6 @@
+ int shpchp_debug;
+ int shpchp_poll_mode;
+ int shpchp_poll_time;
+-struct workqueue_struct *shpchp_wq;
+-struct workqueue_struct *shpchp_ordered_wq;
+
+ #define DRIVER_VERSION "0.4"
+ #define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
+@@ -123,6 +121,14 @@ static int init_slots(struct controller *ctrl)
+ slot->device = ctrl->slot_device_offset + i;
+ slot->hpc_ops = ctrl->hpc_ops;
+ slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i);
++
++ snprintf(name, sizeof(name), "shpchp-%d", slot->number);
++ slot->wq = alloc_workqueue(name, 0, 0);
++ if (!slot->wq) {
++ retval = -ENOMEM;
++ goto error_info;
++ }
++
+ mutex_init(&slot->lock);
+ INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work);
+
+@@ -142,7 +148,7 @@ static int init_slots(struct controller *ctrl)
+ if (retval) {
+ ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
+ retval);
+- goto error_info;
++ goto error_slotwq;
+ }
+
+ get_power_status(hotplug_slot, &info->power_status);
+@@ -154,6 +160,8 @@ static int init_slots(struct controller *ctrl)
+ }
+
+ return 0;
++error_slotwq:
++ destroy_workqueue(slot->wq);
+ error_info:
+ kfree(info);
+ error_hpslot:
+@@ -174,8 +182,7 @@ void cleanup_slots(struct controller *ctrl)
+ slot = list_entry(tmp, struct slot, slot_list);
+ list_del(&slot->slot_list);
+ cancel_delayed_work(&slot->work);
+- flush_workqueue(shpchp_wq);
+- flush_workqueue(shpchp_ordered_wq);
++ destroy_workqueue(slot->wq);
+ pci_hp_deregister(slot->hotplug_slot);
+ }
+ }
+@@ -358,25 +365,12 @@ static struct pci_driver shpc_driver = {
+
+ static int __init shpcd_init(void)
+ {
+- int retval = 0;
+-
+- shpchp_wq = alloc_ordered_workqueue("shpchp", 0);
+- if (!shpchp_wq)
+- return -ENOMEM;
+-
+- shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0);
+- if (!shpchp_ordered_wq) {
+- destroy_workqueue(shpchp_wq);
+- return -ENOMEM;
+- }
++ int retval;
+
+ retval = pci_register_driver(&shpc_driver);
+ dbg("%s: pci_register_driver = %d\n", __func__, retval);
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+- if (retval) {
+- destroy_workqueue(shpchp_ordered_wq);
+- destroy_workqueue(shpchp_wq);
+- }
++
+ return retval;
+ }
+
+@@ -384,8 +378,6 @@ static void __exit shpcd_cleanup(void)
+ {
+ dbg("unload_shpchpd()\n");
+ pci_unregister_driver(&shpc_driver);
+- destroy_workqueue(shpchp_ordered_wq);
+- destroy_workqueue(shpchp_wq);
+ info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
+ }
+
+diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
+index b00b09b..3ffc1b2 100644
+--- a/drivers/pci/hotplug/shpchp_ctrl.c
++++ b/drivers/pci/hotplug/shpchp_ctrl.c
+@@ -51,7 +51,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
+ info->p_slot = p_slot;
+ INIT_WORK(&info->work, interrupt_event_handler);
+
+- queue_work(shpchp_wq, &info->work);
++ queue_work(p_slot->wq, &info->work);
+
+ return 0;
+ }
+@@ -456,7 +456,7 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
+ kfree(info);
+ goto out;
+ }
+- queue_work(shpchp_ordered_wq, &info->work);
++ queue_work(p_slot->wq, &info->work);
+ out:
+ mutex_unlock(&p_slot->lock);
+ }
+@@ -504,7 +504,7 @@ static void handle_button_press_event(struct slot *p_slot)
+ p_slot->hpc_ops->green_led_blink(p_slot);
+ p_slot->hpc_ops->set_attention_status(p_slot, 0);
+
+- queue_delayed_work(shpchp_wq, &p_slot->work, 5*HZ);
++ queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
+ break;
+ case BLINKINGOFF_STATE:
+ case BLINKINGON_STATE:
+diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
+index 9674e9f..ee82c55 100644
+--- a/drivers/pci/pcie/aer/aerdrv_core.c
++++ b/drivers/pci/pcie/aer/aerdrv_core.c
+@@ -637,6 +637,7 @@ static void aer_recover_work_func(struct work_struct *work)
+ continue;
+ }
+ do_recovery(pdev, entry.severity);
++ pci_dev_put(pdev);
+ }
+ }
+ #endif
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 2275162..c73ed00 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -790,6 +790,9 @@ void pcie_clear_aspm(struct pci_bus *bus)
+ {
+ struct pci_dev *child;
+
++ if (aspm_force)
++ return;
++
+ /*
+ * Clear any ASPM setup that the firmware has carried out on this bus
+ */
+diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
+index 811d436..2704386 100644
+--- a/drivers/platform/x86/ibm_rtl.c
++++ b/drivers/platform/x86/ibm_rtl.c
+@@ -255,7 +255,7 @@ static int __init ibm_rtl_init(void) {
+ if (force)
+ pr_warn("module loaded by force\n");
+ /* first ensure that we are running on IBM HW */
+- else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table))
++ else if (efi_enabled(EFI_BOOT) || !dmi_check_system(ibm_rtl_dmi_table))
+ return -ENODEV;
+
+ /* Get the address for the Extended BIOS Data Area */
+diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
+index 21bc1a7..64e1f2d 100644
+--- a/drivers/platform/x86/samsung-laptop.c
++++ b/drivers/platform/x86/samsung-laptop.c
+@@ -22,6 +22,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/rfkill.h>
+ #include <linux/acpi.h>
++#include <linux/efi.h>
+
+ /*
+ * This driver is needed because a number of Samsung laptops do not hook
+@@ -603,6 +604,9 @@ static int __init samsung_init(void)
+ int loca;
+ int retval;
+
++ if (efi_enabled(EFI_BOOT))
++ return -ENODEV;
++
+ mutex_init(&sabi_mutex);
+ handle_backlight = true;
+
+diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
+index 8cba82d..32445a7 100644
+--- a/drivers/regulator/max8997.c
++++ b/drivers/regulator/max8997.c
+@@ -71,26 +71,26 @@ struct voltage_map_desc {
+ unsigned int n_bits;
+ };
+
+-/* Voltage maps in mV */
++/* Voltage maps in uV */
+ static const struct voltage_map_desc ldo_voltage_map_desc = {
+- .min = 800, .max = 3950, .step = 50, .n_bits = 6,
++ .min = 800000, .max = 3950000, .step = 50000, .n_bits = 6,
+ }; /* LDO1 ~ 18, 21 all */
+
+ static const struct voltage_map_desc buck1245_voltage_map_desc = {
+- .min = 650, .max = 2225, .step = 25, .n_bits = 6,
++ .min = 650000, .max = 2225000, .step = 25000, .n_bits = 6,
+ }; /* Buck1, 2, 4, 5 */
+
+ static const struct voltage_map_desc buck37_voltage_map_desc = {
+- .min = 750, .max = 3900, .step = 50, .n_bits = 6,
++ .min = 750000, .max = 3900000, .step = 50000, .n_bits = 6,
+ }; /* Buck3, 7 */
+
+-/* current map in mA */
++/* current map in uA */
+ static const struct voltage_map_desc charger_current_map_desc = {
+- .min = 200, .max = 950, .step = 50, .n_bits = 4,
++ .min = 200000, .max = 950000, .step = 50000, .n_bits = 4,
+ };
+
+ static const struct voltage_map_desc topoff_current_map_desc = {
+- .min = 50, .max = 200, .step = 10, .n_bits = 4,
++ .min = 50000, .max = 200000, .step = 10000, .n_bits = 4,
+ };
+
+ static const struct voltage_map_desc *reg_voltage_map[] = {
+@@ -199,7 +199,7 @@ static int max8997_list_voltage(struct regulator_dev *rdev,
+ if (val > desc->max)
+ return -EINVAL;
+
+- return val * 1000;
++ return val;
+ }
+
+ static int max8997_get_enable_register(struct regulator_dev *rdev,
+@@ -501,7 +501,6 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
+ {
+ struct max8997_data *max8997 = rdev_get_drvdata(rdev);
+ struct i2c_client *i2c = max8997->iodev->i2c;
+- int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
+ const struct voltage_map_desc *desc;
+ int rid = max8997_get_rid(rdev);
+ int reg, shift = 0, mask, ret;
+@@ -527,7 +526,7 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
+
+ desc = reg_voltage_map[rid];
+
+- i = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
++ i = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
+ if (i < 0)
+ return i;
+
+@@ -546,7 +545,7 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
+ /* If the voltage is increasing */
+ if (org < i)
+ udelay(DIV_ROUND_UP(desc->step * (i - org),
+- max8997->ramp_delay));
++ max8997->ramp_delay * 1000));
+ }
+
+ return ret;
+@@ -645,7 +644,6 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
+ const struct voltage_map_desc *desc;
+ int new_val, new_idx, damage, tmp_val, tmp_idx, tmp_dmg;
+ bool gpio_dvs_mode = false;
+- int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
+
+ if (rid < MAX8997_BUCK1 || rid > MAX8997_BUCK7)
+ return -EINVAL;
+@@ -670,7 +668,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
+ selector);
+
+ desc = reg_voltage_map[rid];
+- new_val = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
++ new_val = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
+ if (new_val < 0)
+ return new_val;
+
+@@ -1002,8 +1000,8 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
+ max8997->buck1_vol[i] = ret =
+ max8997_get_voltage_proper_val(
+ &buck1245_voltage_map_desc,
+- pdata->buck1_voltage[i] / 1000,
+- pdata->buck1_voltage[i] / 1000 +
++ pdata->buck1_voltage[i],
++ pdata->buck1_voltage[i] +
+ buck1245_voltage_map_desc.step);
+ if (ret < 0)
+ goto err_alloc;
+@@ -1011,8 +1009,8 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
+ max8997->buck2_vol[i] = ret =
+ max8997_get_voltage_proper_val(
+ &buck1245_voltage_map_desc,
+- pdata->buck2_voltage[i] / 1000,
+- pdata->buck2_voltage[i] / 1000 +
++ pdata->buck2_voltage[i],
++ pdata->buck2_voltage[i] +
+ buck1245_voltage_map_desc.step);
+ if (ret < 0)
+ goto err_alloc;
+@@ -1020,8 +1018,8 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
+ max8997->buck5_vol[i] = ret =
+ max8997_get_voltage_proper_val(
+ &buck1245_voltage_map_desc,
+- pdata->buck5_voltage[i] / 1000,
+- pdata->buck5_voltage[i] / 1000 +
++ pdata->buck5_voltage[i],
++ pdata->buck5_voltage[i] +
+ buck1245_voltage_map_desc.step);
+ if (ret < 0)
+ goto err_alloc;
+diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
+index 41a1495..27fe1c6 100644
+--- a/drivers/regulator/max8998.c
++++ b/drivers/regulator/max8998.c
+@@ -497,7 +497,7 @@ buck2_exit:
+
+ difference = desc->min + desc->step*i - previous_vol/1000;
+ if (difference > 0)
+- udelay(difference / ((val & 0x0f) + 1));
++ udelay(DIV_ROUND_UP(difference, (val & 0x0f) + 1));
+
+ return ret;
+ }
+diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
+index 5c8b0dc..3784388 100644
+--- a/drivers/scsi/isci/init.c
++++ b/drivers/scsi/isci/init.c
+@@ -459,7 +459,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
+ return -ENOMEM;
+ pci_set_drvdata(pdev, pci_info);
+
+- if (efi_enabled)
++ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ orom = isci_get_efi_var(pdev);
+
+ if (!orom)
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 4b63c73..f44d633 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2825,10 +2825,6 @@ static int __init init_sd(void)
+ if (err)
+ goto err_out;
+
+- err = scsi_register_driver(&sd_template.gendrv);
+- if (err)
+- goto err_out_class;
+-
+ sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
+ 0, 0, NULL);
+ if (!sd_cdb_cache) {
+@@ -2842,8 +2838,15 @@ static int __init init_sd(void)
+ goto err_out_cache;
+ }
+
++ err = scsi_register_driver(&sd_template.gendrv);
++ if (err)
++ goto err_out_driver;
++
+ return 0;
+
++err_out_driver:
++ mempool_destroy(sd_cdb_pool);
++
+ err_out_cache:
+ kmem_cache_destroy(sd_cdb_cache);
+
+@@ -2866,10 +2869,10 @@ static void __exit exit_sd(void)
+
+ SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
+
++ scsi_unregister_driver(&sd_template.gendrv);
+ mempool_destroy(sd_cdb_pool);
+ kmem_cache_destroy(sd_cdb_cache);
+
+- scsi_unregister_driver(&sd_template.gendrv);
+ class_unregister(&sd_disk_class);
+
+ for (i = 0; i < SD_MAJORS; i++)
+diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
+index 4c77e50..da26630 100644
+--- a/drivers/staging/comedi/Kconfig
++++ b/drivers/staging/comedi/Kconfig
+@@ -424,6 +424,7 @@ config COMEDI_ADQ12B
+
+ config COMEDI_NI_AT_A2150
+ tristate "NI AT-A2150 ISA card support"
++ select COMEDI_FC
+ depends on COMEDI_NI_COMMON
+ depends on VIRT_TO_BUS
+ default N
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index 9465bce..ab9f5ed 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -2207,6 +2207,7 @@ int comedi_alloc_board_minor(struct device *hardware_device)
+ kfree(info);
+ return -ENOMEM;
+ }
++ info->hardware_device = hardware_device;
+ comedi_device_init(info->device);
+ spin_lock_irqsave(&comedi_file_info_table_lock, flags);
+ for (i = 0; i < COMEDI_NUM_BOARD_MINORS; ++i) {
+@@ -2295,6 +2296,23 @@ void comedi_free_board_minor(unsigned minor)
+ }
+ }
+
++int comedi_find_board_minor(struct device *hardware_device)
++{
++ int minor;
++ struct comedi_device_file_info *info;
++
++ for (minor = 0; minor < COMEDI_NUM_BOARD_MINORS; minor++) {
++ spin_lock(&comedi_file_info_table_lock);
++ info = comedi_file_info_table[minor];
++ if (info && info->hardware_device == hardware_device) {
++ spin_unlock(&comedi_file_info_table_lock);
++ return minor;
++ }
++ spin_unlock(&comedi_file_info_table_lock);
++ }
++ return -ENODEV;
++}
++
+ int comedi_alloc_subdevice_minor(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+ {
+diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
+index 7a0d4bc..00d3c65 100644
+--- a/drivers/staging/comedi/comedidev.h
++++ b/drivers/staging/comedi/comedidev.h
+@@ -234,6 +234,7 @@ struct comedi_device_file_info {
+ struct comedi_device *device;
+ struct comedi_subdevice *read_subdevice;
+ struct comedi_subdevice *write_subdevice;
++ struct device *hardware_device;
+ };
+
+ #ifdef CONFIG_COMEDI_DEBUG
+diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
+index db1fd63..538b568 100644
+--- a/drivers/staging/comedi/drivers.c
++++ b/drivers/staging/comedi/drivers.c
+@@ -823,25 +823,14 @@ static int comedi_auto_config(struct device *hardware_device,
+ int minor;
+ struct comedi_device_file_info *dev_file_info;
+ int retval;
+- unsigned *private_data = NULL;
+
+- if (!comedi_autoconfig) {
+- dev_set_drvdata(hardware_device, NULL);
++ if (!comedi_autoconfig)
+ return 0;
+- }
+
+ minor = comedi_alloc_board_minor(hardware_device);
+ if (minor < 0)
+ return minor;
+
+- private_data = kmalloc(sizeof(unsigned), GFP_KERNEL);
+- if (private_data == NULL) {
+- retval = -ENOMEM;
+- goto cleanup;
+- }
+- *private_data = minor;
+- dev_set_drvdata(hardware_device, private_data);
+-
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ memset(&it, 0, sizeof(it));
+@@ -854,25 +843,22 @@ static int comedi_auto_config(struct device *hardware_device,
+ retval = comedi_device_attach(dev_file_info->device, &it);
+ mutex_unlock(&dev_file_info->device->mutex);
+
+-cleanup:
+- if (retval < 0) {
+- kfree(private_data);
++ if (retval < 0)
+ comedi_free_board_minor(minor);
+- }
+ return retval;
+ }
+
+ static void comedi_auto_unconfig(struct device *hardware_device)
+ {
+- unsigned *minor = (unsigned *)dev_get_drvdata(hardware_device);
+- if (minor == NULL)
+- return;
+-
+- BUG_ON(*minor >= COMEDI_NUM_BOARD_MINORS);
++ int minor;
+
+- comedi_free_board_minor(*minor);
+- dev_set_drvdata(hardware_device, NULL);
+- kfree(minor);
++ if (hardware_device == NULL)
++ return;
++ minor = comedi_find_board_minor(hardware_device);
++ if (minor < 0)
++ return;
++ BUG_ON(minor >= COMEDI_NUM_BOARD_MINORS);
++ comedi_free_board_minor(minor);
+ }
+
+ int comedi_pci_auto_config(struct pci_dev *pcidev, const char *board_name)
+diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
+index a804742..2567f9a 100644
+--- a/drivers/staging/comedi/drivers/comedi_test.c
++++ b/drivers/staging/comedi/drivers/comedi_test.c
+@@ -461,7 +461,7 @@ static int waveform_ai_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+ {
+ devpriv->timer_running = 0;
+- del_timer(&devpriv->timer);
++ del_timer_sync(&devpriv->timer);
+ return 0;
+ }
+
+diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
+index 9148abd..9fee2f2 100644
+--- a/drivers/staging/comedi/drivers/ni_pcimio.c
++++ b/drivers/staging/comedi/drivers/ni_pcimio.c
+@@ -1021,7 +1021,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_625x_ao,
+ .reg_type = ni_reg_625x,
+ .ao_unipolar = 0,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 8,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1040,7 +1040,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_625x_ao,
+ .reg_type = ni_reg_625x,
+ .ao_unipolar = 0,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 8,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1076,7 +1076,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_625x_ao,
+ .reg_type = ni_reg_625x,
+ .ao_unipolar = 0,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 32,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1095,7 +1095,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_625x_ao,
+ .reg_type = ni_reg_625x,
+ .ao_unipolar = 0,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 32,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1131,7 +1131,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_628x_ao,
+ .reg_type = ni_reg_628x,
+ .ao_unipolar = 1,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 8,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1150,7 +1150,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_628x_ao,
+ .reg_type = ni_reg_628x,
+ .ao_unipolar = 1,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 8,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+@@ -1186,7 +1186,7 @@ static const struct ni_board_struct ni_boards[] = {
+ .ao_range_table = &range_ni_M_628x_ao,
+ .reg_type = ni_reg_628x,
+ .ao_unipolar = 1,
+- .ao_speed = 357,
++ .ao_speed = 350,
+ .num_p0_dio_channels = 32,
+ .caldac = {caldac_none},
+ .has_8255 = 0,
+diff --git a/drivers/staging/comedi/internal.h b/drivers/staging/comedi/internal.h
+index 434ce34..4208fb4 100644
+--- a/drivers/staging/comedi/internal.h
++++ b/drivers/staging/comedi/internal.h
+@@ -7,6 +7,7 @@ int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+ int comedi_alloc_board_minor(struct device *hardware_device);
+ void comedi_free_board_minor(unsigned minor);
++int comedi_find_board_minor(struct device *hardware_device);
+ void comedi_reset_async_buf(struct comedi_async *async);
+ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned long new_size);
+diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
+index f4b738f..88d1d35 100644
+--- a/drivers/staging/rtl8712/usb_intf.c
++++ b/drivers/staging/rtl8712/usb_intf.c
+@@ -66,6 +66,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
+ {USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */
+ /* Belkin */
+ {USB_DEVICE(0x050D, 0x945A)},
++ /* ISY IWL - Belkin clone */
++ {USB_DEVICE(0x050D, 0x11F1)},
+ /* Corega */
+ {USB_DEVICE(0x07AA, 0x0047)},
+ /* D-Link */
+diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
+index c241074..7843111 100644
+--- a/drivers/staging/speakup/synth.c
++++ b/drivers/staging/speakup/synth.c
+@@ -342,7 +342,7 @@ int synth_init(char *synth_name)
+
+ mutex_lock(&spk_mutex);
+ /* First, check if we already have it loaded. */
+- for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
++ for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
+ if (strcmp(synths[i]->name, synth_name) == 0)
+ synth = synths[i];
+
+@@ -423,7 +423,7 @@ int synth_add(struct spk_synth *in_synth)
+ int i;
+ int status = 0;
+ mutex_lock(&spk_mutex);
+- for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
++ for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
+ /* synth_remove() is responsible for rotating the array down */
+ if (in_synth == synths[i]) {
+ mutex_unlock(&spk_mutex);
+diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
+index 3b7a847..194e974 100644
+--- a/drivers/staging/usbip/usbip_common.c
++++ b/drivers/staging/usbip/usbip_common.c
+@@ -761,26 +761,25 @@ EXPORT_SYMBOL_GPL(usbip_recv_iso);
+ * buffer and iso packets need to be stored and be in propeper endian in urb
+ * before calling this function
+ */
+-int usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
++void usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
+ {
+ int np = urb->number_of_packets;
+ int i;
+- int ret;
+ int actualoffset = urb->actual_length;
+
+ if (!usb_pipeisoc(urb->pipe))
+- return 0;
++ return;
+
+ /* if no packets or length of data is 0, then nothing to unpack */
+ if (np == 0 || urb->actual_length == 0)
+- return 0;
++ return;
+
+ /*
+ * if actual_length is transfer_buffer_length then no padding is
+ * present.
+ */
+ if (urb->actual_length == urb->transfer_buffer_length)
+- return 0;
++ return;
+
+ /*
+ * loop over all packets from last to first (to prevent overwritting
+@@ -792,8 +791,6 @@ int usbip_pad_iso(struct usbip_device *ud, struct urb *urb)
+ urb->transfer_buffer + actualoffset,
+ urb->iso_frame_desc[i].actual_length);
+ }
+-
+- return ret;
+ }
+ EXPORT_SYMBOL_GPL(usbip_pad_iso);
+
+diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
+index be21617..e547dba 100644
+--- a/drivers/staging/usbip/usbip_common.h
++++ b/drivers/staging/usbip/usbip_common.h
+@@ -316,7 +316,7 @@ void usbip_header_correct_endian(struct usbip_header *pdu, int send);
+ void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen);
+ /* some members of urb must be substituted before. */
+ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb);
+-int usbip_pad_iso(struct usbip_device *ud, struct urb *urb);
++void usbip_pad_iso(struct usbip_device *ud, struct urb *urb);
+ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb);
+
+ /* usbip_event.c */
+diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
+index 3872b8c..1a7afaa 100644
+--- a/drivers/staging/usbip/vhci_rx.c
++++ b/drivers/staging/usbip/vhci_rx.c
+@@ -94,8 +94,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
+ return;
+
+ /* restore the padding in iso packets */
+- if (usbip_pad_iso(ud, urb) < 0)
+- return;
++ usbip_pad_iso(ud, urb);
+
+ if (usbip_dbg_flag_vhci_rx)
+ usbip_dump_urb(urb);
+diff --git a/drivers/staging/vt6656/bssdb.h b/drivers/staging/vt6656/bssdb.h
+index a8f97eb..991ce3e 100644
+--- a/drivers/staging/vt6656/bssdb.h
++++ b/drivers/staging/vt6656/bssdb.h
+@@ -92,7 +92,6 @@ typedef struct tagSRSNCapObject {
+ } SRSNCapObject, *PSRSNCapObject;
+
+ // BSS info(AP)
+-#pragma pack(1)
+ typedef struct tagKnownBSS {
+ // BSS info
+ BOOL bActive;
+diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h
+index 3176c8d..c731b12 100644
+--- a/drivers/staging/vt6656/int.h
++++ b/drivers/staging/vt6656/int.h
+@@ -34,7 +34,6 @@
+ #include "device.h"
+
+ /*--------------------- Export Definitions -------------------------*/
+-#pragma pack(1)
+ typedef struct tagSINTData {
+ BYTE byTSR0;
+ BYTE byPkt0;
+diff --git a/drivers/staging/vt6656/iocmd.h b/drivers/staging/vt6656/iocmd.h
+index 22710ce..ae6e2d2 100644
+--- a/drivers/staging/vt6656/iocmd.h
++++ b/drivers/staging/vt6656/iocmd.h
+@@ -95,13 +95,12 @@ typedef enum tagWZONETYPE {
+ // Ioctl interface structure
+ // Command structure
+ //
+-#pragma pack(1)
+ typedef struct tagSCmdRequest {
+ u8 name[16];
+ void *data;
+ u16 wResult;
+ u16 wCmdCode;
+-} SCmdRequest, *PSCmdRequest;
++} __packed SCmdRequest, *PSCmdRequest;
+
+ //
+ // Scan
+@@ -111,7 +110,7 @@ typedef struct tagSCmdScan {
+
+ u8 ssid[SSID_MAXLEN + 2];
+
+-} SCmdScan, *PSCmdScan;
++} __packed SCmdScan, *PSCmdScan;
+
+ //
+ // BSS Join
+@@ -126,7 +125,7 @@ typedef struct tagSCmdBSSJoin {
+ BOOL bPSEnable;
+ BOOL bShareKeyAuth;
+
+-} SCmdBSSJoin, *PSCmdBSSJoin;
++} __packed SCmdBSSJoin, *PSCmdBSSJoin;
+
+ //
+ // Zonetype Setting
+@@ -137,7 +136,7 @@ typedef struct tagSCmdZoneTypeSet {
+ BOOL bWrite;
+ WZONETYPE ZoneType;
+
+-} SCmdZoneTypeSet, *PSCmdZoneTypeSet;
++} __packed SCmdZoneTypeSet, *PSCmdZoneTypeSet;
+
+ typedef struct tagSWPAResult {
+ char ifname[100];
+@@ -145,7 +144,7 @@ typedef struct tagSWPAResult {
+ u8 key_mgmt;
+ u8 eap_type;
+ BOOL authenticated;
+-} SWPAResult, *PSWPAResult;
++} __packed SWPAResult, *PSWPAResult;
+
+ typedef struct tagSCmdStartAP {
+
+@@ -157,7 +156,7 @@ typedef struct tagSCmdStartAP {
+ BOOL bShareKeyAuth;
+ u8 byBasicRate;
+
+-} SCmdStartAP, *PSCmdStartAP;
++} __packed SCmdStartAP, *PSCmdStartAP;
+
+ typedef struct tagSCmdSetWEP {
+
+@@ -167,7 +166,7 @@ typedef struct tagSCmdSetWEP {
+ BOOL bWepKeyAvailable[WEP_NKEYS];
+ u32 auWepKeyLength[WEP_NKEYS];
+
+-} SCmdSetWEP, *PSCmdSetWEP;
++} __packed SCmdSetWEP, *PSCmdSetWEP;
+
+ typedef struct tagSBSSIDItem {
+
+@@ -180,14 +179,14 @@ typedef struct tagSBSSIDItem {
+ BOOL bWEPOn;
+ u32 uRSSI;
+
+-} SBSSIDItem;
++} __packed SBSSIDItem;
+
+
+ typedef struct tagSBSSIDList {
+
+ u32 uItem;
+ SBSSIDItem sBSSIDList[0];
+-} SBSSIDList, *PSBSSIDList;
++} __packed SBSSIDList, *PSBSSIDList;
+
+
+ typedef struct tagSNodeItem {
+@@ -208,7 +207,7 @@ typedef struct tagSNodeItem {
+ u32 uTxAttempts;
+ u16 wFailureRatio;
+
+-} SNodeItem;
++} __packed SNodeItem;
+
+
+ typedef struct tagSNodeList {
+@@ -216,7 +215,7 @@ typedef struct tagSNodeList {
+ u32 uItem;
+ SNodeItem sNodeList[0];
+
+-} SNodeList, *PSNodeList;
++} __packed SNodeList, *PSNodeList;
+
+
+ typedef struct tagSCmdLinkStatus {
+@@ -229,7 +228,7 @@ typedef struct tagSCmdLinkStatus {
+ u32 uChannel;
+ u32 uLinkRate;
+
+-} SCmdLinkStatus, *PSCmdLinkStatus;
++} __packed SCmdLinkStatus, *PSCmdLinkStatus;
+
+ //
+ // 802.11 counter
+@@ -247,7 +246,7 @@ typedef struct tagSDot11MIBCount {
+ u32 ReceivedFragmentCount;
+ u32 MulticastReceivedFrameCount;
+ u32 FCSErrorCount;
+-} SDot11MIBCount, *PSDot11MIBCount;
++} __packed SDot11MIBCount, *PSDot11MIBCount;
+
+
+
+@@ -355,13 +354,13 @@ typedef struct tagSStatMIBCount {
+ u32 ullTxBroadcastBytes[2];
+ u32 ullTxMulticastBytes[2];
+ u32 ullTxDirectedBytes[2];
+-} SStatMIBCount, *PSStatMIBCount;
++} __packed SStatMIBCount, *PSStatMIBCount;
+
+ typedef struct tagSCmdValue {
+
+ u32 dwValue;
+
+-} SCmdValue, *PSCmdValue;
++} __packed SCmdValue, *PSCmdValue;
+
+ //
+ // hostapd & viawget ioctl related
+@@ -431,7 +430,7 @@ struct viawget_hostapd_param {
+ u8 ssid[32];
+ } scan_req;
+ } u;
+-};
++} __packed;
+
+ /*--------------------- Export Classes ----------------------------*/
+
+diff --git a/drivers/staging/vt6656/iowpa.h b/drivers/staging/vt6656/iowpa.h
+index 959c886..2522dde 100644
+--- a/drivers/staging/vt6656/iowpa.h
++++ b/drivers/staging/vt6656/iowpa.h
+@@ -67,12 +67,11 @@ enum {
+
+
+
+-#pragma pack(1)
+ typedef struct viawget_wpa_header {
+ u8 type;
+ u16 req_ie_len;
+ u16 resp_ie_len;
+-} viawget_wpa_header;
++} __packed viawget_wpa_header;
+
+ struct viawget_wpa_param {
+ u32 cmd;
+@@ -113,9 +112,8 @@ struct viawget_wpa_param {
+ u8 *buf;
+ } scan_results;
+ } u;
+-};
++} __packed;
+
+-#pragma pack(1)
+ struct viawget_scan_result {
+ u8 bssid[6];
+ u8 ssid[32];
+@@ -130,7 +128,7 @@ struct viawget_scan_result {
+ int noise;
+ int level;
+ int maxrate;
+-};
++} __packed;
+
+ /*--------------------- Export Classes ----------------------------*/
+
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 34d114a..9176b2e 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -4539,7 +4539,7 @@ int transport_send_check_condition_and_sense(
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL UNIT COMMUNICATION FAILURE */
+- buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
++ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x08;
+ break;
+ }
+ /*
+diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
+index 3f28fdb..ab0a3fa 100644
+--- a/drivers/target/tcm_fc/tfc_sess.c
++++ b/drivers/target/tcm_fc/tfc_sess.c
+@@ -390,11 +390,11 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
+
+ tport = ft_tport_create(rdata->local_port);
+ if (!tport)
+- return 0; /* not a target for this local port */
++ goto not_target; /* not a target for this local port */
+
+ acl = ft_acl_get(tport->tpg, rdata);
+ if (!acl)
+- return 0;
++ goto not_target; /* no target for this remote */
+
+ if (!rspp)
+ goto fill;
+@@ -431,12 +431,18 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
+
+ /*
+ * OR in our service parameters with other provider (initiator), if any.
+- * TBD XXX - indicate RETRY capability?
+ */
+ fill:
+ fcp_parm = ntohl(spp->spp_params);
++ fcp_parm &= ~FCP_SPPF_RETRY;
+ spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
+ return FC_SPP_RESP_ACK;
++
++not_target:
++ fcp_parm = ntohl(spp->spp_params);
++ fcp_parm &= ~FCP_SPPF_TARG_FCN;
++ spp->spp_params = htonl(fcp_parm);
++ return 0;
+ }
+
+ /**
+diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
+index 70585b6..90dad17 100644
+--- a/drivers/tty/serial/8250.c
++++ b/drivers/tty/serial/8250.c
+@@ -316,6 +316,12 @@ static const struct serial8250_config uart_config[] = {
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .flags = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR,
+ },
++ [PORT_BRCM_TRUMANAGE] = {
++ .name = "TruManage",
++ .fifo_size = 1,
++ .tx_loadsz = 1024,
++ .flags = UART_CAP_HFIFO,
++ },
+ };
+
+ #if defined(CONFIG_MIPS_ALCHEMY)
+@@ -1511,6 +1517,11 @@ static void transmit_chars(struct uart_8250_port *up)
+ up->port.icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
++ if (up->capabilities & UART_CAP_HFIFO) {
++ if ((serial_in(up, UART_LSR) & BOTH_EMPTY) !=
++ BOTH_EMPTY)
++ break;
++ }
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+diff --git a/drivers/tty/serial/8250.h b/drivers/tty/serial/8250.h
+index 6edf4a6..902adcd 100644
+--- a/drivers/tty/serial/8250.h
++++ b/drivers/tty/serial/8250.h
+@@ -43,6 +43,7 @@ struct serial8250_config {
+ #define UART_CAP_AFE (1 << 11) /* MCR-based hw flow control */
+ #define UART_CAP_UUE (1 << 12) /* UART needs IER bit 6 set (Xscale) */
+ #define UART_CAP_RTOIE (1 << 13) /* UART needs IER bit 4 set (Xscale, Tegra) */
++#define UART_CAP_HFIFO (1 << 14) /* UART has a "hidden" FIFO */
+
+ #define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */
+ #define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */
+diff --git a/drivers/tty/serial/8250_dw.c b/drivers/tty/serial/8250_dw.c
+index bf1fba6..b6278c1 100644
+--- a/drivers/tty/serial/8250_dw.c
++++ b/drivers/tty/serial/8250_dw.c
+@@ -79,7 +79,7 @@ static int dw8250_handle_irq(struct uart_port *p)
+ } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
+ /* Clear the USR and write the LCR again. */
+ (void)p->serial_in(p, UART_USR);
+- p->serial_out(p, d->last_lcr, UART_LCR);
++ p->serial_out(p, UART_LCR, d->last_lcr);
+
+ return 1;
+ }
+diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
+index e7d82c1..a753956 100644
+--- a/drivers/tty/serial/8250_pci.c
++++ b/drivers/tty/serial/8250_pci.c
+@@ -1077,6 +1077,18 @@ pci_omegapci_setup(struct serial_private *priv,
+ return setup_port(priv, port, 2, idx * 8, 0);
+ }
+
++static int
++pci_brcm_trumanage_setup(struct serial_private *priv,
++ const struct pciserial_board *board,
++ struct uart_port *port, int idx)
++{
++ int ret = pci_default_setup(priv, board, port, idx);
++
++ port->type = PORT_BRCM_TRUMANAGE;
++ port->flags = (port->flags | UPF_FIXED_PORT | UPF_FIXED_TYPE);
++ return ret;
++}
++
+ static int skip_tx_en_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+@@ -1138,6 +1150,7 @@ pci_xr17c154_setup(struct serial_private *priv,
+ #define PCI_DEVICE_ID_OXSEMI_16PCI958 0x9538
+ #define PCIE_DEVICE_ID_NEO_2_OX_IBM 0x00F6
+ #define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001
++#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
+
+ /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
+@@ -1672,6 +1685,17 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ .setup = pci_omegapci_setup,
+ },
+ /*
++ * Broadcom TruManage (NetXtreme)
++ */
++ {
++ .vendor = PCI_VENDOR_ID_BROADCOM,
++ .device = PCI_DEVICE_ID_BROADCOM_TRUMANAGE,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ .setup = pci_brcm_trumanage_setup,
++ },
++
++ /*
+ * Default "match everything" terminator entry
+ */
+ {
+@@ -1860,6 +1884,7 @@ enum pci_board_num_t {
+ pbn_ce4100_1_115200,
+ pbn_omegapci,
+ pbn_NETMOS9900_2s_115200,
++ pbn_brcm_trumanage,
+ };
+
+ /*
+@@ -2566,6 +2591,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
+ .num_ports = 2,
+ .base_baud = 115200,
+ },
++ [pbn_brcm_trumanage] = {
++ .flags = FL_BASE0,
++ .num_ports = 1,
++ .reg_shift = 2,
++ .base_baud = 115200,
++ },
+ };
+
+ static const struct pci_device_id softmodem_blacklist[] = {
+@@ -4108,6 +4139,13 @@ static struct pci_device_id serial_pci_tbl[] = {
+ pbn_omegapci },
+
+ /*
++ * Broadcom TruManage
++ */
++ { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BROADCOM_TRUMANAGE,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_brcm_trumanage },
++
++ /*
+ * These entries match devices with class COMMUNICATION_SERIAL,
+ * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
+ */
+diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
+index 426434e..a6a6777 100644
+--- a/drivers/tty/serial/ifx6x60.c
++++ b/drivers/tty/serial/ifx6x60.c
+@@ -552,6 +552,7 @@ static void ifx_port_shutdown(struct tty_port *port)
+ container_of(port, struct ifx_spi_device, tty_port);
+
+ mrdy_set_low(ifx_dev);
++ del_timer(&ifx_dev->spi_timer);
+ clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
+ tasklet_kill(&ifx_dev->io_work_tasklet);
+ }
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index df7f15d..0cdff38 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1509,6 +1509,9 @@ static const struct usb_device_id acm_ids[] = {
+ { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
+ .driver_info = NO_UNION_NORMAL,
+ },
++ { USB_DEVICE(0x05f9, 0x4002), /* PSC Scanning, Magellan 800i */
++ .driver_info = NO_UNION_NORMAL,
++ },
+ { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index a9a74d2..0ff8e9a 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -619,6 +619,60 @@ static int hub_hub_status(struct usb_hub *hub,
+ return ret;
+ }
+
++static int hub_set_port_link_state(struct usb_hub *hub, int port1,
++ unsigned int link_status)
++{
++ return set_port_feature(hub->hdev,
++ port1 | (link_status << 3),
++ USB_PORT_FEAT_LINK_STATE);
++}
++
++/*
++ * If USB 3.0 ports are placed into the Disabled state, they will no longer
++ * detect any device connects or disconnects. This is generally not what the
++ * USB core wants, since it expects a disabled port to produce a port status
++ * change event when a new device connects.
++ *
++ * Instead, set the link state to Disabled, wait for the link to settle into
++ * that state, clear any change bits, and then put the port into the RxDetect
++ * state.
++ */
++static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
++{
++ int ret;
++ int total_time;
++ u16 portchange, portstatus;
++
++ if (!hub_is_superspeed(hub->hdev))
++ return -EINVAL;
++
++ ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
++ if (ret) {
++ dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
++ port1, ret);
++ return ret;
++ }
++
++ /* Wait for the link to enter the disabled state. */
++ for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
++ ret = hub_port_status(hub, port1, &portstatus, &portchange);
++ if (ret < 0)
++ return ret;
++
++ if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
++ USB_SS_PORT_LS_SS_DISABLED)
++ break;
++ if (total_time >= HUB_DEBOUNCE_TIMEOUT)
++ break;
++ msleep(HUB_DEBOUNCE_STEP);
++ }
++ if (total_time >= HUB_DEBOUNCE_TIMEOUT)
++ dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n",
++ port1, total_time);
++
++ return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
++}
++
+ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
+ {
+ struct usb_device *hdev = hub->hdev;
+@@ -627,8 +681,13 @@ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
+ if (hdev->children[port1-1] && set_state)
+ usb_set_device_state(hdev->children[port1-1],
+ USB_STATE_NOTATTACHED);
+- if (!hub->error && !hub_is_superspeed(hub->hdev))
+- ret = clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE);
++ if (!hub->error) {
++ if (hub_is_superspeed(hub->hdev))
++ ret = hub_usb3_port_disable(hub, port1);
++ else
++ ret = clear_port_feature(hdev, port1,
++ USB_PORT_FEAT_ENABLE);
++ }
+ if (ret)
+ dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
+ port1, ret);
+@@ -2046,7 +2105,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
+ #define HUB_SHORT_RESET_TIME 10
+ #define HUB_BH_RESET_TIME 50
+ #define HUB_LONG_RESET_TIME 200
+-#define HUB_RESET_TIMEOUT 500
++#define HUB_RESET_TIMEOUT 800
+
+ static int hub_port_reset(struct usb_hub *hub, int port1,
+ struct usb_device *udev, unsigned int delay, bool warm);
+@@ -2081,6 +2140,10 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ if (ret < 0)
+ return ret;
+
++ /* The port state is unknown until the reset completes. */
++ if ((portstatus & USB_PORT_STAT_RESET))
++ goto delay;
++
+ /*
+ * Some buggy devices require a warm reset to be issued even
+ * when the port appears not to be connected.
+@@ -2126,11 +2189,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ if ((portchange & USB_PORT_STAT_C_CONNECTION))
+ return -ENOTCONN;
+
+- /* if we`ve finished resetting, then break out of
+- * the loop
+- */
+- if (!(portstatus & USB_PORT_STAT_RESET) &&
+- (portstatus & USB_PORT_STAT_ENABLE)) {
++ if ((portstatus & USB_PORT_STAT_ENABLE)) {
+ if (hub_is_wusb(hub))
+ udev->speed = USB_SPEED_WIRELESS;
+ else if (hub_is_superspeed(hub->hdev))
+@@ -2144,10 +2203,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ return 0;
+ }
+ } else {
+- if (portchange & USB_PORT_STAT_C_BH_RESET)
+- return 0;
++ if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
++ hub_port_warm_reset_required(hub,
++ portstatus))
++ return -ENOTCONN;
++
++ return 0;
+ }
+
++delay:
+ /* switch to the long delay after two short delay failures */
+ if (delay_time >= 2 * HUB_SHORT_RESET_TIME)
+ delay = HUB_LONG_RESET_TIME;
+@@ -2171,14 +2235,11 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
+ msleep(10 + 40);
+ update_devnum(udev, 0);
+ hcd = bus_to_hcd(udev->bus);
+- if (hcd->driver->reset_device) {
+- *status = hcd->driver->reset_device(hcd, udev);
+- if (*status < 0) {
+- dev_err(&udev->dev, "Cannot reset "
+- "HCD device state\n");
+- break;
+- }
+- }
++ /* The xHC may think the device is already reset,
++ * so ignore the status.
++ */
++ if (hcd->driver->reset_device)
++ hcd->driver->reset_device(hcd, udev);
+ }
+ /* FALL THROUGH */
+ case -ENOTCONN:
+@@ -2186,16 +2247,16 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
+ clear_port_feature(hub->hdev,
+ port1, USB_PORT_FEAT_C_RESET);
+ /* FIXME need disconnect() for NOTATTACHED device */
+- if (warm) {
++ if (hub_is_superspeed(hub->hdev)) {
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_BH_PORT_RESET);
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_PORT_LINK_STATE);
+- } else {
++ }
++ if (!warm)
+ usb_set_device_state(udev, *status
+ ? USB_STATE_NOTATTACHED
+ : USB_STATE_DEFAULT);
+- }
+ break;
+ }
+ }
+@@ -2469,7 +2530,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
+ static int finish_port_resume(struct usb_device *udev)
+ {
+ int status = 0;
+- u16 devstatus;
++ u16 devstatus = 0;
+
+ /* caller owns the udev device lock */
+ dev_dbg(&udev->dev, "%s\n",
+@@ -2514,7 +2575,13 @@ static int finish_port_resume(struct usb_device *udev)
+ if (status) {
+ dev_dbg(&udev->dev, "gone after usb resume? status %d\n",
+ status);
+- } else if (udev->actconfig) {
++ /*
++ * There are a few quirky devices which violate the standard
++ * by claiming to have remote wakeup enabled after a reset,
++ * which crash if the feature is cleared, hence check for
++ * udev->reset_resume
++ */
++ } else if (udev->actconfig && !udev->reset_resume) {
+ le16_to_cpus(&devstatus);
+ if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
+ status = usb_control_msg(udev,
+@@ -3663,9 +3730,14 @@ static void hub_events(void)
+ * SS.Inactive state.
+ */
+ if (hub_port_warm_reset_required(hub, portstatus)) {
++ int status;
++
+ dev_dbg(hub_dev, "warm reset port %d\n", i);
+- hub_port_reset(hub, i, NULL,
++ status = hub_port_reset(hub, i, NULL,
+ HUB_BH_RESET_TIME, true);
++ if (status < 0)
++ hub_port_disable(hub, i, 1);
++ connect_change = 0;
+ }
+
+ if (connect_change)
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 8b2a9d8..3f08c09 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -38,6 +38,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Creative SB Audigy 2 NX */
+ { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* Microsoft LifeCam-VX700 v2.0 */
++ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ /* Logitech Webcam C200 */
+ { USB_DEVICE(0x046d, 0x0802), .driver_info = USB_QUIRK_RESET_RESUME },
+
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 3700aa6..e9637f9 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1277,6 +1277,7 @@ static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
+
+ if (epnum == 0 || epnum == 1) {
+ dep->endpoint.maxpacket = 512;
++ dep->endpoint.maxburst = 1;
+ dep->endpoint.ops = &dwc3_gadget_ep0_ops;
+ if (!epnum)
+ dwc->gadget.ep0 = &dep->endpoint;
+diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
+index d584eaf..c7423a7 100644
+--- a/drivers/usb/gadget/dummy_hcd.c
++++ b/drivers/usb/gadget/dummy_hcd.c
+@@ -126,10 +126,7 @@ static const char ep0name [] = "ep0";
+ static const char *const ep_name [] = {
+ ep0name, /* everyone has ep0 */
+
+- /* act like a net2280: high speed, six configurable endpoints */
+- "ep-a", "ep-b", "ep-c", "ep-d", "ep-e", "ep-f",
+-
+- /* or like pxa250: fifteen fixed function endpoints */
++ /* act like a pxa250: fifteen fixed function endpoints */
+ "ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int",
+ "ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int",
+ "ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso",
+@@ -137,6 +134,10 @@ static const char *const ep_name [] = {
+
+ /* or like sa1100: two fixed function endpoints */
+ "ep1out-bulk", "ep2in-bulk",
++
++ /* and now some generic EPs so we have enough in multi config */
++ "ep3out", "ep4in", "ep5out", "ep6out", "ep7in", "ep8out", "ep9in",
++ "ep10out", "ep11out", "ep12in", "ep13out", "ep14in", "ep15out",
+ };
+ #define DUMMY_ENDPOINTS ARRAY_SIZE(ep_name)
+
+diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
+index c8ae199..b6989e4 100644
+--- a/drivers/usb/host/uhci-hcd.c
++++ b/drivers/usb/host/uhci-hcd.c
+@@ -448,6 +448,10 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
+ return IRQ_NONE;
+ uhci_writew(uhci, status, USBSTS); /* Clear it */
+
++ spin_lock(&uhci->lock);
++ if (unlikely(!uhci->is_initialized)) /* not yet configured */
++ goto done;
++
+ if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
+ if (status & USBSTS_HSE)
+ dev_err(uhci_dev(uhci), "host system error, "
+@@ -456,7 +460,6 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
+ dev_err(uhci_dev(uhci), "host controller process "
+ "error, something bad happened!\n");
+ if (status & USBSTS_HCH) {
+- spin_lock(&uhci->lock);
+ if (uhci->rh_state >= UHCI_RH_RUNNING) {
+ dev_err(uhci_dev(uhci),
+ "host controller halted, "
+@@ -474,15 +477,15 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
+ * pending unlinks */
+ mod_timer(&hcd->rh_timer, jiffies);
+ }
+- spin_unlock(&uhci->lock);
+ }
+ }
+
+- if (status & USBSTS_RD)
++ if (status & USBSTS_RD) {
++ spin_unlock(&uhci->lock);
+ usb_hcd_poll_rh_status(hcd);
+- else {
+- spin_lock(&uhci->lock);
++ } else {
+ uhci_scan_schedule(uhci);
++ done:
+ spin_unlock(&uhci->lock);
+ }
+
+@@ -660,9 +663,9 @@ static int uhci_start(struct usb_hcd *hcd)
+ */
+ mb();
+
++ spin_lock_irq(&uhci->lock);
+ configure_hc(uhci);
+ uhci->is_initialized = 1;
+- spin_lock_irq(&uhci->lock);
+ start_rh(uhci);
+ spin_unlock_irq(&uhci->lock);
+ return 0;
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 978860b..24107a7 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -725,12 +725,39 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ break;
+ case USB_PORT_FEAT_LINK_STATE:
+ temp = xhci_readl(xhci, port_array[wIndex]);
++
++ /* Disable port */
++ if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
++ xhci_dbg(xhci, "Disable port %d\n", wIndex);
++ temp = xhci_port_state_to_neutral(temp);
++ /*
++ * Clear all change bits, so that we get a new
++ * connection event.
++ */
++ temp |= PORT_CSC | PORT_PEC | PORT_WRC |
++ PORT_OCC | PORT_RC | PORT_PLC |
++ PORT_CEC;
++ xhci_writel(xhci, temp | PORT_PE,
++ port_array[wIndex]);
++ temp = xhci_readl(xhci, port_array[wIndex]);
++ break;
++ }
++
++ /* Put link in RxDetect (enable port) */
++ if (link_state == USB_SS_PORT_LS_RX_DETECT) {
++ xhci_dbg(xhci, "Enable port %d\n", wIndex);
++ xhci_set_link_state(xhci, port_array, wIndex,
++ link_state);
++ temp = xhci_readl(xhci, port_array[wIndex]);
++ break;
++ }
++
+ /* Software should not attempt to set
+- * port link state above '5' (Rx.Detect) and the port
++ * port link state above '3' (U3) and the port
+ * must be enabled.
+ */
+ if ((temp & PORT_PE) == 0 ||
+- (link_state > USB_SS_PORT_LS_RX_DETECT)) {
++ (link_state > USB_SS_PORT_LS_U3)) {
+ xhci_warn(xhci, "Cannot set link state.\n");
+ goto error;
+ }
+@@ -877,6 +904,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+ int max_ports;
+ __le32 __iomem **port_array;
+ struct xhci_bus_state *bus_state;
++ bool reset_change = false;
+
+ max_ports = xhci_get_ports(hcd, &port_array);
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
+@@ -903,6 +931,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+ buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
+ status = 1;
+ }
++ if ((temp & PORT_RC))
++ reset_change = true;
++ }
++ if (!status && !reset_change) {
++ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
++ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return status ? retval : 0;
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 5719c4d..ee5ec11 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1152,6 +1152,8 @@ static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
+ static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+ {
++ if (ep->desc.bInterval == 0)
++ return 0;
+ return xhci_microframes_to_exponent(udev, ep,
+ ep->desc.bInterval, 0, 15);
+ }
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 1ba98f5..2ed591d 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1661,6 +1661,15 @@ cleanup:
+ if (bogus_port_status)
+ return;
+
++ /*
++ * xHCI port-status-change events occur when the "or" of all the
++ * status-change bits in the portsc register changes from 0 to 1.
++ * New status changes won't cause an event if any other change
++ * bits are still set. When an event occurs, switch over to
++ * polling to avoid losing status changes.
++ */
++ xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
++ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ spin_unlock(&xhci->lock);
+ /* Pass this up to the core */
+ usb_hcd_poll_rh_status(hcd);
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 9dc5870..53c8be1 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -871,6 +871,11 @@ int xhci_suspend(struct xhci_hcd *xhci)
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ u32 command;
+
++ /* Don't poll the roothubs on bus suspend. */
++ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
++ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
++ del_timer_sync(&hcd->rh_timer);
++
+ spin_lock_irq(&xhci->lock);
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+@@ -1055,6 +1060,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
+ compliance_mode_recovery_timer_init(xhci);
+
++ /* Re-enable port polling. */
++ xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
++ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
++ usb_hcd_poll_rh_status(hcd);
++
+ return retval;
+ }
+ #endif /* CONFIG_PM */
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 920f04e..641caf8 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -2372,10 +2372,7 @@ static int __init musb_init(void)
+ if (usb_disabled())
+ return 0;
+
+- pr_info("%s: version " MUSB_VERSION ", "
+- "?dma?"
+- ", "
+- "otg (peripheral+host)",
++ pr_info("%s: version " MUSB_VERSION ", ?dma?, otg (peripheral+host)\n",
+ musb_driver_name);
+ return platform_driver_probe(&musb_driver, musb_probe);
+ }
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 3f989d6..2cc7c18 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -881,6 +881,8 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
++ /* Crucible Devices */
++ { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
+ { }, /* Optional parameter entry */
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index aedf65f..dd6edf8 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1259,3 +1259,9 @@
+ * ATI command output: Cinterion MC55i
+ */
+ #define FTDI_CINTERION_MC55I_PID 0xA951
++
++/*
++ * Product: Comet Caller ID decoder
++ * Manufacturer: Crucible Technologies
++ */
++#define FTDI_CT_COMET_PID 0x8e08
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index 8a90d58..3de751d 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -558,6 +558,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
+ wait_queue_t wait;
+ unsigned long flags;
+
++ if (!tty)
++ return;
++
+ if (!timeout)
+ timeout = (HZ * EDGE_CLOSING_WAIT)/100;
+
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 872807b..9db3e23 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -288,6 +288,7 @@ static void option_instat_callback(struct urb *urb);
+ #define ALCATEL_VENDOR_ID 0x1bbb
+ #define ALCATEL_PRODUCT_X060S_X200 0x0000
+ #define ALCATEL_PRODUCT_X220_X500D 0x0017
++#define ALCATEL_PRODUCT_L100V 0x011e
+
+ #define PIRELLI_VENDOR_ID 0x1266
+ #define PIRELLI_PRODUCT_C100_1 0x1002
+@@ -429,9 +430,12 @@ static void option_instat_callback(struct urb *urb);
+ #define MEDIATEK_VENDOR_ID 0x0e8d
+ #define MEDIATEK_PRODUCT_DC_1COM 0x00a0
+ #define MEDIATEK_PRODUCT_DC_4COM 0x00a5
++#define MEDIATEK_PRODUCT_DC_4COM2 0x00a7
+ #define MEDIATEK_PRODUCT_DC_5COM 0x00a4
+ #define MEDIATEK_PRODUCT_7208_1COM 0x7101
+ #define MEDIATEK_PRODUCT_7208_2COM 0x7102
++#define MEDIATEK_PRODUCT_7103_2COM 0x7103
++#define MEDIATEK_PRODUCT_7106_2COM 0x7106
+ #define MEDIATEK_PRODUCT_FP_1COM 0x0003
+ #define MEDIATEK_PRODUCT_FP_2COM 0x0023
+ #define MEDIATEK_PRODUCT_FPDC_1COM 0x0043
+@@ -441,6 +445,14 @@ static void option_instat_callback(struct urb *urb);
+ #define CELLIENT_VENDOR_ID 0x2692
+ #define CELLIENT_PRODUCT_MEN200 0x9005
+
++/* Hyundai Petatel Inc. products */
++#define PETATEL_VENDOR_ID 0x1ff4
++#define PETATEL_PRODUCT_NP10T 0x600e
++
++/* TP-LINK Incorporated products */
++#define TPLINK_VENDOR_ID 0x2357
++#define TPLINK_PRODUCT_MA180 0x0201
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+@@ -922,8 +934,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+@@ -1190,6 +1204,8 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+ },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
+ { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+@@ -1294,7 +1310,14 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7103_2COM, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
+ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
++ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
++ { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
+index 6873bb6..2263144 100644
+--- a/fs/cifs/cifs_dfs_ref.c
++++ b/fs/cifs/cifs_dfs_ref.c
+@@ -226,6 +226,8 @@ compose_mount_options_out:
+ compose_mount_options_err:
+ kfree(mountdata);
+ mountdata = ERR_PTR(rc);
++ kfree(*devname);
++ *devname = NULL;
+ goto compose_mount_options_out;
+ }
+
+diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
+index a86873e..31df53e 100644
+--- a/fs/nfs/namespace.c
++++ b/fs/nfs/namespace.c
+@@ -289,11 +289,31 @@ out_nofree:
+ return mnt;
+ }
+
++static int
++nfs_namespace_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
++{
++ if (NFS_FH(dentry->d_inode)->size != 0)
++ return nfs_getattr(mnt, dentry, stat);
++ generic_fillattr(dentry->d_inode, stat);
++ return 0;
++}
++
++static int
++nfs_namespace_setattr(struct dentry *dentry, struct iattr *attr)
++{
++ if (NFS_FH(dentry->d_inode)->size != 0)
++ return nfs_setattr(dentry, attr);
++ return -EACCES;
++}
++
+ const struct inode_operations nfs_mountpoint_inode_operations = {
+ .getattr = nfs_getattr,
++ .setattr = nfs_setattr,
+ };
+
+ const struct inode_operations nfs_referral_inode_operations = {
++ .getattr = nfs_namespace_getattr,
++ .setattr = nfs_namespace_setattr,
+ };
+
+ static void nfs_expire_automounts(struct work_struct *work)
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 07354b7..b2e1136 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1583,8 +1583,18 @@ static int nfs4_reset_session(struct nfs_client *clp)
+
+ nfs4_begin_drain_session(clp);
+ status = nfs4_proc_destroy_session(clp->cl_session);
+- if (status && status != -NFS4ERR_BADSESSION &&
+- status != -NFS4ERR_DEADSESSION) {
++ switch (status) {
++ case 0:
++ case -NFS4ERR_BADSESSION:
++ case -NFS4ERR_DEADSESSION:
++ break;
++ case -NFS4ERR_BACK_CHAN_BUSY:
++ case -NFS4ERR_DELAY:
++ set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
++ status = 0;
++ ssleep(1);
++ goto out;
++ default:
+ status = nfs4_recovery_handle_error(clp, status);
+ goto out;
+ }
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index 574d4ee..b367581 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -88,11 +88,11 @@ xfs_destroy_ioend(
+ }
+
+ if (ioend->io_iocb) {
++ inode_dio_done(ioend->io_inode);
+ if (ioend->io_isasync) {
+ aio_complete(ioend->io_iocb, ioend->io_error ?
+ ioend->io_error : ioend->io_result, 0);
+ }
+- inode_dio_done(ioend->io_inode);
+ }
+
+ mempool_free(ioend, xfs_ioend_pool);
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 1328d8c..1721c41 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -364,17 +364,30 @@ extern int __init efi_setup_pcdp_console(char *);
+ #endif
+
+ /*
+- * We play games with efi_enabled so that the compiler will, if possible, remove
+- * EFI-related code altogether.
++ * We play games with efi_enabled so that the compiler will, if
++ * possible, remove EFI-related code altogether.
+ */
++#define EFI_BOOT 0 /* Were we booted from EFI? */
++#define EFI_SYSTEM_TABLES 1 /* Can we use EFI system tables? */
++#define EFI_CONFIG_TABLES 2 /* Can we use EFI config tables? */
++#define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */
++#define EFI_MEMMAP 4 /* Can we use EFI memory map? */
++#define EFI_64BIT 5 /* Is the firmware 64-bit? */
++
+ #ifdef CONFIG_EFI
+ # ifdef CONFIG_X86
+- extern int efi_enabled;
++extern int efi_enabled(int facility);
+ # else
+-# define efi_enabled 1
++static inline int efi_enabled(int facility)
++{
++ return 1;
++}
+ # endif
+ #else
+-# define efi_enabled 0
++static inline int efi_enabled(int facility)
++{
++ return 0;
++}
+ #endif
+
+ /*
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index eadf33d..8bec265 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -47,7 +47,8 @@
+ #define PORT_U6_16550A 19 /* ST-Ericsson U6xxx internal UART */
+ #define PORT_TEGRA 20 /* NVIDIA Tegra internal UART */
+ #define PORT_XR17D15X 21 /* Exar XR17D15x UART */
+-#define PORT_MAX_8250 21 /* max port ID */
++#define PORT_BRCM_TRUMANAGE 22
++#define PORT_MAX_8250 22 /* max port ID */
+
+ /*
+ * ARM specific type numbers. These are not currently guaranteed
+diff --git a/include/linux/syslog.h b/include/linux/syslog.h
+index 3891139..ce4c665 100644
+--- a/include/linux/syslog.h
++++ b/include/linux/syslog.h
+@@ -47,6 +47,12 @@
+ #define SYSLOG_FROM_CALL 0
+ #define SYSLOG_FROM_FILE 1
+
++/*
++ * Syslog priority (PRI) maximum length in char : '<[0-9]{1,3}>'
++ * See RFC5424 for details
++*/
++#define SYSLOG_PRI_MAX_LENGTH 5
++
+ int do_syslog(int type, char __user *buf, int count, bool from_file);
+
+ #endif /* _LINUX_SYSLOG_H */
+diff --git a/init/main.c b/init/main.c
+index cb08fea2..5d0eb1d 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -606,7 +606,7 @@ asmlinkage void __init start_kernel(void)
+ pidmap_init();
+ anon_vma_init();
+ #ifdef CONFIG_X86
+- if (efi_enabled)
++ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ efi_enter_virtual_mode();
+ #endif
+ thread_info_cache_init();
+diff --git a/kernel/printk.c b/kernel/printk.c
+index 7982a0a..c0d12ea 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -633,8 +633,19 @@ static void call_console_drivers(unsigned start, unsigned end)
+ start_print = start;
+ while (cur_index != end) {
+ if (msg_level < 0 && ((end - cur_index) > 2)) {
++ /*
++ * prepare buf_prefix, as a contiguous array,
++ * to be processed by log_prefix function
++ */
++ char buf_prefix[SYSLOG_PRI_MAX_LENGTH+1];
++ unsigned i;
++ for (i = 0; i < ((end - cur_index)) && (i < SYSLOG_PRI_MAX_LENGTH); i++) {
++ buf_prefix[i] = LOG_BUF(cur_index + i);
++ }
++ buf_prefix[i] = '\0'; /* force '\0' as last string character */
++
+ /* strip log prefix */
+- cur_index += log_prefix(&LOG_BUF(cur_index), &msg_level, NULL);
++ cur_index += log_prefix((const char *)&buf_prefix, &msg_level, NULL);
+ start_print = cur_index;
+ }
+ while (cur_index != end) {
+diff --git a/kernel/smp.c b/kernel/smp.c
+index db197d6..9e800b2 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -31,6 +31,7 @@ struct call_function_data {
+ struct call_single_data csd;
+ atomic_t refs;
+ cpumask_var_t cpumask;
++ cpumask_var_t cpumask_ipi;
+ };
+
+ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
+@@ -54,6 +55,9 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
+ cpu_to_node(cpu)))
+ return notifier_from_errno(-ENOMEM);
++ if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
++ cpu_to_node(cpu)))
++ return notifier_from_errno(-ENOMEM);
+ break;
+
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -63,6 +67,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ free_cpumask_var(cfd->cpumask);
++ free_cpumask_var(cfd->cpumask_ipi);
+ break;
+ #endif
+ };
+@@ -524,6 +529,12 @@ void smp_call_function_many(const struct cpumask *mask,
+ return;
+ }
+
++ /*
++ * After we put an entry into the list, data->cpumask
++ * may be cleared again when another CPU sends another IPI for
++ * a SMP function call, so data->cpumask will be zero.
++ */
++ cpumask_copy(data->cpumask_ipi, data->cpumask);
+ raw_spin_lock_irqsave(&call_function.lock, flags);
+ /*
+ * Place entry at the _HEAD_ of the list, so that any cpu still
+@@ -547,7 +558,7 @@ void smp_call_function_many(const struct cpumask *mask,
+ smp_mb();
+
+ /* Send a message to all CPUs in the map */
+- arch_send_call_function_ipi_mask(data->cpumask);
++ arch_send_call_function_ipi_mask(data->cpumask_ipi);
+
+ /* Optionally wait for the CPUs to complete */
+ if (wait)
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 54dba59..4b1a96b 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3482,7 +3482,7 @@ static int ftrace_module_notify(struct notifier_block *self,
+
+ struct notifier_block ftrace_module_nb = {
+ .notifier_call = ftrace_module_notify,
+- .priority = 0,
++ .priority = INT_MAX, /* Run before anything that can use kprobes */
+ };
+
+ extern unsigned long __start_mcount_loc[];
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 46973fb..5f8ec82 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -714,14 +714,12 @@ static int compact_node(int nid)
+ }
+
+ /* Compact all nodes in the system */
+-static int compact_nodes(void)
++static void compact_nodes(void)
+ {
+ int nid;
+
+ for_each_online_node(nid)
+ compact_node(nid);
+-
+- return COMPACT_COMPLETE;
+ }
+
+ /* The written value is actually unused, all memory is compacted */
+@@ -732,7 +730,7 @@ int sysctl_compaction_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos)
+ {
+ if (write)
+- return compact_nodes();
++ compact_nodes();
+
+ return 0;
+ }
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index a88dded..4d3a697 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5532,7 +5532,7 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
+ pfn &= (PAGES_PER_SECTION-1);
+ return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
+ #else
+- pfn = pfn - zone->zone_start_pfn;
++ pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
+ return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
+ #endif /* CONFIG_SPARSEMEM */
+ }
+diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
+index 075a3e9..0274157 100644
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -945,7 +945,7 @@ static int hidp_setup_hid(struct hidp_session *session,
+ hid->version = req->version;
+ hid->country = req->country;
+
+- strncpy(hid->name, req->name, 128);
++ strncpy(hid->name, req->name, sizeof(req->name) - 1);
+ strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64);
+ strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64);
+
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 1c775f0..488600c 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -1021,7 +1021,7 @@ void sta_info_init(struct ieee80211_local *local)
+
+ void sta_info_stop(struct ieee80211_local *local)
+ {
+- del_timer(&local->sta_cleanup);
++ del_timer_sync(&local->sta_cleanup);
+ sta_info_flush(local, NULL);
+ }
+
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 56c3f85..18c5a50 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -918,8 +918,7 @@ static void rpc_async_release(struct work_struct *work)
+
+ static void rpc_release_resources_task(struct rpc_task *task)
+ {
+- if (task->tk_rqstp)
+- xprt_release(task);
++ xprt_release(task);
+ if (task->tk_msg.rpc_cred) {
+ put_rpccred(task->tk_msg.rpc_cred);
+ task->tk_msg.rpc_cred = NULL;
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index ffba207..6c91208 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1132,10 +1132,18 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
+ void xprt_release(struct rpc_task *task)
+ {
+ struct rpc_xprt *xprt;
+- struct rpc_rqst *req;
++ struct rpc_rqst *req = task->tk_rqstp;
+
+- if (!(req = task->tk_rqstp))
++ if (req == NULL) {
++ if (task->tk_client) {
++ rcu_read_lock();
++ xprt = rcu_dereference(task->tk_client->cl_xprt);
++ if (xprt->snd_task == task)
++ xprt_release_write(xprt, task);
++ rcu_read_unlock();
++ }
+ return;
++ }
+
+ xprt = req->rq_xprt;
+ rpc_count_iostats(task);
+diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
+index 8738def..e76a470 100644
+--- a/security/integrity/evm/evm_crypto.c
++++ b/security/integrity/evm/evm_crypto.c
+@@ -175,9 +175,9 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
+ rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,
+ &xattr_data,
+ sizeof(xattr_data), 0);
+- }
+- else if (rc == -ENODATA)
++ } else if (rc == -ENODATA && inode->i_op->removexattr) {
+ rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM);
++ }
+ return rc;
+ }
+
+diff --git a/sound/arm/pxa2xx-ac97-lib.c b/sound/arm/pxa2xx-ac97-lib.c
+index d1aa421..52a4318 100644
+--- a/sound/arm/pxa2xx-ac97-lib.c
++++ b/sound/arm/pxa2xx-ac97-lib.c
+@@ -17,6 +17,7 @@
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/module.h>
++#include <linux/gpio.h>
+
+ #include <sound/ac97_codec.h>
+ #include <sound/pxa2xx-lib.h>
+@@ -147,6 +148,8 @@ static inline void pxa_ac97_warm_pxa27x(void)
+
+ static inline void pxa_ac97_cold_pxa27x(void)
+ {
++ unsigned int timeout;
++
+ GCR &= GCR_COLD_RST; /* clear everything but nCRST */
+ GCR &= ~GCR_COLD_RST; /* then assert nCRST */
+
+@@ -156,8 +159,10 @@ static inline void pxa_ac97_cold_pxa27x(void)
+ clk_enable(ac97conf_clk);
+ udelay(5);
+ clk_disable(ac97conf_clk);
+- GCR = GCR_COLD_RST;
+- udelay(50);
++ GCR = GCR_COLD_RST | GCR_WARM_RST;
++ timeout = 100; /* wait for the codec-ready bit to be set */
++ while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
++ mdelay(1);
+ }
+ #endif
+
+@@ -339,8 +344,21 @@ int __devinit pxa2xx_ac97_hw_probe(struct platform_device *dev)
+ }
+
+ if (cpu_is_pxa27x()) {
+- /* Use GPIO 113 as AC97 Reset on Bulverde */
++ /*
++ * This gpio is needed for a work-around to a bug in the ac97
++ * controller during warm reset. The direction and level is set
++ * here so that it is an output driven high when switching from
++ * AC97_nRESET alt function to generic gpio.
++ */
++ ret = gpio_request_one(reset_gpio, GPIOF_OUT_INIT_HIGH,
++ "pxa27x ac97 reset");
++ if (ret < 0) {
++ pr_err("%s: gpio_request_one() failed: %d\n",
++ __func__, ret);
++ goto err_conf;
++ }
+ pxa27x_assert_ac97reset(reset_gpio, 0);
++
+ ac97conf_clk = clk_get(&dev->dev, "AC97CONFCLK");
+ if (IS_ERR(ac97conf_clk)) {
+ ret = PTR_ERR(ac97conf_clk);
+@@ -383,6 +401,8 @@ EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_probe);
+
+ void pxa2xx_ac97_hw_remove(struct platform_device *dev)
+ {
++ if (cpu_is_pxa27x())
++ gpio_free(reset_gpio);
+ GCR |= GCR_ACLINK_OFF;
+ free_irq(IRQ_AC97, NULL);
+ if (ac97conf_clk) {
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index a1e312f..a166a85 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -569,29 +569,43 @@ static char *driver_short_names[] __devinitdata = {
+ #define get_azx_dev(substream) (substream->runtime->private_data)
+
+ #ifdef CONFIG_X86
+-static void __mark_pages_wc(struct azx *chip, void *addr, size_t size, bool on)
++static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on)
+ {
++ int pages;
++
+ if (azx_snoop(chip))
+ return;
+- if (addr && size) {
+- int pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ if (!dmab || !dmab->area || !dmab->bytes)
++ return;
++
++#ifdef CONFIG_SND_DMA_SGBUF
++ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) {
++ struct snd_sg_buf *sgbuf = dmab->private_data;
+ if (on)
+- set_memory_wc((unsigned long)addr, pages);
++ set_pages_array_wc(sgbuf->page_table, sgbuf->pages);
+ else
+- set_memory_wb((unsigned long)addr, pages);
++ set_pages_array_wb(sgbuf->page_table, sgbuf->pages);
++ return;
+ }
++#endif
++
++ pages = (dmab->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ if (on)
++ set_memory_wc((unsigned long)dmab->area, pages);
++ else
++ set_memory_wb((unsigned long)dmab->area, pages);
+ }
+
+ static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf,
+ bool on)
+ {
+- __mark_pages_wc(chip, buf->area, buf->bytes, on);
++ __mark_pages_wc(chip, buf, on);
+ }
+ static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
+- struct snd_pcm_runtime *runtime, bool on)
++ struct snd_pcm_substream *substream, bool on)
+ {
+ if (azx_dev->wc_marked != on) {
+- __mark_pages_wc(chip, runtime->dma_area, runtime->dma_bytes, on);
++ __mark_pages_wc(chip, substream->runtime->dma_buffer_p, on);
+ azx_dev->wc_marked = on;
+ }
+ }
+@@ -602,7 +616,7 @@ static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf,
+ {
+ }
+ static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
+- struct snd_pcm_runtime *runtime, bool on)
++ struct snd_pcm_substream *substream, bool on)
+ {
+ }
+ #endif
+@@ -1776,11 +1790,10 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
+ {
+ struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
+ struct azx *chip = apcm->chip;
+- struct snd_pcm_runtime *runtime = substream->runtime;
+ struct azx_dev *azx_dev = get_azx_dev(substream);
+ int ret;
+
+- mark_runtime_wc(chip, azx_dev, runtime, false);
++ mark_runtime_wc(chip, azx_dev, substream, false);
+ azx_dev->bufsize = 0;
+ azx_dev->period_bytes = 0;
+ azx_dev->format_val = 0;
+@@ -1788,7 +1801,7 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
+ params_buffer_bytes(hw_params));
+ if (ret < 0)
+ return ret;
+- mark_runtime_wc(chip, azx_dev, runtime, true);
++ mark_runtime_wc(chip, azx_dev, substream, true);
+ return ret;
+ }
+
+@@ -1797,7 +1810,6 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
+ struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
+ struct azx_dev *azx_dev = get_azx_dev(substream);
+ struct azx *chip = apcm->chip;
+- struct snd_pcm_runtime *runtime = substream->runtime;
+ struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
+
+ /* reset BDL address */
+@@ -1810,7 +1822,7 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
+
+ snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
+
+- mark_runtime_wc(chip, azx_dev, runtime, false);
++ mark_runtime_wc(chip, azx_dev, substream, false);
+ return snd_pcm_lib_free_pages(substream);
+ }
+
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 498b62e..c9269ce 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -592,24 +592,12 @@ static int conexant_build_controls(struct hda_codec *codec)
+ return 0;
+ }
+
+-#ifdef CONFIG_SND_HDA_POWER_SAVE
+-static int conexant_suspend(struct hda_codec *codec, pm_message_t state)
+-{
+- snd_hda_shutup_pins(codec);
+- return 0;
+-}
+-#endif
+-
+ static const struct hda_codec_ops conexant_patch_ops = {
+ .build_controls = conexant_build_controls,
+ .build_pcms = conexant_build_pcms,
+ .init = conexant_init,
+ .free = conexant_free,
+ .set_power_state = conexant_set_power,
+-#ifdef CONFIG_SND_HDA_POWER_SAVE
+- .suspend = conexant_suspend,
+-#endif
+- .reboot_notify = snd_hda_shutup_pins,
+ };
+
+ #ifdef CONFIG_SND_HDA_INPUT_BEEP
+@@ -4429,10 +4417,6 @@ static const struct hda_codec_ops cx_auto_patch_ops = {
+ .init = cx_auto_init,
+ .free = conexant_free,
+ .unsol_event = cx_auto_unsol_event,
+-#ifdef CONFIG_SND_HDA_POWER_SAVE
+- .suspend = conexant_suspend,
+-#endif
+- .reboot_notify = snd_hda_shutup_pins,
+ };
+
+ /*
+@@ -4614,6 +4598,18 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
+ .patch = patch_conexant_auto },
+ { .id = 0x14f150b9, .name = "CX20665",
+ .patch = patch_conexant_auto },
++ { .id = 0x14f1510f, .name = "CX20751/2",
++ .patch = patch_conexant_auto },
++ { .id = 0x14f15110, .name = "CX20751/2",
++ .patch = patch_conexant_auto },
++ { .id = 0x14f15111, .name = "CX20753/4",
++ .patch = patch_conexant_auto },
++ { .id = 0x14f15113, .name = "CX20755",
++ .patch = patch_conexant_auto },
++ { .id = 0x14f15114, .name = "CX20756",
++ .patch = patch_conexant_auto },
++ { .id = 0x14f15115, .name = "CX20757",
++ .patch = patch_conexant_auto },
+ {} /* terminator */
+ };
+
+@@ -4634,6 +4630,12 @@ MODULE_ALIAS("snd-hda-codec-id:14f150ab");
+ MODULE_ALIAS("snd-hda-codec-id:14f150ac");
+ MODULE_ALIAS("snd-hda-codec-id:14f150b8");
+ MODULE_ALIAS("snd-hda-codec-id:14f150b9");
++MODULE_ALIAS("snd-hda-codec-id:14f1510f");
++MODULE_ALIAS("snd-hda-codec-id:14f15110");
++MODULE_ALIAS("snd-hda-codec-id:14f15111");
++MODULE_ALIAS("snd-hda-codec-id:14f15113");
++MODULE_ALIAS("snd-hda-codec-id:14f15114");
++MODULE_ALIAS("snd-hda-codec-id:14f15115");
+
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Conexant HD-audio codec");
+diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
+index a3b9cbb..ba03dc2 100644
+--- a/sound/soc/codecs/wm2000.c
++++ b/sound/soc/codecs/wm2000.c
+@@ -224,9 +224,9 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
+
+ ret = wm2000_read(i2c, WM2000_REG_SPEECH_CLARITY);
+ if (wm2000->speech_clarity)
+- ret &= ~WM2000_SPEECH_CLARITY;
+- else
+ ret |= WM2000_SPEECH_CLARITY;
++ else
++ ret &= ~WM2000_SPEECH_CLARITY;
+ wm2000_write(i2c, WM2000_REG_SPEECH_CLARITY, ret);
+
+ wm2000_write(i2c, WM2000_REG_SYS_START0, 0x33);
+diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
+index 42d9039..a0cda1b 100644
+--- a/sound/soc/codecs/wm5100.c
++++ b/sound/soc/codecs/wm5100.c
+@@ -1446,15 +1446,9 @@ static int wm5100_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ case SND_SOC_DAIFMT_DSP_A:
+ mask = 0;
+ break;
+- case SND_SOC_DAIFMT_DSP_B:
+- mask = 1;
+- break;
+ case SND_SOC_DAIFMT_I2S:
+ mask = 2;
+ break;
+- case SND_SOC_DAIFMT_LEFT_J:
+- mask = 3;
+- break;
+ default:
+ dev_err(codec->dev, "Unsupported DAI format %d\n",
+ fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 24c5114..9ab2b3e 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -148,10 +148,8 @@ void snd_usb_release_substream_urbs(struct snd_usb_substream *subs, int force)
+ int i;
+
+ /* stop urbs (to be sure) */
+- if (!subs->stream->chip->shutdown) {
+- deactivate_urbs(subs, force, 1);
+- wait_clear_urbs(subs);
+- }
++ deactivate_urbs(subs, force, 1);
++ wait_clear_urbs(subs);
+
+ for (i = 0; i < MAX_URBS; i++)
+ release_urb_ctx(&subs->dataurb[i]);
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 6730a33..9121dee 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1239,16 +1239,23 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
+ }
+ channels = (hdr->bLength - 7) / csize - 1;
+ bmaControls = hdr->bmaControls;
++ if (hdr->bLength < 7 + csize) {
++ snd_printk(KERN_ERR "usbaudio: unit %u: "
++ "invalid UAC_FEATURE_UNIT descriptor\n",
++ unitid);
++ return -EINVAL;
++ }
+ } else {
+ struct uac2_feature_unit_descriptor *ftr = _ftr;
+ csize = 4;
+ channels = (hdr->bLength - 6) / 4 - 1;
+ bmaControls = ftr->bmaControls;
+- }
+-
+- if (hdr->bLength < 7 || !csize || hdr->bLength < 7 + csize) {
+- snd_printk(KERN_ERR "usbaudio: unit %u: invalid UAC_FEATURE_UNIT descriptor\n", unitid);
+- return -EINVAL;
++ if (hdr->bLength < 6 + csize) {
++ snd_printk(KERN_ERR "usbaudio: unit %u: "
++ "invalid UAC_FEATURE_UNIT descriptor\n",
++ unitid);
++ return -EINVAL;
++ }
+ }
+
+ /* parse the source unit */
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index a3ddac0..1b275f0 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -383,11 +383,13 @@ static int snd_usb_fasttrackpro_boot_quirk(struct usb_device *dev)
+ * rules
+ */
+ err = usb_driver_set_configuration(dev, 2);
+- if (err < 0) {
++ if (err < 0)
+ snd_printdd("error usb_driver_set_configuration: %d\n",
+ err);
+- return -ENODEV;
+- }
++ /* Always return an error, so that we stop creating a device
++ that will just be destroyed and recreated with a new
++ configuration */
++ return -ENODEV;
+ } else
+ snd_printk(KERN_INFO "usb-audio: Fast Track Pro config OK\n");
+
diff --git a/1038_linux-3.2.39.patch b/1038_linux-3.2.39.patch
new file mode 100644
index 00000000..5639e927
--- /dev/null
+++ b/1038_linux-3.2.39.patch
@@ -0,0 +1,2660 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 82d7fa6..83f156e 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -2584,7 +2584,7 @@ S: Maintained
+ F: drivers/net/ethernet/i825xx/eexpress.*
+
+ ETHERNET BRIDGE
+-M: Stephen Hemminger <shemminger@vyatta.com>
++M: Stephen Hemminger <stephen@networkplumber.org>
+ L: bridge@lists.linux-foundation.org
+ L: netdev@vger.kernel.org
+ W: http://www.linuxfoundation.org/en/Net:Bridge
+@@ -4475,7 +4475,7 @@ S: Supported
+ F: drivers/infiniband/hw/nes/
+
+ NETEM NETWORK EMULATOR
+-M: Stephen Hemminger <shemminger@vyatta.com>
++M: Stephen Hemminger <stephen@networkplumber.org>
+ L: netem@lists.linux-foundation.org
+ S: Maintained
+ F: net/sched/sch_netem.c
+@@ -5993,7 +5993,7 @@ S: Maintained
+ F: drivers/usb/misc/sisusbvga/
+
+ SKGE, SKY2 10/100/1000 GIGABIT ETHERNET DRIVERS
+-M: Stephen Hemminger <shemminger@vyatta.com>
++M: Stephen Hemminger <stephen@networkplumber.org>
+ L: netdev@vger.kernel.org
+ S: Maintained
+ F: drivers/net/ethernet/marvell/sk*
+diff --git a/Makefile b/Makefile
+index c8c9d02..0fceb8b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 38
++SUBLEVEL = 39
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
+index a6253ec..95b4eb3 100644
+--- a/arch/x86/ia32/ia32entry.S
++++ b/arch/x86/ia32/ia32entry.S
+@@ -208,7 +208,7 @@ sysexit_from_sys_call:
+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
+ jnz ia32_ret_from_sys_call
+ TRACE_IRQS_ON
+- sti
++ ENABLE_INTERRUPTS(CLBR_NONE)
+ movl %eax,%esi /* second arg, syscall return value */
+ cmpl $0,%eax /* is it < 0? */
+ setl %al /* 1 if so, 0 if not */
+@@ -218,7 +218,7 @@ sysexit_from_sys_call:
+ GET_THREAD_INFO(%r10)
+ movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
+ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
+- cli
++ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+ testl %edi,TI_flags(%r10)
+ jz \exit
+diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
+index c346d11..d4f278e 100644
+--- a/arch/x86/kernel/step.c
++++ b/arch/x86/kernel/step.c
+@@ -157,6 +157,34 @@ static int enable_single_step(struct task_struct *child)
+ return 1;
+ }
+
++static void set_task_blockstep(struct task_struct *task, bool on)
++{
++ unsigned long debugctl;
++
++ /*
++ * Ensure irq/preemption can't change debugctl in between.
++ * Note also that both TIF_BLOCKSTEP and debugctl should
++ * be changed atomically wrt preemption.
++ *
++ * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
++ * task is current or it can't be running, otherwise we can race
++ * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
++ * PTRACE_KILL is not safe.
++ */
++ local_irq_disable();
++ debugctl = get_debugctlmsr();
++ if (on) {
++ debugctl |= DEBUGCTLMSR_BTF;
++ set_tsk_thread_flag(task, TIF_BLOCKSTEP);
++ } else {
++ debugctl &= ~DEBUGCTLMSR_BTF;
++ clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
++ }
++ if (task == current)
++ update_debugctlmsr(debugctl);
++ local_irq_enable();
++}
++
+ /*
+ * Enable single or block step.
+ */
+@@ -169,19 +197,10 @@ static void enable_step(struct task_struct *child, bool block)
+ * So no one should try to use debugger block stepping in a program
+ * that uses user-mode single stepping itself.
+ */
+- if (enable_single_step(child) && block) {
+- unsigned long debugctl = get_debugctlmsr();
+-
+- debugctl |= DEBUGCTLMSR_BTF;
+- update_debugctlmsr(debugctl);
+- set_tsk_thread_flag(child, TIF_BLOCKSTEP);
+- } else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
+- unsigned long debugctl = get_debugctlmsr();
+-
+- debugctl &= ~DEBUGCTLMSR_BTF;
+- update_debugctlmsr(debugctl);
+- clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
+- }
++ if (enable_single_step(child) && block)
++ set_task_blockstep(child, true);
++ else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
++ set_task_blockstep(child, false);
+ }
+
+ void user_enable_single_step(struct task_struct *child)
+@@ -199,13 +218,8 @@ void user_disable_single_step(struct task_struct *child)
+ /*
+ * Make sure block stepping (BTF) is disabled.
+ */
+- if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
+- unsigned long debugctl = get_debugctlmsr();
+-
+- debugctl &= ~DEBUGCTLMSR_BTF;
+- update_debugctlmsr(debugctl);
+- clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
+- }
++ if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
++ set_task_blockstep(child, false);
+
+ /* Always clear TIF_SINGLESTEP... */
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
+index b040b0e..7328f71 100644
+--- a/arch/x86/xen/xen-asm_32.S
++++ b/arch/x86/xen/xen-asm_32.S
+@@ -88,11 +88,11 @@ ENTRY(xen_iret)
+ */
+ #ifdef CONFIG_SMP
+ GET_THREAD_INFO(%eax)
+- movl TI_cpu(%eax), %eax
+- movl __per_cpu_offset(,%eax,4), %eax
+- mov xen_vcpu(%eax), %eax
++ movl %ss:TI_cpu(%eax), %eax
++ movl %ss:__per_cpu_offset(,%eax,4), %eax
++ mov %ss:xen_vcpu(%eax), %eax
+ #else
+- movl xen_vcpu, %eax
++ movl %ss:xen_vcpu, %eax
+ #endif
+
+ /* check IF state we're restoring */
+@@ -105,11 +105,11 @@ ENTRY(xen_iret)
+ * resuming the code, so we don't have to be worried about
+ * being preempted to another CPU.
+ */
+- setz XEN_vcpu_info_mask(%eax)
++ setz %ss:XEN_vcpu_info_mask(%eax)
+ xen_iret_start_crit:
+
+ /* check for unmasked and pending */
+- cmpw $0x0001, XEN_vcpu_info_pending(%eax)
++ cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)
+
+ /*
+ * If there's something pending, mask events again so we can
+@@ -117,7 +117,7 @@ xen_iret_start_crit:
+ * touch XEN_vcpu_info_mask.
+ */
+ jne 1f
+- movb $1, XEN_vcpu_info_mask(%eax)
++ movb $1, %ss:XEN_vcpu_info_mask(%eax)
+
+ 1: popl %eax
+
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index b07edc4..62c1325 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -52,7 +52,9 @@
+ #define DRV_VERSION "3.0"
+
+ enum {
+- AHCI_PCI_BAR = 5,
++ AHCI_PCI_BAR_STA2X11 = 0,
++ AHCI_PCI_BAR_ENMOTUS = 2,
++ AHCI_PCI_BAR_STANDARD = 5,
+ };
+
+ enum board_ids {
+@@ -375,6 +377,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
+ { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
+
++ /* ST Microelectronics */
++ { PCI_VDEVICE(STMICRO, 0xCC06), board_ahci }, /* ST ConneXt */
++
+ /* Marvell */
+ { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
+ { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
+@@ -400,6 +405,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
+ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+
++ /* Enmotus */
++ { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
++
+ /* Generic, PCI class code for AHCI */
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
+@@ -629,6 +637,13 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
+ {
+ int rc;
+
++ /*
++ * If the device fixup already set the dma_mask to some non-standard
++ * value, don't extend it here. This happens on STA2X11, for example.
++ */
++ if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32))
++ return 0;
++
+ if (using_dac &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+@@ -1033,6 +1048,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ struct ahci_host_priv *hpriv;
+ struct ata_host *host;
+ int n_ports, i, rc;
++ int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
+
+ VPRINTK("ENTER\n");
+
+@@ -1064,6 +1080,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ dev_info(&pdev->dev,
+ "PDC42819 can only drive SATA devices with this driver\n");
+
++ /* Both Connext and Enmotus devices use non-standard BARs */
++ if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
++ ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
++ else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
++ ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
++
+ /* acquire resources */
+ rc = pcim_enable_device(pdev);
+ if (rc)
+@@ -1072,7 +1094,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ /* AHCI controllers often implement SFF compatible interface.
+ * Grab all PCI BARs just in case.
+ */
+- rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
++ rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+@@ -1115,7 +1137,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
+ pci_intx(pdev, 1);
+
+- hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
++ hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
+
+ /* save initial config */
+ ahci_pci_save_initial_config(pdev, hpriv);
+@@ -1179,8 +1201,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+- ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
+- ata_port_pbar_desc(ap, AHCI_PCI_BAR,
++ ata_port_pbar_desc(ap, ahci_pci_bar, -1, "abar");
++ ata_port_pbar_desc(ap, ahci_pci_bar,
+ 0x100 + ap->port_no * 0x80, "port");
+
+ /* set enclosure management message type */
+diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
+index 6a0955e..53ecac5 100644
+--- a/drivers/atm/iphase.h
++++ b/drivers/atm/iphase.h
+@@ -636,82 +636,82 @@ struct rx_buf_desc {
+ #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE
+ #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE
+
+-typedef volatile u_int freg_t;
++typedef volatile u_int ffreg_t;
+ typedef u_int rreg_t;
+
+ typedef struct _ffredn_t {
+- freg_t idlehead_high; /* Idle cell header (high) */
+- freg_t idlehead_low; /* Idle cell header (low) */
+- freg_t maxrate; /* Maximum rate */
+- freg_t stparms; /* Traffic Management Parameters */
+- freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
+- freg_t rm_type; /* */
+- u_int filler5[0x17 - 0x06];
+- freg_t cmd_reg; /* Command register */
+- u_int filler18[0x20 - 0x18];
+- freg_t cbr_base; /* CBR Pointer Base */
+- freg_t vbr_base; /* VBR Pointer Base */
+- freg_t abr_base; /* ABR Pointer Base */
+- freg_t ubr_base; /* UBR Pointer Base */
+- u_int filler24;
+- freg_t vbrwq_base; /* VBR Wait Queue Base */
+- freg_t abrwq_base; /* ABR Wait Queue Base */
+- freg_t ubrwq_base; /* UBR Wait Queue Base */
+- freg_t vct_base; /* Main VC Table Base */
+- freg_t vcte_base; /* Extended Main VC Table Base */
+- u_int filler2a[0x2C - 0x2A];
+- freg_t cbr_tab_beg; /* CBR Table Begin */
+- freg_t cbr_tab_end; /* CBR Table End */
+- freg_t cbr_pointer; /* CBR Pointer */
+- u_int filler2f[0x30 - 0x2F];
+- freg_t prq_st_adr; /* Packet Ready Queue Start Address */
+- freg_t prq_ed_adr; /* Packet Ready Queue End Address */
+- freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
+- freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
+- freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
+- freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
+- freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
+- freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
+- u_int filler38[0x40 - 0x38];
+- freg_t queue_base; /* Base address for PRQ and TCQ */
+- freg_t desc_base; /* Base address of descriptor table */
+- u_int filler42[0x45 - 0x42];
+- freg_t mode_reg_0; /* Mode register 0 */
+- freg_t mode_reg_1; /* Mode register 1 */
+- freg_t intr_status_reg;/* Interrupt Status register */
+- freg_t mask_reg; /* Mask Register */
+- freg_t cell_ctr_high1; /* Total cell transfer count (high) */
+- freg_t cell_ctr_lo1; /* Total cell transfer count (low) */
+- freg_t state_reg; /* Status register */
+- u_int filler4c[0x58 - 0x4c];
+- freg_t curr_desc_num; /* Contains the current descriptor num */
+- freg_t next_desc; /* Next descriptor */
+- freg_t next_vc; /* Next VC */
+- u_int filler5b[0x5d - 0x5b];
+- freg_t present_slot_cnt;/* Present slot count */
+- u_int filler5e[0x6a - 0x5e];
+- freg_t new_desc_num; /* New descriptor number */
+- freg_t new_vc; /* New VC */
+- freg_t sched_tbl_ptr; /* Schedule table pointer */
+- freg_t vbrwq_wptr; /* VBR wait queue write pointer */
+- freg_t vbrwq_rptr; /* VBR wait queue read pointer */
+- freg_t abrwq_wptr; /* ABR wait queue write pointer */
+- freg_t abrwq_rptr; /* ABR wait queue read pointer */
+- freg_t ubrwq_wptr; /* UBR wait queue write pointer */
+- freg_t ubrwq_rptr; /* UBR wait queue read pointer */
+- freg_t cbr_vc; /* CBR VC */
+- freg_t vbr_sb_vc; /* VBR SB VC */
+- freg_t abr_sb_vc; /* ABR SB VC */
+- freg_t ubr_sb_vc; /* UBR SB VC */
+- freg_t vbr_next_link; /* VBR next link */
+- freg_t abr_next_link; /* ABR next link */
+- freg_t ubr_next_link; /* UBR next link */
+- u_int filler7a[0x7c-0x7a];
+- freg_t out_rate_head; /* Out of rate head */
+- u_int filler7d[0xca-0x7d]; /* pad out to full address space */
+- freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
+- freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
+- u_int fillercc[0x100-0xcc]; /* pad out to full address space */
++ ffreg_t idlehead_high; /* Idle cell header (high) */
++ ffreg_t idlehead_low; /* Idle cell header (low) */
++ ffreg_t maxrate; /* Maximum rate */
++ ffreg_t stparms; /* Traffic Management Parameters */
++ ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
++ ffreg_t rm_type; /* */
++ u_int filler5[0x17 - 0x06];
++ ffreg_t cmd_reg; /* Command register */
++ u_int filler18[0x20 - 0x18];
++ ffreg_t cbr_base; /* CBR Pointer Base */
++ ffreg_t vbr_base; /* VBR Pointer Base */
++ ffreg_t abr_base; /* ABR Pointer Base */
++ ffreg_t ubr_base; /* UBR Pointer Base */
++ u_int filler24;
++ ffreg_t vbrwq_base; /* VBR Wait Queue Base */
++ ffreg_t abrwq_base; /* ABR Wait Queue Base */
++ ffreg_t ubrwq_base; /* UBR Wait Queue Base */
++ ffreg_t vct_base; /* Main VC Table Base */
++ ffreg_t vcte_base; /* Extended Main VC Table Base */
++ u_int filler2a[0x2C - 0x2A];
++ ffreg_t cbr_tab_beg; /* CBR Table Begin */
++ ffreg_t cbr_tab_end; /* CBR Table End */
++ ffreg_t cbr_pointer; /* CBR Pointer */
++ u_int filler2f[0x30 - 0x2F];
++ ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */
++ ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */
++ ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
++ ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
++ ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
++ ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
++ ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
++ ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
++ u_int filler38[0x40 - 0x38];
++ ffreg_t queue_base; /* Base address for PRQ and TCQ */
++ ffreg_t desc_base; /* Base address of descriptor table */
++ u_int filler42[0x45 - 0x42];
++ ffreg_t mode_reg_0; /* Mode register 0 */
++ ffreg_t mode_reg_1; /* Mode register 1 */
++ ffreg_t intr_status_reg;/* Interrupt Status register */
++ ffreg_t mask_reg; /* Mask Register */
++ ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */
++ ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */
++ ffreg_t state_reg; /* Status register */
++ u_int filler4c[0x58 - 0x4c];
++ ffreg_t curr_desc_num; /* Contains the current descriptor num */
++ ffreg_t next_desc; /* Next descriptor */
++ ffreg_t next_vc; /* Next VC */
++ u_int filler5b[0x5d - 0x5b];
++ ffreg_t present_slot_cnt;/* Present slot count */
++ u_int filler5e[0x6a - 0x5e];
++ ffreg_t new_desc_num; /* New descriptor number */
++ ffreg_t new_vc; /* New VC */
++ ffreg_t sched_tbl_ptr; /* Schedule table pointer */
++ ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */
++ ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */
++ ffreg_t abrwq_wptr; /* ABR wait queue write pointer */
++ ffreg_t abrwq_rptr; /* ABR wait queue read pointer */
++ ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */
++ ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */
++ ffreg_t cbr_vc; /* CBR VC */
++ ffreg_t vbr_sb_vc; /* VBR SB VC */
++ ffreg_t abr_sb_vc; /* ABR SB VC */
++ ffreg_t ubr_sb_vc; /* UBR SB VC */
++ ffreg_t vbr_next_link; /* VBR next link */
++ ffreg_t abr_next_link; /* ABR next link */
++ ffreg_t ubr_next_link; /* UBR next link */
++ u_int filler7a[0x7c-0x7a];
++ ffreg_t out_rate_head; /* Out of rate head */
++ u_int filler7d[0xca-0x7d]; /* pad out to full address space */
++ ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
++ ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
++ u_int fillercc[0x100-0xcc]; /* pad out to full address space */
+ } ffredn_t;
+
+ typedef struct _rfredn_t {
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index 8e3c46d..7795d1e 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -1789,7 +1789,8 @@ static void virtcons_remove(struct virtio_device *vdev)
+ /* Disable interrupts for vqs */
+ vdev->config->reset(vdev);
+ /* Finish up work that's lined up */
+- cancel_work_sync(&portdev->control_work);
++ if (use_multiport(portdev))
++ cancel_work_sync(&portdev->control_work);
+
+ list_for_each_entry_safe(port, port2, &portdev->ports, list)
+ unplug_port(port);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index c05e825..7817429 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -7156,8 +7156,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
+ OUT_RING(pf | pipesrc);
+
+ intel_mark_page_flip_active(intel_crtc);
+-
+- intel_mark_page_flip_active(intel_crtc);
+ ADVANCE_LP_RING();
+ return 0;
+
+@@ -7193,6 +7191,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
+ pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
++
++ intel_mark_page_flip_active(intel_crtc);
+ ADVANCE_LP_RING();
+ return 0;
+
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index 1b98338..ec36dd9 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -2455,6 +2455,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 1),
+ ATOM_DEVICE_CRT1_SUPPORT);
+ }
++ /* RV100 board with external TDMS bit mis-set.
++ * Actually uses internal TMDS, clear the bit.
++ */
++ if (dev->pdev->device == 0x5159 &&
++ dev->pdev->subsystem_vendor == 0x1014 &&
++ dev->pdev->subsystem_device == 0x029A) {
++ tmp &= ~(1 << 4);
++ }
+ if ((tmp >> 4) & 0x1) {
+ devices |= ATOM_DEVICE_DFP2_SUPPORT;
+ radeon_add_legacy_encoder(dev,
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index aec8e0c..63e7143 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -1110,8 +1110,10 @@ radeon_user_framebuffer_create(struct drm_device *dev,
+ }
+
+ radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
+- if (radeon_fb == NULL)
++ if (radeon_fb == NULL) {
++ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
++ }
+
+ radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+
+diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
+index 49d5820..65be5e8 100644
+--- a/drivers/gpu/drm/radeon/radeon_ring.c
++++ b/drivers/gpu/drm/radeon/radeon_ring.c
+@@ -306,6 +306,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
+ {
+ int r;
+
++ /* make sure we aren't trying to allocate more space than there is on the ring */
++ if (ndw > (rdev->cp.ring_size / 4))
++ return -ENOMEM;
+ /* Align requested size with padding so unlock_commit can
+ * pad safely */
+ ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 2d41336..c15c38e 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -278,6 +278,9 @@
+ #define USB_VENDOR_ID_EZKEY 0x0518
+ #define USB_DEVICE_ID_BTC_8193 0x0002
+
++#define USB_VENDOR_ID_FORMOSA 0x147a
++#define USB_DEVICE_ID_FORMOSA_IR_RECEIVER 0xe03e
++
+ #define USB_VENDOR_ID_FREESCALE 0x15A2
+ #define USB_DEVICE_ID_FREESCALE_MX28 0x004F
+
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index aec3fa3..e26eddf 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -68,6 +68,7 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
+diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
+index fd17bb3..08c2329 100644
+--- a/drivers/isdn/gigaset/capi.c
++++ b/drivers/isdn/gigaset/capi.c
+@@ -264,6 +264,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
+ CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
+ CAPIMSG_CONTROL(data));
+ l -= 12;
++ if (l <= 0)
++ return;
+ dbgline = kmalloc(3*l, GFP_ATOMIC);
+ if (!dbgline)
+ return;
+diff --git a/drivers/media/video/gspca/kinect.c b/drivers/media/video/gspca/kinect.c
+index 4fe51fd..acaef66 100644
+--- a/drivers/media/video/gspca/kinect.c
++++ b/drivers/media/video/gspca/kinect.c
+@@ -390,6 +390,7 @@ static const struct sd_desc sd_desc = {
+ /* -- module initialisation -- */
+ static const struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x045e, 0x02ae)},
++ {USB_DEVICE(0x045e, 0x02bf)},
+ {}
+ };
+
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 21a3d77..64647d4 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -446,8 +446,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
+ IFX_WRITE_LOW_16BIT(mask));
++
++ /* According to C_CAN documentation, the reserved bit
++ * in IFx_MASK2 register is fixed 1
++ */
+ priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
+- IFX_WRITE_HIGH_16BIT(mask));
++ IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+ IFX_WRITE_LOW_16BIT(id));
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 01bc102..c86fa50 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -1135,14 +1135,26 @@ static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
+ return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
+ }
+
+-#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
+- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+- MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
+- MII_TG3_AUXCTL_ACTL_TX_6DB)
++static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
++{
++ u32 val;
++ int err;
+
+-#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
+- tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+- MII_TG3_AUXCTL_ACTL_TX_6DB);
++ err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
++
++ if (err)
++ return err;
++ if (enable)
++
++ val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
++ else
++ val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
++
++ err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
++ val | MII_TG3_AUXCTL_ACTL_TX_6DB);
++
++ return err;
++}
+
+ static int tg3_bmcr_reset(struct tg3 *tp)
+ {
+@@ -2087,7 +2099,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
+
+ otp = tp->phy_otp;
+
+- if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
++ if (tg3_phy_toggle_auxctl_smdsp(tp, true))
+ return;
+
+ phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
+@@ -2112,7 +2124,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
+ ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
+ tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
+
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+
+ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
+@@ -2148,9 +2160,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
+
+ if (!tp->setlpicnt) {
+ if (current_link_up == 1 &&
+- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+
+ val = tr32(TG3_CPMU_EEE_MODE);
+@@ -2166,11 +2178,11 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ val = MII_TG3_DSP_TAP26_ALNOKO |
+ MII_TG3_DSP_TAP26_RMRXSTO;
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+
+ val = tr32(TG3_CPMU_EEE_MODE);
+@@ -2314,7 +2326,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
+ tg3_writephy(tp, MII_CTRL1000,
+ CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
+
+- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
++ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
+ if (err)
+ return err;
+
+@@ -2335,7 +2347,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
+ tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
+
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+
+ tg3_writephy(tp, MII_CTRL1000, phy9_orig);
+
+@@ -2424,10 +2436,10 @@ static int tg3_phy_reset(struct tg3 *tp)
+
+ out:
+ if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
+- !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++ !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ tg3_phydsp_write(tp, 0x201f, 0x2aaa);
+ tg3_phydsp_write(tp, 0x000a, 0x0323);
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+
+ if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
+@@ -2436,14 +2448,14 @@ out:
+ }
+
+ if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
+- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ tg3_phydsp_write(tp, 0x000a, 0x310b);
+ tg3_phydsp_write(tp, 0x201f, 0x9506);
+ tg3_phydsp_write(tp, 0x401f, 0x14e2);
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+ } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
+- if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
++ if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
+ if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
+@@ -2452,7 +2464,7 @@ out:
+ } else
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
+
+- TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ tg3_phy_toggle_auxctl_smdsp(tp, false);
+ }
+ }
+
+@@ -3639,7 +3651,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
+ tw32(TG3_CPMU_EEE_MODE,
+ tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
+
+- err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
++ err = tg3_phy_toggle_auxctl_smdsp(tp, true);
+ if (!err) {
+ u32 err2;
+
+@@ -3671,7 +3683,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
+ MII_TG3_DSP_CH34TP2_HIBW01);
+ }
+
+- err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
++ err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
+ if (!err)
+ err = err2;
+ }
+@@ -6353,6 +6365,9 @@ static void tg3_poll_controller(struct net_device *dev)
+ int i;
+ struct tg3 *tp = netdev_priv(dev);
+
++ if (tg3_irq_sync(tp))
++ return;
++
+ for (i = 0; i < tp->irq_cnt; i++)
+ tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
+ }
+@@ -15388,6 +15403,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
+ tp->pm_cap = pm_cap;
+ tp->rx_mode = TG3_DEF_RX_MODE;
+ tp->tx_mode = TG3_DEF_TX_MODE;
++ tp->irq_sync = 1;
+
+ if (tg3_debug > 0)
+ tp->msg_enable = tg3_debug;
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+index a8259cc..5674145 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+@@ -144,7 +144,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
+ buffrag->length, PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+- for (j = 0; j < cmd_buf->frag_count; j++) {
++ for (j = 1; j < cmd_buf->frag_count; j++) {
+ buffrag++;
+ if (buffrag->dma) {
+ pci_unmap_page(adapter->pdev, buffrag->dma,
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+index da5204d..4a238a4 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+@@ -1924,10 +1924,12 @@ unwind:
+ while (--i >= 0) {
+ nf = &pbuf->frag_array[i+1];
+ pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
++ nf->dma = 0ULL;
+ }
+
+ nf = &pbuf->frag_array[0];
+ pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
++ nf->dma = 0ULL;
+
+ out_err:
+ return -ENOMEM;
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index b8db4cd..a6153f1 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5829,13 +5829,6 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
+ dev->stats.rx_bytes += pkt_size;
+ dev->stats.rx_packets++;
+ }
+-
+- /* Work around for AMD plateform. */
+- if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
+- (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
+- desc->opts2 = 0;
+- cur_rx++;
+- }
+ }
+
+ count = cur_rx - tp->cur_rx;
+diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
+index 4ce9e5f..d0893e4 100644
+--- a/drivers/net/loopback.c
++++ b/drivers/net/loopback.c
+@@ -78,6 +78,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
+
+ skb_orphan(skb);
+
++ /* Before queueing this packet to netif_rx(),
++ * make sure dst is refcounted.
++ */
++ skb_dst_force(skb);
++
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* it's OK to use per_cpu_ptr() because BHs are off */
+diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
+index 8d3ab37..6618dd6 100644
+--- a/drivers/net/wireless/mwifiex/scan.c
++++ b/drivers/net/wireless/mwifiex/scan.c
+@@ -1594,7 +1594,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
+ scan_rsp->number_of_sets);
+ ret = -1;
+- goto done;
++ goto check_next_scan;
+ }
+
+ bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
+@@ -1663,7 +1663,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ if (!beacon_size || beacon_size > bytes_left) {
+ bss_info += bytes_left;
+ bytes_left = 0;
+- return -1;
++ ret = -1;
++ goto check_next_scan;
+ }
+
+ /* Initialize the current working beacon pointer for this BSS
+@@ -1716,7 +1717,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ dev_err(priv->adapter->dev, "%s: in processing"
+ " IE, bytes left < IE length\n",
+ __func__);
+- goto done;
++ goto check_next_scan;
+ }
+ if (element_id == WLAN_EID_DS_PARAMS) {
+ channel = *(u8 *) (current_ptr +
+@@ -1782,6 +1783,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ }
+ }
+
++check_next_scan:
+ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ if (list_empty(&adapter->scan_pending_q)) {
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+@@ -1812,7 +1814,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
+ mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
+ }
+
+-done:
+ return ret;
+ }
+
+diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
+index 22ed6df..2be9880 100644
+--- a/drivers/net/wireless/rt2x00/rt2500usb.c
++++ b/drivers/net/wireless/rt2x00/rt2500usb.c
+@@ -1921,7 +1921,7 @@ static struct usb_device_id rt2500usb_device_table[] = {
+ { USB_DEVICE(0x0b05, 0x1706) },
+ { USB_DEVICE(0x0b05, 0x1707) },
+ /* Belkin */
+- { USB_DEVICE(0x050d, 0x7050) },
++ { USB_DEVICE(0x050d, 0x7050) }, /* FCC ID: K7SF5D7050A ver. 2.x */
+ { USB_DEVICE(0x050d, 0x7051) },
+ /* Cisco Systems */
+ { USB_DEVICE(0x13b1, 0x000d) },
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index b66a61b..3d4ea1f 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -959,6 +959,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x07d1, 0x3c15) },
+ { USB_DEVICE(0x07d1, 0x3c16) },
+ { USB_DEVICE(0x2001, 0x3c1b) },
++ { USB_DEVICE(0x2001, 0x3c1e) },
+ /* Draytek */
+ { USB_DEVICE(0x07fa, 0x7712) },
+ /* DVICO */
+@@ -1090,6 +1091,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ { USB_DEVICE(0x177f, 0x0153) },
+ { USB_DEVICE(0x177f, 0x0302) },
+ { USB_DEVICE(0x177f, 0x0313) },
++ { USB_DEVICE(0x177f, 0x0323) },
+ /* U-Media */
+ { USB_DEVICE(0x157e, 0x300e) },
+ { USB_DEVICE(0x157e, 0x3013) },
+diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
+index 2ad468d..9e724eb 100644
+--- a/drivers/net/wireless/rt2x00/rt73usb.c
++++ b/drivers/net/wireless/rt2x00/rt73usb.c
+@@ -2421,6 +2421,7 @@ static struct usb_device_id rt73usb_device_table[] = {
+ { USB_DEVICE(0x0b05, 0x1723) },
+ { USB_DEVICE(0x0b05, 0x1724) },
+ /* Belkin */
++ { USB_DEVICE(0x050d, 0x7050) }, /* FCC ID: K7SF5D7050B ver. 3.x */
+ { USB_DEVICE(0x050d, 0x705a) },
+ { USB_DEVICE(0x050d, 0x905b) },
+ { USB_DEVICE(0x050d, 0x905c) },
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index a49e848..30dd0a9 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -503,8 +503,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
+ WARN_ON(skb_queue_empty(&rx_queue));
+ while (!skb_queue_empty(&rx_queue)) {
+ _skb = skb_dequeue(&rx_queue);
+- _rtl_usb_rx_process_agg(hw, skb);
+- ieee80211_rx_irqsafe(hw, skb);
++ _rtl_usb_rx_process_agg(hw, _skb);
++ ieee80211_rx_irqsafe(hw, _skb);
+ }
+ }
+
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index 94b79c3..9d7f172 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
+ /* Notify xenvif that ring now has space to send an skb to the frontend */
+ void xenvif_notify_tx_completion(struct xenvif *vif);
+
++/* Prevent the device from generating any further traffic. */
++void xenvif_carrier_off(struct xenvif *vif);
++
+ /* Returns number of ring slots required to send an skb to the frontend */
+ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 1825629..5925e0b 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -342,17 +342,22 @@ err:
+ return err;
+ }
+
+-void xenvif_disconnect(struct xenvif *vif)
++void xenvif_carrier_off(struct xenvif *vif)
+ {
+ struct net_device *dev = vif->dev;
+- if (netif_carrier_ok(dev)) {
+- rtnl_lock();
+- netif_carrier_off(dev); /* discard queued packets */
+- if (netif_running(dev))
+- xenvif_down(vif);
+- rtnl_unlock();
+- xenvif_put(vif);
+- }
++
++ rtnl_lock();
++ netif_carrier_off(dev); /* discard queued packets */
++ if (netif_running(dev))
++ xenvif_down(vif);
++ rtnl_unlock();
++ xenvif_put(vif);
++}
++
++void xenvif_disconnect(struct xenvif *vif)
++{
++ if (netif_carrier_ok(vif->dev))
++ xenvif_carrier_off(vif);
+
+ atomic_dec(&vif->refcnt);
+ wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 15e332d..b802bb3 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -146,7 +146,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
+ atomic_dec(&netbk->netfront_count);
+ }
+
+-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
++static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
++ u8 status);
+ static void make_tx_response(struct xenvif *vif,
+ struct xen_netif_tx_request *txp,
+ s8 st);
+@@ -851,7 +852,7 @@ static void netbk_tx_err(struct xenvif *vif,
+
+ do {
+ make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+- if (cons >= end)
++ if (cons == end)
+ break;
+ txp = RING_GET_REQUEST(&vif->tx, cons++);
+ } while (1);
+@@ -860,6 +861,13 @@ static void netbk_tx_err(struct xenvif *vif,
+ xenvif_put(vif);
+ }
+
++static void netbk_fatal_tx_err(struct xenvif *vif)
++{
++ netdev_err(vif->dev, "fatal error; disabling device\n");
++ xenvif_carrier_off(vif);
++ xenvif_put(vif);
++}
++
+ static int netbk_count_requests(struct xenvif *vif,
+ struct xen_netif_tx_request *first,
+ struct xen_netif_tx_request *txp,
+@@ -873,19 +881,22 @@ static int netbk_count_requests(struct xenvif *vif,
+
+ do {
+ if (frags >= work_to_do) {
+- netdev_dbg(vif->dev, "Need more frags\n");
++ netdev_err(vif->dev, "Need more frags\n");
++ netbk_fatal_tx_err(vif);
+ return -frags;
+ }
+
+ if (unlikely(frags >= MAX_SKB_FRAGS)) {
+- netdev_dbg(vif->dev, "Too many frags\n");
++ netdev_err(vif->dev, "Too many frags\n");
++ netbk_fatal_tx_err(vif);
+ return -frags;
+ }
+
+ memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
+ sizeof(*txp));
+ if (txp->size > first->size) {
+- netdev_dbg(vif->dev, "Frags galore\n");
++ netdev_err(vif->dev, "Frag is bigger than frame.\n");
++ netbk_fatal_tx_err(vif);
+ return -frags;
+ }
+
+@@ -893,8 +904,9 @@ static int netbk_count_requests(struct xenvif *vif,
+ frags++;
+
+ if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
+- netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
++ netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
+ txp->offset, txp->size);
++ netbk_fatal_tx_err(vif);
+ return -frags;
+ }
+ } while ((txp++)->flags & XEN_NETTXF_more_data);
+@@ -938,7 +950,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
+ pending_idx = netbk->pending_ring[index];
+ page = xen_netbk_alloc_page(netbk, skb, pending_idx);
+ if (!page)
+- return NULL;
++ goto err;
+
+ netbk->mmap_pages[pending_idx] = page;
+
+@@ -962,6 +974,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
+ }
+
+ return gop;
++err:
++ /* Unwind, freeing all pages and sending error responses. */
++ while (i-- > start) {
++ xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
++ XEN_NETIF_RSP_ERROR);
++ }
++ /* The head too, if necessary. */
++ if (start)
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
++
++ return NULL;
+ }
+
+ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+@@ -970,30 +993,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+ {
+ struct gnttab_copy *gop = *gopp;
+ u16 pending_idx = *((u16 *)skb->data);
+- struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
+- struct xenvif *vif = pending_tx_info[pending_idx].vif;
+- struct xen_netif_tx_request *txp;
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ int nr_frags = shinfo->nr_frags;
+ int i, err, start;
+
+ /* Check status of header. */
+ err = gop->status;
+- if (unlikely(err)) {
+- pending_ring_idx_t index;
+- index = pending_index(netbk->pending_prod++);
+- txp = &pending_tx_info[pending_idx].req;
+- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+- netbk->pending_ring[index] = pending_idx;
+- xenvif_put(vif);
+- }
++ if (unlikely(err))
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+ /* Skip first skb fragment if it is on same page as header fragment. */
+ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+
+ for (i = start; i < nr_frags; i++) {
+ int j, newerr;
+- pending_ring_idx_t index;
+
+ pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+
+@@ -1002,16 +1015,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+ if (likely(!newerr)) {
+ /* Had a previous error? Invalidate this fragment. */
+ if (unlikely(err))
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ continue;
+ }
+
+ /* Error on this fragment: respond to client with an error. */
+- txp = &netbk->pending_tx_info[pending_idx].req;
+- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+- index = pending_index(netbk->pending_prod++);
+- netbk->pending_ring[index] = pending_idx;
+- xenvif_put(vif);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+ /* Not the first error? Preceding frags already invalidated. */
+ if (err)
+@@ -1019,10 +1028,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+
+ /* First error: invalidate header and preceding fragments. */
+ pending_idx = *((u16 *)skb->data);
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ for (j = start; j < i; j++) {
+ pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ }
+
+ /* Remember the error: invalidate all subsequent fragments. */
+@@ -1056,7 +1065,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
+
+ /* Take an extra reference to offset xen_netbk_idx_release */
+ get_page(netbk->mmap_pages[pending_idx]);
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ }
+ }
+
+@@ -1069,7 +1078,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
+
+ do {
+ if (unlikely(work_to_do-- <= 0)) {
+- netdev_dbg(vif->dev, "Missing extra info\n");
++ netdev_err(vif->dev, "Missing extra info\n");
++ netbk_fatal_tx_err(vif);
+ return -EBADR;
+ }
+
+@@ -1078,8 +1088,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
+ if (unlikely(!extra.type ||
+ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+ vif->tx.req_cons = ++cons;
+- netdev_dbg(vif->dev,
++ netdev_err(vif->dev,
+ "Invalid extra type: %d\n", extra.type);
++ netbk_fatal_tx_err(vif);
+ return -EINVAL;
+ }
+
+@@ -1095,13 +1106,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
+ struct xen_netif_extra_info *gso)
+ {
+ if (!gso->u.gso.size) {
+- netdev_dbg(vif->dev, "GSO size must not be zero.\n");
++ netdev_err(vif->dev, "GSO size must not be zero.\n");
++ netbk_fatal_tx_err(vif);
+ return -EINVAL;
+ }
+
+ /* Currently only TCPv4 S.O. is supported. */
+ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+- netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
++ netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
++ netbk_fatal_tx_err(vif);
+ return -EINVAL;
+ }
+
+@@ -1238,9 +1251,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+
+ /* Get a netif from the list with work to do. */
+ vif = poll_net_schedule_list(netbk);
++ /* This can sometimes happen because the test of
++ * list_empty(net_schedule_list) at the top of the
++ * loop is unlocked. Just go back and have another
++ * look.
++ */
+ if (!vif)
+ continue;
+
++ if (vif->tx.sring->req_prod - vif->tx.req_cons >
++ XEN_NETIF_TX_RING_SIZE) {
++ netdev_err(vif->dev,
++ "Impossible number of requests. "
++ "req_prod %d, req_cons %d, size %ld\n",
++ vif->tx.sring->req_prod, vif->tx.req_cons,
++ XEN_NETIF_TX_RING_SIZE);
++ netbk_fatal_tx_err(vif);
++ continue;
++ }
++
+ RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
+ if (!work_to_do) {
+ xenvif_put(vif);
+@@ -1268,17 +1297,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ work_to_do = xen_netbk_get_extras(vif, extras,
+ work_to_do);
+ idx = vif->tx.req_cons;
+- if (unlikely(work_to_do < 0)) {
+- netbk_tx_err(vif, &txreq, idx);
++ if (unlikely(work_to_do < 0))
+ continue;
+- }
+ }
+
+ ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
+- if (unlikely(ret < 0)) {
+- netbk_tx_err(vif, &txreq, idx - ret);
++ if (unlikely(ret < 0))
+ continue;
+- }
++
+ idx += ret;
+
+ if (unlikely(txreq.size < ETH_HLEN)) {
+@@ -1290,11 +1316,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+
+ /* No crossing a page as the payload mustn't fragment. */
+ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
+- netdev_dbg(vif->dev,
++ netdev_err(vif->dev,
+ "txreq.offset: %x, size: %u, end: %lu\n",
+ txreq.offset, txreq.size,
+ (txreq.offset&~PAGE_MASK) + txreq.size);
+- netbk_tx_err(vif, &txreq, idx);
++ netbk_fatal_tx_err(vif);
+ continue;
+ }
+
+@@ -1322,8 +1348,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
+
+ if (netbk_set_skb_gso(vif, skb, gso)) {
++ /* Failure in netbk_set_skb_gso is fatal. */
+ kfree_skb(skb);
+- netbk_tx_err(vif, &txreq, idx);
+ continue;
+ }
+ }
+@@ -1424,7 +1450,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
+ txp->size -= data_len;
+ } else {
+ /* Schedule a response immediately. */
+- xen_netbk_idx_release(netbk, pending_idx);
++ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ }
+
+ if (txp->flags & XEN_NETTXF_csum_blank)
+@@ -1479,7 +1505,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
+
+ }
+
+-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
++static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
++ u8 status)
+ {
+ struct xenvif *vif;
+ struct pending_tx_info *pending_tx_info;
+@@ -1493,7 +1520,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+
+ vif = pending_tx_info->vif;
+
+- make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
++ make_tx_response(vif, &pending_tx_info->req, status);
+
+ index = pending_index(netbk->pending_prod++);
+ netbk->pending_ring[index] = pending_idx;
+diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
+index da8beb8..627b66a 100644
+--- a/drivers/rtc/rtc-isl1208.c
++++ b/drivers/rtc/rtc-isl1208.c
+@@ -494,6 +494,7 @@ isl1208_rtc_interrupt(int irq, void *data)
+ {
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+ struct i2c_client *client = data;
++ struct rtc_device *rtc = i2c_get_clientdata(client);
+ int handled = 0, sr, err;
+
+ /*
+@@ -516,6 +517,8 @@ isl1208_rtc_interrupt(int irq, void *data)
+ if (sr & ISL1208_REG_SR_ALM) {
+ dev_dbg(&client->dev, "alarm!\n");
+
++ rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
++
+ /* Clear the alarm */
+ sr &= ~ISL1208_REG_SR_ALM;
+ sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr);
+diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
+index 1e80a48..73816d8 100644
+--- a/drivers/rtc/rtc-pl031.c
++++ b/drivers/rtc/rtc-pl031.c
+@@ -44,6 +44,7 @@
+ #define RTC_YMR 0x34 /* Year match register */
+ #define RTC_YLR 0x38 /* Year data load register */
+
++#define RTC_CR_EN (1 << 0) /* counter enable bit */
+ #define RTC_CR_CWEN (1 << 26) /* Clockwatch enable bit */
+
+ #define RTC_TCR_EN (1 << 1) /* Periodic timer enable bit */
+@@ -312,7 +313,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
+ int ret;
+ struct pl031_local *ldata;
+ struct rtc_class_ops *ops = id->data;
+- unsigned long time;
++ unsigned long time, data;
+
+ ret = amba_request_regions(adev, NULL);
+ if (ret)
+@@ -339,10 +340,11 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
+ dev_dbg(&adev->dev, "designer ID = 0x%02x\n", ldata->hw_designer);
+ dev_dbg(&adev->dev, "revision = 0x%01x\n", ldata->hw_revision);
+
++ data = readl(ldata->base + RTC_CR);
+ /* Enable the clockwatch on ST Variants */
+ if (ldata->hw_designer == AMBA_VENDOR_ST)
+- writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
+- ldata->base + RTC_CR);
++ data |= RTC_CR_CWEN;
++ writel(data | RTC_CR_EN, ldata->base + RTC_CR);
+
+ /*
+ * On ST PL031 variants, the RTC reset value does not provide correct
+diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
+index 34655d0..08e470f 100644
+--- a/drivers/usb/host/ehci-sched.c
++++ b/drivers/usb/host/ehci-sched.c
+@@ -236,7 +236,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
+ }
+
+ static const unsigned char
+-max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
++max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };
+
+ /* carryover low/fullspeed bandwidth that crosses uframe boundries */
+ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 5cc401b..c7cfbce 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -780,6 +780,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+ "defaulting to EHCI.\n");
+ dev_warn(&xhci_pdev->dev,
+ "USB 3.0 devices will work at USB 2.0 speeds.\n");
++ usb_disable_xhci_ports(xhci_pdev);
+ return;
+ }
+
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 2ed591d..5c1f9e7 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2504,6 +2504,8 @@ cleanup:
+ (trb_comp_code != COMP_STALL &&
+ trb_comp_code != COMP_BABBLE))
+ xhci_urb_free_priv(xhci, urb_priv);
++ else
++ kfree(urb_priv);
+
+ usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
+ if ((urb->actual_length != urb->transfer_buffer_length &&
+@@ -3032,7 +3034,7 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
+ * running_total.
+ */
+ packets_transferred = (running_total + trb_buff_len) /
+- usb_endpoint_maxp(&urb->ep->desc);
++ GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+
+ if ((total_packet_count - packets_transferred) > 31)
+ return 31 << 17;
+@@ -3594,7 +3596,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ td_len = urb->iso_frame_desc[i].length;
+ td_remain_len = td_len;
+ total_packet_count = DIV_ROUND_UP(td_len,
+- usb_endpoint_maxp(&urb->ep->desc));
++ GET_MAX_PACKET(
++ usb_endpoint_maxp(&urb->ep->desc)));
+ /* A zero-length transfer still involves at least one packet. */
+ if (total_packet_count == 0)
+ total_packet_count++;
+@@ -3617,9 +3620,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ td = urb_priv->td[i];
+ for (j = 0; j < trbs_per_td; j++) {
+ u32 remainder = 0;
+- field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
++ field = 0;
+
+ if (first_trb) {
++ field = TRB_TBC(burst_count) |
++ TRB_TLBPC(residue);
+ /* Queue the isoc TRB */
+ field |= TRB_TYPE(TRB_ISOC);
+ /* Assume URB_ISO_ASAP is set */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 2cc7c18..d644a66 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -590,6 +590,7 @@ static struct usb_device_id id_table_combined [] = {
+ /*
+ * ELV devices:
+ */
++ { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
+@@ -676,6 +677,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
+ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
+ { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index dd6edf8..97e0a6b 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -147,6 +147,11 @@
+ #define XSENS_CONVERTER_6_PID 0xD38E
+ #define XSENS_CONVERTER_7_PID 0xD38F
+
++/**
++ * Zolix (www.zolix.com.cb) product ids
++ */
++#define FTDI_OMNI1509 0xD491 /* Omni1509 embedded USB-serial */
++
+ /*
+ * NDI (www.ndigital.com) product ids
+ */
+@@ -204,7 +209,7 @@
+
+ /*
+ * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
+- * All of these devices use FTDI's vendor ID (0x0403).
++ * Almost all of these devices use FTDI's vendor ID (0x0403).
+ * Further IDs taken from ELV Windows .inf file.
+ *
+ * The previously included PID for the UO 100 module was incorrect.
+@@ -212,6 +217,8 @@
+ *
+ * Armin Laeuger originally sent the PID for the UM 100 module.
+ */
++#define FTDI_ELV_VID 0x1B1F /* ELV AG */
++#define FTDI_ELV_WS300_PID 0xC006 /* eQ3 WS 300 PC II */
+ #define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */
+ #define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */
+ #define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 9db3e23..52cd814 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -242,6 +242,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_CC864_DUAL 0x1005
+ #define TELIT_PRODUCT_CC864_SINGLE 0x1006
+ #define TELIT_PRODUCT_DE910_DUAL 0x1010
++#define TELIT_PRODUCT_LE920 0x1200
+
+ /* ZTE PRODUCTS */
+ #define ZTE_VENDOR_ID 0x19d2
+@@ -453,6 +454,10 @@ static void option_instat_callback(struct urb *urb);
+ #define TPLINK_VENDOR_ID 0x2357
+ #define TPLINK_PRODUCT_MA180 0x0201
+
++/* Changhong products */
++#define CHANGHONG_VENDOR_ID 0x2077
++#define CHANGHONG_PRODUCT_CH690 0x7001
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+@@ -534,6 +539,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
+ .reserved = BIT(3) | BIT(4),
+ };
+
++static const struct option_blacklist_info telit_le920_blacklist = {
++ .sendsetup = BIT(0),
++ .reserved = BIT(1) | BIT(5),
++};
++
+ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+ { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
+@@ -784,6 +794,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
++ .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+@@ -1318,6 +1330,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
+ { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 6634477..14c4a82 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -55,6 +55,7 @@ static const struct usb_device_id id_table[] = {
+ {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
+ {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
+ {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */
++ {DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */
+
+ /* Gobi 2000 devices */
+ {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */
+diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
+index 105d900..16b0bf0 100644
+--- a/drivers/usb/storage/initializers.c
++++ b/drivers/usb/storage/initializers.c
+@@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
+ return 0;
+ }
+
+-/* This places the HUAWEI E220 devices in multi-port mode */
+-int usb_stor_huawei_e220_init(struct us_data *us)
++/* This places the HUAWEI usb dongles in multi-port mode */
++static int usb_stor_huawei_feature_init(struct us_data *us)
+ {
+ int result;
+
+@@ -104,3 +104,75 @@ int usb_stor_huawei_e220_init(struct us_data *us)
+ US_DEBUGP("Huawei mode set result is %d\n", result);
+ return 0;
+ }
++
++/*
++ * It will send a scsi switch command called rewind' to huawei dongle.
++ * When the dongle receives this command at the first time,
++ * it will reboot immediately. After rebooted, it will ignore this command.
++ * So it is unnecessary to read its response.
++ */
++static int usb_stor_huawei_scsi_init(struct us_data *us)
++{
++ int result = 0;
++ int act_len = 0;
++ struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf;
++ char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00,
++ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
++
++ bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN);
++ bcbw->Tag = 0;
++ bcbw->DataTransferLength = 0;
++ bcbw->Flags = bcbw->Lun = 0;
++ bcbw->Length = sizeof(rewind_cmd);
++ memset(bcbw->CDB, 0, sizeof(bcbw->CDB));
++ memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd));
++
++ result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw,
++ US_BULK_CB_WRAP_LEN, &act_len);
++ US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result);
++ return result;
++}
++
++/*
++ * It tries to find the supported Huawei USB dongles.
++ * In Huawei, they assign the following product IDs
++ * for all of their mobile broadband dongles,
++ * including the new dongles in the future.
++ * So if the product ID is not included in this list,
++ * it means it is not Huawei's mobile broadband dongles.
++ */
++static int usb_stor_huawei_dongles_pid(struct us_data *us)
++{
++ struct usb_interface_descriptor *idesc;
++ int idProduct;
++
++ idesc = &us->pusb_intf->cur_altsetting->desc;
++ idProduct = us->pusb_dev->descriptor.idProduct;
++ /* The first port is CDROM,
++ * means the dongle in the single port mode,
++ * and a switch command is required to be sent. */
++ if (idesc && idesc->bInterfaceNumber == 0) {
++ if ((idProduct == 0x1001)
++ || (idProduct == 0x1003)
++ || (idProduct == 0x1004)
++ || (idProduct >= 0x1401 && idProduct <= 0x1500)
++ || (idProduct >= 0x1505 && idProduct <= 0x1600)
++ || (idProduct >= 0x1c02 && idProduct <= 0x2202)) {
++ return 1;
++ }
++ }
++ return 0;
++}
++
++int usb_stor_huawei_init(struct us_data *us)
++{
++ int result = 0;
++
++ if (usb_stor_huawei_dongles_pid(us)) {
++ if (us->pusb_dev->descriptor.idProduct >= 0x1446)
++ result = usb_stor_huawei_scsi_init(us);
++ else
++ result = usb_stor_huawei_feature_init(us);
++ }
++ return result;
++}
+diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h
+index 529327f..5376d4f 100644
+--- a/drivers/usb/storage/initializers.h
++++ b/drivers/usb/storage/initializers.h
+@@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us);
+ * flash reader */
+ int usb_stor_ucr61s2b_init(struct us_data *us);
+
+-/* This places the HUAWEI E220 devices in multi-port mode */
+-int usb_stor_huawei_e220_init(struct us_data *us);
++/* This places the HUAWEI usb dongles in multi-port mode */
++int usb_stor_huawei_init(struct us_data *us);
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index fa8a1b2..12640ef 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1515,335 +1515,10 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100,
+ /* Reported by fangxiaozhi <huananhu@huawei.com>
+ * This brings the HUAWEI data card devices into multi-port mode
+ */
+-UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000,
++UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+- 0),
+-UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000,
+- "HUAWEI MOBILE",
+- "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init,
+ 0),
+
+ /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
+diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
+index db51ba1..d582af4 100644
+--- a/drivers/usb/storage/usb.c
++++ b/drivers/usb/storage/usb.c
+@@ -120,6 +120,17 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");
+ .useTransport = use_transport, \
+ }
+
++#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \
++ vendor_name, product_name, use_protocol, use_transport, \
++ init_function, Flags) \
++{ \
++ .vendorName = vendor_name, \
++ .productName = product_name, \
++ .useProtocol = use_protocol, \
++ .useTransport = use_transport, \
++ .initFunction = init_function, \
++}
++
+ static struct us_unusual_dev us_unusual_dev_list[] = {
+ # include "unusual_devs.h"
+ { } /* Terminating entry */
+@@ -128,6 +139,7 @@ static struct us_unusual_dev us_unusual_dev_list[] = {
+ #undef UNUSUAL_DEV
+ #undef COMPLIANT_DEV
+ #undef USUAL_DEV
++#undef UNUSUAL_VENDOR_INTF
+
+
+ #ifdef CONFIG_PM /* Minimal support for suspend and resume */
+diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c
+index b969279..a9b5f2e 100644
+--- a/drivers/usb/storage/usual-tables.c
++++ b/drivers/usb/storage/usual-tables.c
+@@ -46,6 +46,20 @@
+ { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \
+ .driver_info = ((useType)<<24) }
+
++/* Define the device is matched with Vendor ID and interface descriptors */
++#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \
++ vendorName, productName, useProtocol, useTransport, \
++ initFunction, flags) \
++{ \
++ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
++ | USB_DEVICE_ID_MATCH_VENDOR, \
++ .idVendor = (id_vendor), \
++ .bInterfaceClass = (cl), \
++ .bInterfaceSubClass = (sc), \
++ .bInterfaceProtocol = (pr), \
++ .driver_info = (flags) \
++}
++
+ struct usb_device_id usb_storage_usb_ids[] = {
+ # include "unusual_devs.h"
+ { } /* Terminating entry */
+@@ -57,6 +71,7 @@ MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids);
+ #undef UNUSUAL_DEV
+ #undef COMPLIANT_DEV
+ #undef USUAL_DEV
++#undef UNUSUAL_VENDOR_INTF
+
+
+ /*
+diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
+index c598cfb..2b5e695 100644
+--- a/fs/nilfs2/ioctl.c
++++ b/fs/nilfs2/ioctl.c
+@@ -664,8 +664,11 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
+ if (ret < 0)
+ printk(KERN_ERR "NILFS: GC failed during preparation: "
+ "cannot read source blocks: err=%d\n", ret);
+- else
++ else {
++ if (nilfs_sb_need_update(nilfs))
++ set_nilfs_discontinued(nilfs);
+ ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);
++ }
+
+ nilfs_remove_all_gcinodes(nilfs);
+ clear_nilfs_gc_running(nilfs);
+diff --git a/fs/splice.c b/fs/splice.c
+index 014fcb4..58ab918 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -697,8 +697,10 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
+ return -EINVAL;
+
+ more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
+- if (sd->len < sd->total_len)
++
++ if (sd->len < sd->total_len && pipe->nrbufs > 1)
+ more |= MSG_SENDPAGE_NOTLAST;
++
+ return file->f_op->sendpage(file, buf->page, buf->offset,
+ sd->len, &pos, more);
+ }
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 1e86bb4..8204898 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2597,7 +2597,16 @@ static inline void thread_group_cputime_init(struct signal_struct *sig)
+ extern void recalc_sigpending_and_wake(struct task_struct *t);
+ extern void recalc_sigpending(void);
+
+-extern void signal_wake_up(struct task_struct *t, int resume_stopped);
++extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
++
++static inline void signal_wake_up(struct task_struct *t, bool resume)
++{
++ signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
++}
++static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
++{
++ signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
++}
+
+ /*
+ * Wrappers for p->thread_info->cpu access. No-op on UP.
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 78ab24a..67fedad 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -117,11 +117,45 @@ void __ptrace_unlink(struct task_struct *child)
+ * TASK_KILLABLE sleeps.
+ */
+ if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
+- signal_wake_up(child, task_is_traced(child));
++ ptrace_signal_wake_up(child, true);
+
+ spin_unlock(&child->sighand->siglock);
+ }
+
++/* Ensure that nothing can wake it up, even SIGKILL */
++static bool ptrace_freeze_traced(struct task_struct *task)
++{
++ bool ret = false;
++
++ /* Lockless, nobody but us can set this flag */
++ if (task->jobctl & JOBCTL_LISTENING)
++ return ret;
++
++ spin_lock_irq(&task->sighand->siglock);
++ if (task_is_traced(task) && !__fatal_signal_pending(task)) {
++ task->state = __TASK_TRACED;
++ ret = true;
++ }
++ spin_unlock_irq(&task->sighand->siglock);
++
++ return ret;
++}
++
++static void ptrace_unfreeze_traced(struct task_struct *task)
++{
++ if (task->state != __TASK_TRACED)
++ return;
++
++ WARN_ON(!task->ptrace || task->parent != current);
++
++ spin_lock_irq(&task->sighand->siglock);
++ if (__fatal_signal_pending(task))
++ wake_up_state(task, __TASK_TRACED);
++ else
++ task->state = TASK_TRACED;
++ spin_unlock_irq(&task->sighand->siglock);
++}
++
+ /**
+ * ptrace_check_attach - check whether ptracee is ready for ptrace operation
+ * @child: ptracee to check for
+@@ -151,24 +185,29 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
+ * be changed by us so it's not changing right after this.
+ */
+ read_lock(&tasklist_lock);
+- if ((child->ptrace & PT_PTRACED) && child->parent == current) {
++ if (child->ptrace && child->parent == current) {
++ WARN_ON(child->state == __TASK_TRACED);
+ /*
+ * child->sighand can't be NULL, release_task()
+ * does ptrace_unlink() before __exit_signal().
+ */
+- spin_lock_irq(&child->sighand->siglock);
+- WARN_ON_ONCE(task_is_stopped(child));
+- if (ignore_state || (task_is_traced(child) &&
+- !(child->jobctl & JOBCTL_LISTENING)))
++ if (ignore_state || ptrace_freeze_traced(child))
+ ret = 0;
+- spin_unlock_irq(&child->sighand->siglock);
+ }
+ read_unlock(&tasklist_lock);
+
+- if (!ret && !ignore_state)
+- ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
++ if (!ret && !ignore_state) {
++ if (!wait_task_inactive(child, __TASK_TRACED)) {
++ /*
++ * This can only happen if may_ptrace_stop() fails and
++ * ptrace_stop() changes ->state back to TASK_RUNNING,
++ * so we should not worry about leaking __TASK_TRACED.
++ */
++ WARN_ON(child->state == __TASK_TRACED);
++ ret = -ESRCH;
++ }
++ }
+
+- /* All systems go.. */
+ return ret;
+ }
+
+@@ -307,7 +346,7 @@ static int ptrace_attach(struct task_struct *task, long request,
+ */
+ if (task_is_stopped(task) &&
+ task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
+- signal_wake_up(task, 1);
++ signal_wake_up_state(task, __TASK_STOPPED);
+
+ spin_unlock(&task->sighand->siglock);
+
+@@ -736,7 +775,7 @@ int ptrace_request(struct task_struct *child, long request,
+ * tracee into STOP.
+ */
+ if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
+- signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
++ ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
+
+ unlock_task_sighand(child, &flags);
+ ret = 0;
+@@ -762,7 +801,7 @@ int ptrace_request(struct task_struct *child, long request,
+ * start of this trap and now. Trigger re-trap.
+ */
+ if (child->jobctl & JOBCTL_TRAP_NOTIFY)
+- signal_wake_up(child, true);
++ ptrace_signal_wake_up(child, true);
+ ret = 0;
+ }
+ unlock_task_sighand(child, &flags);
+@@ -899,6 +938,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
+ goto out_put_task_struct;
+
+ ret = arch_ptrace(child, request, addr, data);
++ if (ret || request != PTRACE_DETACH)
++ ptrace_unfreeze_traced(child);
+
+ out_put_task_struct:
+ put_task_struct(child);
+@@ -1038,8 +1079,11 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+
+ ret = ptrace_check_attach(child, request == PTRACE_KILL ||
+ request == PTRACE_INTERRUPT);
+- if (!ret)
++ if (!ret) {
+ ret = compat_arch_ptrace(child, request, addr, data);
++ if (ret || request != PTRACE_DETACH)
++ ptrace_unfreeze_traced(child);
++ }
+
+ out_put_task_struct:
+ put_task_struct(child);
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 7640b3a..08aa28e 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -757,6 +757,7 @@ static void __init __reserve_region_with_split(struct resource *root,
+ struct resource *parent = root;
+ struct resource *conflict;
+ struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);
++ struct resource *next_res = NULL;
+
+ if (!res)
+ return;
+@@ -766,21 +767,46 @@ static void __init __reserve_region_with_split(struct resource *root,
+ res->end = end;
+ res->flags = IORESOURCE_BUSY;
+
+- conflict = __request_resource(parent, res);
+- if (!conflict)
+- return;
++ while (1) {
+
+- /* failed, split and try again */
+- kfree(res);
++ conflict = __request_resource(parent, res);
++ if (!conflict) {
++ if (!next_res)
++ break;
++ res = next_res;
++ next_res = NULL;
++ continue;
++ }
+
+- /* conflict covered whole area */
+- if (conflict->start <= start && conflict->end >= end)
+- return;
++ /* conflict covered whole area */
++ if (conflict->start <= res->start &&
++ conflict->end >= res->end) {
++ kfree(res);
++ WARN_ON(next_res);
++ break;
++ }
++
++ /* failed, split and try again */
++ if (conflict->start > res->start) {
++ end = res->end;
++ res->end = conflict->start - 1;
++ if (conflict->end < end) {
++ next_res = kzalloc(sizeof(*next_res),
++ GFP_ATOMIC);
++ if (!next_res) {
++ kfree(res);
++ break;
++ }
++ next_res->name = name;
++ next_res->start = conflict->end + 1;
++ next_res->end = end;
++ next_res->flags = IORESOURCE_BUSY;
++ }
++ } else {
++ res->start = conflict->end + 1;
++ }
++ }
+
+- if (conflict->start > start)
+- __reserve_region_with_split(root, start, conflict->start-1, name);
+- if (conflict->end < end)
+- __reserve_region_with_split(root, conflict->end+1, end, name);
+ }
+
+ void __init reserve_region_with_split(struct resource *root,
+diff --git a/kernel/sched.c b/kernel/sched.c
+index fcc893f..eeeec4e 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -2924,7 +2924,8 @@ out:
+ */
+ int wake_up_process(struct task_struct *p)
+ {
+- return try_to_wake_up(p, TASK_ALL, 0);
++ WARN_ON(task_is_stopped_or_traced(p));
++ return try_to_wake_up(p, TASK_NORMAL, 0);
+ }
+ EXPORT_SYMBOL(wake_up_process);
+
+diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
+index 78fcacf..6ad4fb3 100644
+--- a/kernel/sched_rt.c
++++ b/kernel/sched_rt.c
+@@ -384,7 +384,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
+ static int do_balance_runtime(struct rt_rq *rt_rq)
+ {
+ struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+- struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
++ struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
+ int i, weight, more = 0;
+ u64 rt_period;
+
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 08e0b97..d2f55ea 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -676,23 +676,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+ * No need to set need_resched since signal event passing
+ * goes through ->blocked
+ */
+-void signal_wake_up(struct task_struct *t, int resume)
++void signal_wake_up_state(struct task_struct *t, unsigned int state)
+ {
+- unsigned int mask;
+-
+ set_tsk_thread_flag(t, TIF_SIGPENDING);
+-
+ /*
+- * For SIGKILL, we want to wake it up in the stopped/traced/killable
++ * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
+ * case. We don't check t->state here because there is a race with it
+ * executing another processor and just now entering stopped state.
+ * By using wake_up_state, we ensure the process will wake up and
+ * handle its death signal.
+ */
+- mask = TASK_INTERRUPTIBLE;
+- if (resume)
+- mask |= TASK_WAKEKILL;
+- if (!wake_up_state(t, mask))
++ if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
+ kick_process(t);
+ }
+
+@@ -841,7 +835,7 @@ static void ptrace_trap_notify(struct task_struct *t)
+ assert_spin_locked(&t->sighand->siglock);
+
+ task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
+- signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
++ ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
+ }
+
+ /*
+@@ -1765,6 +1759,10 @@ static inline int may_ptrace_stop(void)
+ * If SIGKILL was already sent before the caller unlocked
+ * ->siglock we must see ->core_state != NULL. Otherwise it
+ * is safe to enter schedule().
++ *
++ * This is almost outdated, a task with the pending SIGKILL can't
++ * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
++ * after SIGKILL was already dequeued.
+ */
+ if (unlikely(current->mm->core_state) &&
+ unlikely(current->mm == current->parent->mm))
+@@ -1890,6 +1888,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
+ if (gstop_done)
+ do_notify_parent_cldstop(current, false, why);
+
++ /* tasklist protects us from ptrace_freeze_traced() */
+ __set_current_state(TASK_RUNNING);
+ if (clear_code)
+ current->exit_code = 0;
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 6033f02..7a157b3 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -1972,7 +1972,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
+ if (ev->opcode != HCI_OP_NOP)
+ del_timer(&hdev->cmd_timer);
+
+- if (ev->ncmd) {
++ if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
+ atomic_set(&hdev->cmd_cnt, 1);
+ if (!skb_queue_empty(&hdev->cmd_q))
+ tasklet_schedule(&hdev->cmd_task);
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 1849ee0..9ab60e6 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -642,6 +642,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
+
+ skb_pull(skb, sizeof(code));
+
++ /*
++ * The SMP context must be initialized for all other PDUs except
++ * pairing and security requests. If we get any other PDU when
++ * not initialized simply disconnect (done if this function
++ * returns an error).
++ */
++ if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
++ !conn->smp_chan) {
++ BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
++ kfree_skb(skb);
++ return -ENOTSUPP;
++ }
++
+ switch (code) {
+ case SMP_CMD_PAIRING_REQ:
+ reason = smp_cmd_pairing_req(conn, skb);
+diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
+index 577ea5d..7c1745d 100644
+--- a/net/bridge/br_netfilter.c
++++ b/net/bridge/br_netfilter.c
+@@ -245,6 +245,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
+ struct net_device *dev = skb->dev;
+ u32 len;
+
++ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
++ goto inhdr_error;
++
+ iph = ip_hdr(skb);
+ opt = &(IPCB(skb)->opt);
+
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 7bc9991..2ef7da0 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -1803,10 +1803,13 @@ static ssize_t pktgen_thread_write(struct file *file,
+ return -EFAULT;
+ i += len;
+ mutex_lock(&pktgen_thread_lock);
+- pktgen_add_device(t, f);
++ ret = pktgen_add_device(t, f);
+ mutex_unlock(&pktgen_thread_lock);
+- ret = count;
+- sprintf(pg_result, "OK: add_device=%s", f);
++ if (!ret) {
++ ret = count;
++ sprintf(pg_result, "OK: add_device=%s", f);
++ } else
++ sprintf(pg_result, "ERROR: can not add device %s", f);
+ goto out;
+ }
+
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 0106d25..3b36002 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -600,7 +600,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
+ case IP_TTL:
+ if (optlen < 1)
+ goto e_inval;
+- if (val != -1 && (val < 0 || val > 255))
++ if (val != -1 && (val < 1 || val > 255))
+ goto e_inval;
+ inet->uc_ttl = val;
+ break;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index aab8f08..e865ed1 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3655,6 +3655,11 @@ static int tcp_process_frto(struct sock *sk, int flag)
+ }
+ } else {
+ if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
++ if (!tcp_packets_in_flight(tp)) {
++ tcp_enter_frto_loss(sk, 2, flag);
++ return true;
++ }
++
+ /* Prevent sending of new data. */
+ tp->snd_cwnd = min(tp->snd_cwnd,
+ tcp_packets_in_flight(tp));
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index aef80d7..b27baed 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1739,7 +1739,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
+ continue;
+ if ((rt->rt6i_flags & flags) != flags)
+ continue;
+- if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0))
++ if ((rt->rt6i_flags & noflags) != 0)
+ continue;
+ dst_hold(&rt->dst);
+ break;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index ae98e09..3ccd9b2 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1284,10 +1284,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ cork->length = 0;
+ sk->sk_sndmsg_page = NULL;
+ sk->sk_sndmsg_off = 0;
+- exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
++ exthdrlen = (opt ? opt->opt_flen : 0);
+ length += exthdrlen;
+ transhdrlen += exthdrlen;
+- dst_exthdrlen = rt->dst.header_len;
++ dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
+ } else {
+ rt = (struct rt6_info *)cork->dst;
+ fl6 = &inet->cork.fl.u.ip6;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 19724bd..791c1fa 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -819,7 +819,8 @@ restart:
+ dst_hold(&rt->dst);
+ read_unlock_bh(&table->tb6_lock);
+
+- if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
++ if (!dst_get_neighbour_raw(&rt->dst)
++ && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
+ nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
+ else if (!(rt->dst.flags & DST_HOST))
+ nrt = rt6_alloc_clone(rt, &fl6->daddr);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 85afc13..835fcea 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2422,13 +2422,15 @@ static int packet_release(struct socket *sock)
+
+ packet_flush_mclist(sk);
+
+- memset(&req_u, 0, sizeof(req_u));
+-
+- if (po->rx_ring.pg_vec)
++ if (po->rx_ring.pg_vec) {
++ memset(&req_u, 0, sizeof(req_u));
+ packet_set_ring(sk, &req_u, 1, 0);
++ }
+
+- if (po->tx_ring.pg_vec)
++ if (po->tx_ring.pg_vec) {
++ memset(&req_u, 0, sizeof(req_u));
+ packet_set_ring(sk, &req_u, 1, 1);
++ }
+
+ fanout_release(sk);
+
+diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
+index c8cc24e..dbe5870a 100644
+--- a/net/sctp/endpointola.c
++++ b/net/sctp/endpointola.c
+@@ -248,6 +248,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
+ /* Final destructor for endpoint. */
+ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
+ {
++ int i;
++
+ SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
+
+ /* Free up the HMAC transform. */
+@@ -270,6 +272,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
+ sctp_inq_free(&ep->base.inqueue);
+ sctp_bind_addr_free(&ep->base.bind_addr);
+
++ for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i)
++ memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE);
++
+ /* Remove and free the port */
+ if (sctp_sk(ep->base.sk)->bind_hash)
+ sctp_put_port(ep->base.sk);
+diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
+index cfeb1d4..96eb168 100644
+--- a/net/sctp/outqueue.c
++++ b/net/sctp/outqueue.c
+@@ -223,7 +223,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
+
+ /* Free the outqueue structure and any related pending chunks.
+ */
+-void sctp_outq_teardown(struct sctp_outq *q)
++static void __sctp_outq_teardown(struct sctp_outq *q)
+ {
+ struct sctp_transport *transport;
+ struct list_head *lchunk, *temp;
+@@ -276,8 +276,6 @@ void sctp_outq_teardown(struct sctp_outq *q)
+ sctp_chunk_free(chunk);
+ }
+
+- q->error = 0;
+-
+ /* Throw away any leftover control chunks. */
+ list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
+ list_del_init(&chunk->list);
+@@ -285,11 +283,17 @@ void sctp_outq_teardown(struct sctp_outq *q)
+ }
+ }
+
++void sctp_outq_teardown(struct sctp_outq *q)
++{
++ __sctp_outq_teardown(q);
++ sctp_outq_init(q->asoc, q);
++}
++
+ /* Free the outqueue structure and any related pending chunks. */
+ void sctp_outq_free(struct sctp_outq *q)
+ {
+ /* Throw away leftover chunks. */
+- sctp_outq_teardown(q);
++ __sctp_outq_teardown(q);
+
+ /* If we were kmalloc()'d, free the memory. */
+ if (q->malloced)
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index fa8333b..5e0d86e 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -3375,7 +3375,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
+
+ ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
+ out:
+- kfree(authkey);
++ kzfree(authkey);
+ return ret;
+ }
+
diff --git a/1039_linux-3.2.40.patch b/1039_linux-3.2.40.patch
new file mode 100644
index 00000000..f26b39cb
--- /dev/null
+++ b/1039_linux-3.2.40.patch
@@ -0,0 +1,6295 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 81c287f..ddbf18e 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -552,6 +552,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ UART at the specified I/O port or MMIO address,
+ switching to the matching ttyS device later. The
+ options are the same as for ttyS, above.
++ hvc<n> Use the hypervisor console device <n>. This is for
++ both Xen and PowerPC hypervisors.
+
+ If the device connected to the port is not a TTY but a braille
+ device, prepend "brl," before the device type, for instance
+@@ -703,6 +705,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+
+ earlyprintk= [X86,SH,BLACKFIN]
+ earlyprintk=vga
++ earlyprintk=xen
+ earlyprintk=serial[,ttySn[,baudrate]]
+ earlyprintk=ttySn[,baudrate]
+ earlyprintk=dbgp[debugController#]
+@@ -720,6 +723,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ The VGA output is eventually overwritten by the real
+ console.
+
++ The xen output can only be used by Xen PV guests.
++
+ ekgdboc= [X86,KGDB] Allow early kernel console debugging
+ ekgdboc=kbd
+
+diff --git a/Makefile b/Makefile
+index 0fceb8b..47af1e9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 39
++SUBLEVEL = 40
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/mach-pxa/include/mach/smemc.h b/arch/arm/mach-pxa/include/mach/smemc.h
+index b7de471..b802f28 100644
+--- a/arch/arm/mach-pxa/include/mach/smemc.h
++++ b/arch/arm/mach-pxa/include/mach/smemc.h
+@@ -37,6 +37,7 @@
+ #define CSADRCFG1 (SMEMC_VIRT + 0x84) /* Address Configuration Register for CS1 */
+ #define CSADRCFG2 (SMEMC_VIRT + 0x88) /* Address Configuration Register for CS2 */
+ #define CSADRCFG3 (SMEMC_VIRT + 0x8C) /* Address Configuration Register for CS3 */
++#define CSMSADRCFG (SMEMC_VIRT + 0xA0) /* Chip Select Configuration Register */
+
+ /*
+ * More handy macros for PCMCIA
+diff --git a/arch/arm/mach-pxa/smemc.c b/arch/arm/mach-pxa/smemc.c
+index 7992305..f38aa89 100644
+--- a/arch/arm/mach-pxa/smemc.c
++++ b/arch/arm/mach-pxa/smemc.c
+@@ -40,6 +40,8 @@ static void pxa3xx_smemc_resume(void)
+ __raw_writel(csadrcfg[1], CSADRCFG1);
+ __raw_writel(csadrcfg[2], CSADRCFG2);
+ __raw_writel(csadrcfg[3], CSADRCFG3);
++ /* CSMSADRCFG wakes up in its default state (0), so we need to set it */
++ __raw_writel(0x2, CSMSADRCFG);
+ }
+
+ static struct syscore_ops smemc_syscore_ops = {
+@@ -49,8 +51,19 @@ static struct syscore_ops smemc_syscore_ops = {
+
+ static int __init smemc_init(void)
+ {
+- if (cpu_is_pxa3xx())
++ if (cpu_is_pxa3xx()) {
++ /*
++ * The only documentation we have on the
++ * Chip Select Configuration Register (CSMSADRCFG) is that
++ * it must be programmed to 0x2.
++ * Moreover, in the bit definitions, the second bit
++ * (CSMSADRCFG[1]) is called "SETALWAYS".
++ * Other bits are reserved in this register.
++ */
++ __raw_writel(0x2, CSMSADRCFG);
++
+ register_syscore_ops(&smemc_syscore_ops);
++ }
+
+ return 0;
+ }
+diff --git a/arch/arm/mach-s3c2410/include/mach/debug-macro.S b/arch/arm/mach-s3c2410/include/mach/debug-macro.S
+index 4135de8..13ed33c 100644
+--- a/arch/arm/mach-s3c2410/include/mach/debug-macro.S
++++ b/arch/arm/mach-s3c2410/include/mach/debug-macro.S
+@@ -40,17 +40,17 @@
+ addeq \rd, \rx, #(S3C24XX_PA_GPIO - S3C24XX_PA_UART)
+ addne \rd, \rx, #(S3C24XX_VA_GPIO - S3C24XX_VA_UART)
+ bic \rd, \rd, #0xff000
+- ldr \rd, [ \rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0) ]
++ ldr \rd, [\rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0)]
+ and \rd, \rd, #0x00ff0000
+ teq \rd, #0x00440000 @ is it 2440?
+ 1004:
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ moveq \rd, \rd, lsr #SHIFT_2440TXF
+ tst \rd, #S3C2410_UFSTAT_TXFULL
+ .endm
+
+ .macro fifo_full_s3c2410 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ tst \rd, #S3C2410_UFSTAT_TXFULL
+ .endm
+
+@@ -68,18 +68,18 @@
+ addeq \rd, \rx, #(S3C24XX_PA_GPIO - S3C24XX_PA_UART)
+ addne \rd, \rx, #(S3C24XX_VA_GPIO - S3C24XX_VA_UART)
+ bic \rd, \rd, #0xff000
+- ldr \rd, [ \rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0) ]
++ ldr \rd, [\rd, # S3C2410_GSTATUS1 - S3C2410_GPIOREG(0)]
+ and \rd, \rd, #0x00ff0000
+ teq \rd, #0x00440000 @ is it 2440?
+
+ 10000:
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ andne \rd, \rd, #S3C2410_UFSTAT_TXMASK
+ andeq \rd, \rd, #S3C2440_UFSTAT_TXMASK
+ .endm
+
+ .macro fifo_level_s3c2410 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ and \rd, \rd, #S3C2410_UFSTAT_TXMASK
+ .endm
+
+diff --git a/arch/arm/mach-s3c2410/include/mach/entry-macro.S b/arch/arm/mach-s3c2410/include/mach/entry-macro.S
+index 473b3cd..ef2287b 100644
+--- a/arch/arm/mach-s3c2410/include/mach/entry-macro.S
++++ b/arch/arm/mach-s3c2410/include/mach/entry-macro.S
+@@ -34,10 +34,10 @@
+
+ @@ try the interrupt offset register, since it is there
+
+- ldr \irqstat, [ \base, #INTPND ]
++ ldr \irqstat, [\base, #INTPND ]
+ teq \irqstat, #0
+ beq 1002f
+- ldr \irqnr, [ \base, #INTOFFSET ]
++ ldr \irqnr, [\base, #INTOFFSET ]
+ mov \tmp, #1
+ tst \irqstat, \tmp, lsl \irqnr
+ bne 1001f
+diff --git a/arch/arm/mach-s3c2410/pm-h1940.S b/arch/arm/mach-s3c2410/pm-h1940.S
+index c93bf2d..6183a68 100644
+--- a/arch/arm/mach-s3c2410/pm-h1940.S
++++ b/arch/arm/mach-s3c2410/pm-h1940.S
+@@ -30,4 +30,4 @@
+
+ h1940_pm_return:
+ mov r0, #S3C2410_PA_GPIO
+- ldr pc, [ r0, #S3C2410_GSTATUS3 - S3C24XX_VA_GPIO ]
++ ldr pc, [r0, #S3C2410_GSTATUS3 - S3C24XX_VA_GPIO]
+diff --git a/arch/arm/mach-s3c2410/sleep.S b/arch/arm/mach-s3c2410/sleep.S
+index dd5b638..65200ae 100644
+--- a/arch/arm/mach-s3c2410/sleep.S
++++ b/arch/arm/mach-s3c2410/sleep.S
+@@ -45,9 +45,9 @@ ENTRY(s3c2410_cpu_suspend)
+ ldr r4, =S3C2410_REFRESH
+ ldr r5, =S3C24XX_MISCCR
+ ldr r6, =S3C2410_CLKCON
+- ldr r7, [ r4 ] @ get REFRESH (and ensure in TLB)
+- ldr r8, [ r5 ] @ get MISCCR (and ensure in TLB)
+- ldr r9, [ r6 ] @ get CLKCON (and ensure in TLB)
++ ldr r7, [r4] @ get REFRESH (and ensure in TLB)
++ ldr r8, [r5] @ get MISCCR (and ensure in TLB)
++ ldr r9, [r6] @ get CLKCON (and ensure in TLB)
+
+ orr r7, r7, #S3C2410_REFRESH_SELF @ SDRAM sleep command
+ orr r8, r8, #S3C2410_MISCCR_SDSLEEP @ SDRAM power-down signals
+@@ -61,8 +61,8 @@ ENTRY(s3c2410_cpu_suspend)
+ @@ align next bit of code to cache line
+ .align 5
+ s3c2410_do_sleep:
+- streq r7, [ r4 ] @ SDRAM sleep command
+- streq r8, [ r5 ] @ SDRAM power-down config
+- streq r9, [ r6 ] @ CPU sleep
++ streq r7, [r4] @ SDRAM sleep command
++ streq r8, [r5] @ SDRAM power-down config
++ streq r9, [r6] @ CPU sleep
+ 1: beq 1b
+ mov pc, r14
+diff --git a/arch/arm/mach-s3c2412/sleep.S b/arch/arm/mach-s3c2412/sleep.S
+index c82418e..5adaceb 100644
+--- a/arch/arm/mach-s3c2412/sleep.S
++++ b/arch/arm/mach-s3c2412/sleep.S
+@@ -57,12 +57,12 @@ s3c2412_sleep_enter1:
+ * retry, as simply returning causes the system to lock.
+ */
+
+- ldrne r9, [ r1 ]
+- strne r9, [ r1 ]
+- ldrne r9, [ r2 ]
+- strne r9, [ r2 ]
+- ldrne r9, [ r3 ]
+- strne r9, [ r3 ]
++ ldrne r9, [r1]
++ strne r9, [r1]
++ ldrne r9, [r2]
++ strne r9, [r2]
++ ldrne r9, [r3]
++ strne r9, [r3]
+ bne s3c2412_sleep_enter1
+
+ mov pc, r14
+diff --git a/arch/arm/mach-w90x900/include/mach/entry-macro.S b/arch/arm/mach-w90x900/include/mach/entry-macro.S
+index d39aca5..08436cf 100644
+--- a/arch/arm/mach-w90x900/include/mach/entry-macro.S
++++ b/arch/arm/mach-w90x900/include/mach/entry-macro.S
+@@ -22,8 +22,8 @@
+
+ mov \base, #AIC_BA
+
+- ldr \irqnr, [ \base, #AIC_IPER]
+- ldr \irqnr, [ \base, #AIC_ISNR]
++ ldr \irqnr, [\base, #AIC_IPER]
++ ldr \irqnr, [\base, #AIC_ISNR]
+ cmp \irqnr, #0
+
+ .endm
+diff --git a/arch/arm/plat-samsung/include/plat/debug-macro.S b/arch/arm/plat-samsung/include/plat/debug-macro.S
+index 207e275..f3a9cff 100644
+--- a/arch/arm/plat-samsung/include/plat/debug-macro.S
++++ b/arch/arm/plat-samsung/include/plat/debug-macro.S
+@@ -14,12 +14,12 @@
+ /* The S5PV210/S5PC110 implementations are as belows. */
+
+ .macro fifo_level_s5pv210 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ and \rd, \rd, #S5PV210_UFSTAT_TXMASK
+ .endm
+
+ .macro fifo_full_s5pv210 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ tst \rd, #S5PV210_UFSTAT_TXFULL
+ .endm
+
+@@ -27,7 +27,7 @@
+ * most widely re-used */
+
+ .macro fifo_level_s3c2440 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ and \rd, \rd, #S3C2440_UFSTAT_TXMASK
+ .endm
+
+@@ -36,7 +36,7 @@
+ #endif
+
+ .macro fifo_full_s3c2440 rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFSTAT ]
++ ldr \rd, [\rx, # S3C2410_UFSTAT]
+ tst \rd, #S3C2440_UFSTAT_TXFULL
+ .endm
+
+@@ -45,11 +45,11 @@
+ #endif
+
+ .macro senduart,rd,rx
+- strb \rd, [\rx, # S3C2410_UTXH ]
++ strb \rd, [\rx, # S3C2410_UTXH]
+ .endm
+
+ .macro busyuart, rd, rx
+- ldr \rd, [ \rx, # S3C2410_UFCON ]
++ ldr \rd, [\rx, # S3C2410_UFCON]
+ tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled?
+ beq 1001f @
+ @ FIFO enabled...
+@@ -60,7 +60,7 @@
+
+ 1001:
+ @ busy waiting for non fifo
+- ldr \rd, [ \rx, # S3C2410_UTRSTAT ]
++ ldr \rd, [\rx, # S3C2410_UTRSTAT]
+ tst \rd, #S3C2410_UTRSTAT_TXFE
+ beq 1001b
+
+@@ -68,7 +68,7 @@
+ .endm
+
+ .macro waituart,rd,rx
+- ldr \rd, [ \rx, # S3C2410_UFCON ]
++ ldr \rd, [\rx, # S3C2410_UFCON]
+ tst \rd, #S3C2410_UFCON_FIFOMODE @ fifo enabled?
+ beq 1001f @
+ @ FIFO enabled...
+@@ -79,7 +79,7 @@
+ b 1002f
+ 1001:
+ @ idle waiting for non fifo
+- ldr \rd, [ \rx, # S3C2410_UTRSTAT ]
++ ldr \rd, [\rx, # S3C2410_UTRSTAT]
+ tst \rd, #S3C2410_UTRSTAT_TXFE
+ beq 1001b
+
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index 22dadeb..9d35a3e 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -12,11 +12,10 @@
+
+ #include <linux/bitops.h>
+ #include <linux/spinlock.h>
++#include <linux/mm_types.h>
+ #include <asm/processor.h>
+ #include <asm/cache.h>
+
+-struct vm_area_struct;
+-
+ /*
+ * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
+ * memory. For the return value to be meaningful, ADDR must be >=
+@@ -40,7 +39,14 @@ struct vm_area_struct;
+ do{ \
+ *(pteptr) = (pteval); \
+ } while(0)
+-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
++
++extern void purge_tlb_entries(struct mm_struct *, unsigned long);
++
++#define set_pte_at(mm, addr, ptep, pteval) \
++ do { \
++ set_pte(ptep, pteval); \
++ purge_tlb_entries(mm, addr); \
++ } while (0)
+
+ #endif /* !__ASSEMBLY__ */
+
+@@ -464,6 +470,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ old = pte_val(*ptep);
+ new = pte_val(pte_wrprotect(__pte (old)));
+ } while (cmpxchg((unsigned long *) ptep, old, new) != old);
++ purge_tlb_entries(mm, addr);
+ #else
+ pte_t old_pte = *ptep;
+ set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index 83335f3..5241698 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -421,6 +421,24 @@ void kunmap_parisc(void *addr)
+ EXPORT_SYMBOL(kunmap_parisc);
+ #endif
+
++void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
++{
++ unsigned long flags;
++
++ /* Note: purge_tlb_entries can be called at startup with
++ no context. */
++
++ /* Disable preemption while we play with %sr1. */
++ preempt_disable();
++ mtsp(mm->context, 1);
++ purge_tlb_start(flags);
++ pdtlb(addr);
++ pitlb(addr);
++ purge_tlb_end(flags);
++ preempt_enable();
++}
++EXPORT_SYMBOL(purge_tlb_entries);
++
+ void __flush_tlb_range(unsigned long sid, unsigned long start,
+ unsigned long end)
+ {
+diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
+index 66ea9b8..21165a4 100644
+--- a/arch/powerpc/include/asm/eeh.h
++++ b/arch/powerpc/include/asm/eeh.h
+@@ -61,6 +61,7 @@ void __init pci_addr_cache_build(void);
+ */
+ void eeh_add_device_tree_early(struct device_node *);
+ void eeh_add_device_tree_late(struct pci_bus *);
++void eeh_add_sysfs_files(struct pci_bus *);
+
+ /**
+ * eeh_remove_device_recursive - undo EEH for device & children.
+@@ -105,6 +106,8 @@ static inline void eeh_add_device_tree_early(struct device_node *dn) { }
+
+ static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
+
++static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }
++
+ static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
+ #define EEH_POSSIBLE_ERROR(val, type) (0)
+ #define EEH_IO_ERROR_VALUE(size) (-1UL)
+diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
+index 26ccbf7..4c0908d 100644
+--- a/arch/powerpc/kernel/machine_kexec_64.c
++++ b/arch/powerpc/kernel/machine_kexec_64.c
+@@ -162,6 +162,8 @@ static int kexec_all_irq_disabled = 0;
+ static void kexec_smp_down(void *arg)
+ {
+ local_irq_disable();
++ hard_irq_disable();
++
+ mb(); /* make sure our irqs are disabled before we say they are */
+ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+ while(kexec_all_irq_disabled == 0)
+@@ -244,6 +246,8 @@ static void kexec_prepare_cpus(void)
+ wake_offline_cpus();
+ smp_call_function(kexec_smp_down, NULL, /* wait */0);
+ local_irq_disable();
++ hard_irq_disable();
++
+ mb(); /* make sure IRQs are disabled before we say they are */
+ get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+
+@@ -281,6 +285,7 @@ static void kexec_prepare_cpus(void)
+ if (ppc_md.kexec_cpu_down)
+ ppc_md.kexec_cpu_down(0, 0);
+ local_irq_disable();
++ hard_irq_disable();
+ }
+
+ #endif /* SMP */
+diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
+index e1612df..b10beef 100644
+--- a/arch/powerpc/kernel/of_platform.c
++++ b/arch/powerpc/kernel/of_platform.c
+@@ -91,6 +91,9 @@ static int __devinit of_pci_phb_probe(struct platform_device *dev)
+ /* Add probed PCI devices to the device model */
+ pci_bus_add_devices(phb->bus);
+
++ /* sysfs files should only be added after devices are added */
++ eeh_add_sysfs_files(phb->bus);
++
+ return 0;
+ }
+
+diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
+index 458ed3b..a3cd949 100644
+--- a/arch/powerpc/kernel/pci-common.c
++++ b/arch/powerpc/kernel/pci-common.c
+@@ -1536,11 +1536,14 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
+ pcibios_allocate_bus_resources(bus);
+ pcibios_claim_one_bus(bus);
+
++ /* Fixup EEH */
++ eeh_add_device_tree_late(bus);
++
+ /* Add new devices to global lists. Register in proc, sysfs. */
+ pci_bus_add_devices(bus);
+
+- /* Fixup EEH */
+- eeh_add_device_tree_late(bus);
++ /* sysfs files should only be added after devices are added */
++ eeh_add_sysfs_files(bus);
+ }
+ EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
+
+diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
+index 5658690..389e06b 100644
+--- a/arch/powerpc/platforms/pseries/eeh.c
++++ b/arch/powerpc/platforms/pseries/eeh.c
+@@ -1238,7 +1238,6 @@ static void eeh_add_device_late(struct pci_dev *dev)
+ pdn->pcidev = dev;
+
+ pci_addr_cache_insert_device(dev);
+- eeh_sysfs_add_device(dev);
+ }
+
+ void eeh_add_device_tree_late(struct pci_bus *bus)
+@@ -1257,6 +1256,29 @@ void eeh_add_device_tree_late(struct pci_bus *bus)
+ EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
+
+ /**
++ * eeh_add_sysfs_files - Add EEH sysfs files for the indicated PCI bus
++ * @bus: PCI bus
++ *
++ * This routine must be used to add EEH sysfs files for PCI
++ * devices which are attached to the indicated PCI bus. The PCI bus
++ * is added after system boot through hotplug or dlpar.
++ */
++void eeh_add_sysfs_files(struct pci_bus *bus)
++{
++ struct pci_dev *dev;
++
++ list_for_each_entry(dev, &bus->devices, bus_list) {
++ eeh_sysfs_add_device(dev);
++ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
++ struct pci_bus *subbus = dev->subordinate;
++ if (subbus)
++ eeh_add_sysfs_files(subbus);
++ }
++ }
++}
++EXPORT_SYMBOL_GPL(eeh_add_sysfs_files);
++
++/**
+ * eeh_remove_device - undo EEH setup for the indicated pci device
+ * @dev: pci device to be removed
+ *
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index 8644366..b2f44de 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -121,6 +121,9 @@ static int s390_next_ktime(ktime_t expires,
+ nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
+ do_div(nsecs, 125);
+ S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
++ /* Program the maximum value if we have an overflow (== year 2042) */
++ if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
++ S390_lowcore.clock_comparator = -1ULL;
+ set_clock_comparator(S390_lowcore.clock_comparator);
+ return 0;
+ }
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index dffcaa4..4db9b1e 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -597,6 +597,14 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+ } else
+ prefix = 0;
+
++ /*
++ * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
++ * copying in vcpu load/put. Lets update our copies before we save
++ * it into the save area
++ */
++ save_fp_regs(&vcpu->arch.guest_fpregs);
++ save_access_regs(vcpu->arch.guest_acrs);
++
+ if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
+ vcpu->arch.guest_fpregs.fprs, 128, prefix))
+ return -EFAULT;
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index efb4294..9a42703 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1150,7 +1150,7 @@ config DIRECT_GBPAGES
+ config NUMA
+ bool "Numa Memory Allocation and Scheduler Support"
+ depends on SMP
+- depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
++ depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && BROKEN)
+ default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
+ ---help---
+ Enable NUMA (Non Uniform Memory Access) support.
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index 884507e..6be9909 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -142,6 +142,11 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
+ return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+ }
+
++static inline unsigned long pud_pfn(pud_t pud)
++{
++ return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
++}
++
+ #define pte_page(pte) pfn_to_page(pte_pfn(pte))
+
+ static inline int pmd_large(pmd_t pte)
+diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
+index f5373df..db4f704 100644
+--- a/arch/x86/kernel/apic/x2apic_phys.c
++++ b/arch/x86/kernel/apic/x2apic_phys.c
+@@ -20,12 +20,19 @@ static int set_x2apic_phys_mode(char *arg)
+ }
+ early_param("x2apic_phys", set_x2apic_phys_mode);
+
++static bool x2apic_fadt_phys(void)
++{
++ if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
++ (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
++ printk(KERN_DEBUG "System requires x2apic physical mode\n");
++ return true;
++ }
++ return false;
++}
++
+ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+ {
+- if (x2apic_phys)
+- return x2apic_enabled();
+- else
+- return 0;
++ return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys());
+ }
+
+ static void
+@@ -108,7 +115,7 @@ static void init_x2apic_ldr(void)
+
+ static int x2apic_phys_probe(void)
+ {
+- if (x2apic_mode && x2apic_phys)
++ if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
+ return 1;
+
+ return apic == &apic_x2apic_phys;
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 0a630dd..646d192 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -68,7 +68,8 @@ static void __init ms_hyperv_init_platform(void)
+ printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
+ ms_hyperv.features, ms_hyperv.hints);
+
+- clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
++ if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
++ clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
+ }
+
+ const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
+diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
+index af0699b..f6c4674 100644
+--- a/arch/x86/kernel/head.c
++++ b/arch/x86/kernel/head.c
+@@ -5,8 +5,6 @@
+ #include <asm/setup.h>
+ #include <asm/bios_ebda.h>
+
+-#define BIOS_LOWMEM_KILOBYTES 0x413
+-
+ /*
+ * The BIOS places the EBDA/XBDA at the top of conventional
+ * memory, and usually decreases the reported amount of
+@@ -16,17 +14,30 @@
+ * chipset: reserve a page before VGA to prevent PCI prefetch
+ * into it (errata #56). Usually the page is reserved anyways,
+ * unless you have no PS/2 mouse plugged in.
++ *
++ * This functions is deliberately very conservative. Losing
++ * memory in the bottom megabyte is rarely a problem, as long
++ * as we have enough memory to install the trampoline. Using
++ * memory that is in use by the BIOS or by some DMA device
++ * the BIOS didn't shut down *is* a big problem.
+ */
++
++#define BIOS_LOWMEM_KILOBYTES 0x413
++#define LOWMEM_CAP 0x9f000U /* Absolute maximum */
++#define INSANE_CUTOFF 0x20000U /* Less than this = insane */
++
+ void __init reserve_ebda_region(void)
+ {
+ unsigned int lowmem, ebda_addr;
+
+- /* To determine the position of the EBDA and the */
+- /* end of conventional memory, we need to look at */
+- /* the BIOS data area. In a paravirtual environment */
+- /* that area is absent. We'll just have to assume */
+- /* that the paravirt case can handle memory setup */
+- /* correctly, without our help. */
++ /*
++ * To determine the position of the EBDA and the
++ * end of conventional memory, we need to look at
++ * the BIOS data area. In a paravirtual environment
++ * that area is absent. We'll just have to assume
++ * that the paravirt case can handle memory setup
++ * correctly, without our help.
++ */
+ if (paravirt_enabled())
+ return;
+
+@@ -37,19 +48,23 @@ void __init reserve_ebda_region(void)
+ /* start of EBDA area */
+ ebda_addr = get_bios_ebda();
+
+- /* Fixup: bios puts an EBDA in the top 64K segment */
+- /* of conventional memory, but does not adjust lowmem. */
+- if ((lowmem - ebda_addr) <= 0x10000)
+- lowmem = ebda_addr;
++ /*
++ * Note: some old Dells seem to need 4k EBDA without
++ * reporting so, so just consider the memory above 0x9f000
++ * to be off limits (bugzilla 2990).
++ */
++
++ /* If the EBDA address is below 128K, assume it is bogus */
++ if (ebda_addr < INSANE_CUTOFF)
++ ebda_addr = LOWMEM_CAP;
+
+- /* Fixup: bios does not report an EBDA at all. */
+- /* Some old Dells seem to need 4k anyhow (bugzilla 2990) */
+- if ((ebda_addr == 0) && (lowmem >= 0x9f000))
+- lowmem = 0x9f000;
++ /* If lowmem is less than 128K, assume it is bogus */
++ if (lowmem < INSANE_CUTOFF)
++ lowmem = LOWMEM_CAP;
+
+- /* Paranoia: should never happen, but... */
+- if ((lowmem == 0) || (lowmem >= 0x100000))
+- lowmem = 0x9f000;
++ /* Use the lower of the lowmem and EBDA markers as the cutoff */
++ lowmem = min(lowmem, ebda_addr);
++ lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */
+
+ /* reserve all memory between lowmem and the 1MB mark */
+ memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved");
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 5db0490..7b73c88 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -738,13 +738,15 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ return;
+ }
+ #endif
++ /* Kernel addresses are always protection faults: */
++ if (address >= TASK_SIZE)
++ error_code |= PF_PROT;
+
+- if (unlikely(show_unhandled_signals))
++ if (likely(show_unhandled_signals))
+ show_signal_msg(regs, error_code, address, tsk);
+
+- /* Kernel addresses are always protection faults: */
+ tsk->thread.cr2 = address;
+- tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 14;
+
+ force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index bbaaa00..44b93da 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -831,6 +831,9 @@ int kern_addr_valid(unsigned long addr)
+ if (pud_none(*pud))
+ return 0;
+
++ if (pud_large(*pud))
++ return pfn_valid(pud_pfn(*pud));
++
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd))
+ return 0;
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index bef9991..1de542b 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -83,9 +83,10 @@ int efi_enabled(int facility)
+ }
+ EXPORT_SYMBOL(efi_enabled);
+
++static bool disable_runtime = false;
+ static int __init setup_noefi(char *arg)
+ {
+- clear_bit(EFI_BOOT, &x86_efi_facility);
++ disable_runtime = true;
+ return 0;
+ }
+ early_param("noefi", setup_noefi);
+@@ -549,35 +550,37 @@ void __init efi_init(void)
+
+ set_bit(EFI_CONFIG_TABLES, &x86_efi_facility);
+
+- /*
+- * Check out the runtime services table. We need to map
+- * the runtime services table so that we can grab the physical
+- * address of several of the EFI runtime functions, needed to
+- * set the firmware into virtual mode.
+- */
+- runtime = early_ioremap((unsigned long)efi.systab->runtime,
+- sizeof(efi_runtime_services_t));
+- if (runtime != NULL) {
+- /*
+- * We will only need *early* access to the following
+- * two EFI runtime services before set_virtual_address_map
+- * is invoked.
+- */
+- efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
+- efi_phys.set_virtual_address_map =
+- (efi_set_virtual_address_map_t *)
+- runtime->set_virtual_address_map;
++ if (!disable_runtime) {
+ /*
+- * Make efi_get_time can be called before entering
+- * virtual mode.
++ * Check out the runtime services table. We need to map
++ * the runtime services table so that we can grab the physical
++ * address of several of the EFI runtime functions, needed to
++ * set the firmware into virtual mode.
+ */
+- efi.get_time = phys_efi_get_time;
+-
+- set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
+- } else
+- printk(KERN_ERR "Could not map the EFI runtime service "
+- "table!\n");
+- early_iounmap(runtime, sizeof(efi_runtime_services_t));
++ runtime = early_ioremap((unsigned long)efi.systab->runtime,
++ sizeof(efi_runtime_services_t));
++ if (runtime != NULL) {
++ /*
++ * We will only need *early* access to the following
++ * two EFI runtime services before set_virtual_address_map
++ * is invoked.
++ */
++ efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
++ efi_phys.set_virtual_address_map =
++ (efi_set_virtual_address_map_t *)
++ runtime->set_virtual_address_map;
++ /*
++ * Make efi_get_time can be called before entering
++ * virtual mode.
++ */
++ efi.get_time = phys_efi_get_time;
++
++ set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
++ } else
++ printk(KERN_ERR "Could not map the EFI runtime service "
++ "table!\n");
++ early_iounmap(runtime, sizeof(efi_runtime_services_t));
++ }
+
+ /* Map the EFI memory map */
+ memmap.map = early_ioremap((unsigned long)memmap.phys_map,
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
+index d69cc6c..67bc7ba 100644
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -328,7 +328,6 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
+ if (per_cpu(lock_spinners, cpu) == xl) {
+ ADD_STATS(released_slow_kicked, 1);
+ xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+- break;
+ }
+ }
+ }
+diff --git a/block/genhd.c b/block/genhd.c
+index 4927476..6edf228 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -26,7 +26,7 @@ static DEFINE_MUTEX(block_class_lock);
+ struct kobject *block_depr;
+
+ /* for extended dynamic devt allocation, currently only one major is used */
+-#define MAX_EXT_DEVT (1 << MINORBITS)
++#define NR_EXT_DEVT (1 << MINORBITS)
+
+ /* For extended devt allocation. ext_devt_mutex prevents look up
+ * results from going away underneath its user.
+@@ -421,17 +421,18 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
+ do {
+ if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
+ return -ENOMEM;
++ mutex_lock(&ext_devt_mutex);
+ rc = idr_get_new(&ext_devt_idr, part, &idx);
++ if (!rc && idx >= NR_EXT_DEVT) {
++ idr_remove(&ext_devt_idr, idx);
++ rc = -EBUSY;
++ }
++ mutex_unlock(&ext_devt_mutex);
+ } while (rc == -EAGAIN);
+
+ if (rc)
+ return rc;
+
+- if (idx > MAX_EXT_DEVT) {
+- idr_remove(&ext_devt_idr, idx);
+- return -EBUSY;
+- }
+-
+ *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
+ return 0;
+ }
+@@ -645,7 +646,6 @@ void del_gendisk(struct gendisk *disk)
+ disk_part_iter_exit(&piter);
+
+ invalidate_partition(disk, 0);
+- blk_free_devt(disk_to_dev(disk)->devt);
+ set_capacity(disk, 0);
+ disk->flags &= ~GENHD_FL_UP;
+
+@@ -663,6 +663,7 @@ void del_gendisk(struct gendisk *disk)
+ if (!sysfs_deprecated)
+ sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
+ device_del(disk_to_dev(disk));
++ blk_free_devt(disk_to_dev(disk)->devt);
+ }
+ EXPORT_SYMBOL(del_gendisk);
+
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index d790791..cc9d020 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -156,6 +156,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
+ },
+ {
+ .callback = init_nvs_nosave,
++ .ident = "Sony Vaio VGN-FW41E_H",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
++ },
++ },
++ {
++ .callback = init_nvs_nosave,
+ .ident = "Sony Vaio VGN-FW21E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index 69ac373..df47397 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -321,6 +321,41 @@ static const struct pci_device_id piix_pci_tbl[] = {
+ { 0x8086, 0x1e08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Panther Point) */
+ { 0x8086, 0x1e09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (Lynx Point) */
++ { 0x8086, 0x8c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++ /* SATA Controller IDE (Lynx Point) */
++ { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++ /* SATA Controller IDE (Lynx Point) */
++ { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (Lynx Point) */
++ { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (Lynx Point-LP) */
++ { 0x8086, 0x9c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++ /* SATA Controller IDE (Lynx Point-LP) */
++ { 0x8086, 0x9c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++ /* SATA Controller IDE (Lynx Point-LP) */
++ { 0x8086, 0x9c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (Lynx Point-LP) */
++ { 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (DH89xxCC) */
++ { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (Avoton) */
++ { 0x8086, 0x1f20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++ /* SATA Controller IDE (Avoton) */
++ { 0x8086, 0x1f21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++ /* SATA Controller IDE (Avoton) */
++ { 0x8086, 0x1f30, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (Avoton) */
++ { 0x8086, 0x1f31, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (Wellsburg) */
++ { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++ /* SATA Controller IDE (Wellsburg) */
++ { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (Wellsburg) */
++ { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++ /* SATA Controller IDE (Wellsburg) */
++ { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++
+ { } /* terminate list */
+ };
+
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 000e7b2..8b8e8c0 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -289,7 +289,7 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start,
+ struct device *dev;
+ int error = 0;
+
+- if (!bus)
++ if (!bus || !bus->p)
+ return -EINVAL;
+
+ klist_iter_init_node(&bus->p->klist_devices, &i,
+@@ -323,7 +323,7 @@ struct device *bus_find_device(struct bus_type *bus,
+ struct klist_iter i;
+ struct device *dev;
+
+- if (!bus)
++ if (!bus || !bus->p)
+ return NULL;
+
+ klist_iter_init_node(&bus->p->klist_devices, &i,
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 86848c6..40a0fcb 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -584,12 +584,20 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ struct request sreq;
+
+ dev_info(disk_to_dev(lo->disk), "NBD_DISCONNECT\n");
++ if (!lo->sock)
++ return -EINVAL;
+
++ mutex_unlock(&lo->tx_lock);
++ fsync_bdev(bdev);
++ mutex_lock(&lo->tx_lock);
+ blk_rq_init(NULL, &sreq);
+ sreq.cmd_type = REQ_TYPE_SPECIAL;
+ nbd_cmd(&sreq) = NBD_CMD_DISC;
++
++ /* Check again after getting mutex back. */
+ if (!lo->sock)
+ return -EINVAL;
++
+ nbd_send_req(lo, &sreq);
+ return 0;
+ }
+@@ -603,6 +611,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ nbd_clear_que(lo);
+ BUG_ON(!list_empty(&lo->queue_head));
+ BUG_ON(!list_empty(&lo->waiting_queue));
++ kill_bdev(bdev);
+ if (file)
+ fput(file);
+ return 0;
+@@ -683,6 +692,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ lo->file = NULL;
+ nbd_clear_que(lo);
+ dev_warn(disk_to_dev(lo->disk), "queue cleared\n");
++ kill_bdev(bdev);
+ if (file)
+ fput(file);
+ lo->bytesize = 0;
+diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
+index 48e8fee..94f6ae2 100644
+--- a/drivers/block/sunvdc.c
++++ b/drivers/block/sunvdc.c
+@@ -461,7 +461,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
+ int op_len, err;
+ void *req_buf;
+
+- if (!(((u64)1 << ((u64)op - 1)) & port->operations))
++ if (!(((u64)1 << (u64)op) & port->operations))
+ return -EOPNOTSUPP;
+
+ switch (op) {
+diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
+index f759ad4..674e3c2 100644
+--- a/drivers/block/xen-blkback/xenbus.c
++++ b/drivers/block/xen-blkback/xenbus.c
+@@ -364,6 +364,7 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
+ be->blkif = NULL;
+ }
+
++ kfree(be->mode);
+ kfree(be);
+ dev_set_drvdata(&dev->dev, NULL);
+ return 0;
+@@ -513,6 +514,7 @@ static void backend_changed(struct xenbus_watch *watch,
+ = container_of(watch, struct backend_info, backend_watch);
+ struct xenbus_device *dev = be->dev;
+ int cdrom = 0;
++ unsigned long handle;
+ char *device_type;
+
+ DPRINTK("");
+@@ -532,10 +534,10 @@ static void backend_changed(struct xenbus_watch *watch,
+ return;
+ }
+
+- if ((be->major || be->minor) &&
+- ((be->major != major) || (be->minor != minor))) {
+- pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
+- be->major, be->minor, major, minor);
++ if (be->major | be->minor) {
++ if (be->major != major || be->minor != minor)
++ pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
++ be->major, be->minor, major, minor);
+ return;
+ }
+
+@@ -553,36 +555,33 @@ static void backend_changed(struct xenbus_watch *watch,
+ kfree(device_type);
+ }
+
+- if (be->major == 0 && be->minor == 0) {
+- /* Front end dir is a number, which is used as the handle. */
+-
+- char *p = strrchr(dev->otherend, '/') + 1;
+- long handle;
+- err = strict_strtoul(p, 0, &handle);
+- if (err)
+- return;
++ /* Front end dir is a number, which is used as the handle. */
++ err = strict_strtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
++ if (err)
++ return;
+
+- be->major = major;
+- be->minor = minor;
++ be->major = major;
++ be->minor = minor;
+
+- err = xen_vbd_create(be->blkif, handle, major, minor,
+- (NULL == strchr(be->mode, 'w')), cdrom);
+- if (err) {
+- be->major = 0;
+- be->minor = 0;
+- xenbus_dev_fatal(dev, err, "creating vbd structure");
+- return;
+- }
++ err = xen_vbd_create(be->blkif, handle, major, minor,
++ !strchr(be->mode, 'w'), cdrom);
+
++ if (err)
++ xenbus_dev_fatal(dev, err, "creating vbd structure");
++ else {
+ err = xenvbd_sysfs_addif(dev);
+ if (err) {
+ xen_vbd_free(&be->blkif->vbd);
+- be->major = 0;
+- be->minor = 0;
+ xenbus_dev_fatal(dev, err, "creating sysfs entries");
+- return;
+ }
++ }
+
++ if (err) {
++ kfree(be->mode);
++ be->mode = NULL;
++ be->major = 0;
++ be->minor = 0;
++ } else {
+ /* We're potentially connected now */
+ xen_update_blkif_status(be->blkif);
+ }
+diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
+index bc6f5fa..819dfda 100644
+--- a/drivers/dca/dca-core.c
++++ b/drivers/dca/dca-core.c
+@@ -420,6 +420,11 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
+
+ raw_spin_lock_irqsave(&dca_lock, flags);
+
++ if (list_empty(&dca_domains)) {
++ raw_spin_unlock_irqrestore(&dca_lock, flags);
++ return;
++ }
++
+ list_del(&dca->node);
+
+ pci_rc = dca_pci_rc_from_dev(dev);
+diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
+index f3b890d..1f3dd51 100644
+--- a/drivers/firewire/core-device.c
++++ b/drivers/firewire/core-device.c
+@@ -995,6 +995,10 @@ static void fw_device_init(struct work_struct *work)
+ ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
+ idr_get_new(&fw_device_idr, device, &minor) :
+ -ENOMEM;
++ if (minor >= 1 << MINORBITS) {
++ idr_remove(&fw_device_idr, minor);
++ minor = -ENOSPC;
++ }
+ up_write(&fw_device_rwsem);
+
+ if (ret < 0)
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index bb95d59..9080eb7 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -87,9 +87,6 @@ static struct edid_quirk {
+ int product_id;
+ u32 quirks;
+ } edid_quirk_list[] = {
+- /* ASUS VW222S */
+- { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+-
+ /* Acer AL1706 */
+ { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
+ /* Acer F51 */
+@@ -1743,7 +1740,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+ num_modes += add_cvt_modes(connector, edid);
+ num_modes += add_standard_modes(connector, edid);
+ num_modes += add_established_modes(connector, edid);
+- num_modes += add_inferred_modes(connector, edid);
++ if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
++ num_modes += add_inferred_modes(connector, edid);
+
+ if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+ edid_fixup_preferred(connector, quirks);
+diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
+index 445003f..471f453 100644
+--- a/drivers/gpu/drm/drm_usb.c
++++ b/drivers/gpu/drm/drm_usb.c
+@@ -19,7 +19,7 @@ int drm_get_usb_dev(struct usb_interface *interface,
+
+ usbdev = interface_to_usbdev(interface);
+ dev->usbdev = usbdev;
+- dev->dev = &usbdev->dev;
++ dev->dev = &interface->dev;
+
+ mutex_lock(&drm_global_mutex);
+
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index 10fe480..5620192 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -756,7 +756,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
+
+ seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
+ error->time.tv_usec);
+- seq_printf(m, "Kernel: " UTS_RELEASE);
++ seq_printf(m, "Kernel: " UTS_RELEASE "\n");
+ seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+ seq_printf(m, "EIR: 0x%08x\n", error->eir);
+ seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 7817429..2303c2b 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -138,8 +138,8 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
+ .vco = { .min = 1400000, .max = 2800000 },
+ .n = { .min = 1, .max = 6 },
+ .m = { .min = 70, .max = 120 },
+- .m1 = { .min = 10, .max = 22 },
+- .m2 = { .min = 5, .max = 9 },
++ .m1 = { .min = 8, .max = 18 },
++ .m2 = { .min = 3, .max = 7 },
+ .p = { .min = 5, .max = 80 },
+ .p1 = { .min = 1, .max = 8 },
+ .p2 = { .dot_limit = 200000,
+@@ -3242,6 +3242,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
++ u32 pctl;
+
+ if (!intel_crtc->active)
+ return;
+@@ -3257,6 +3258,13 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
+
+ intel_disable_plane(dev_priv, plane, pipe);
+ intel_disable_pipe(dev_priv, pipe);
++
++ /* Disable pannel fitter if it is on this pipe. */
++ pctl = I915_READ(PFIT_CONTROL);
++ if ((pctl & PFIT_ENABLE) &&
++ ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
++ I915_WRITE(PFIT_CONTROL, 0);
++
+ intel_disable_pll(dev_priv, pipe);
+
+ intel_crtc->active = false;
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 0977849..60d13fe 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1137,6 +1137,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
+ blackout &= ~BLACKOUT_MODE_MASK;
+ WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+ }
++ /* wait for the MC to settle */
++ udelay(100);
+ }
+
+ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 279b863d..a23b63a 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1900,6 +1900,7 @@ static const struct hid_device_id hid_ignore_list[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index c15c38e..25f3290 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -506,6 +506,9 @@
+ #define USB_VENDOR_ID_MADCATZ 0x0738
+ #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
+
++#define USB_VENDOR_ID_MASTERKIT 0x16c0
++#define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df
++
+ #define USB_VENDOR_ID_MCC 0x09db
+ #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
+ #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index dffdca8..f44a067 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4140,13 +4140,19 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
+ {
+ /*
+ * Mobile 4 Series Chipset neglects to set RWBF capability,
+- * but needs it:
++ * but needs it. Same seems to hold for the desktop versions.
+ */
+ printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
+ rwbf_quirk = 1;
+ }
+
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
+
+ #define GGC 0x52
+ #define GGC_MEMORY_SIZE_MASK (0xf << 8)
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index 29f9000..a47ba33 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -774,9 +774,12 @@ static ssize_t show_protocols(struct device *device,
+ if (dev->driver_type == RC_DRIVER_SCANCODE) {
+ enabled = dev->rc_map.rc_type;
+ allowed = dev->allowed_protos;
+- } else {
++ } else if (dev->raw) {
+ enabled = dev->raw->enabled_protocols;
+ allowed = ir_raw_get_allowed_protocols();
++ } else {
++ mutex_unlock(&dev->lock);
++ return -ENODEV;
+ }
+
+ IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n",
+diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
+index ee0d0b3..d345215 100644
+--- a/drivers/media/video/omap/omap_vout.c
++++ b/drivers/media/video/omap/omap_vout.c
+@@ -206,19 +206,21 @@ static u32 omap_vout_uservirt_to_phys(u32 virtp)
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+
+- vma = find_vma(mm, virtp);
+ /* For kernel direct-mapped memory, take the easy way */
+- if (virtp >= PAGE_OFFSET) {
+- physp = virt_to_phys((void *) virtp);
+- } else if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
++ if (virtp >= PAGE_OFFSET)
++ return virt_to_phys((void *) virtp);
++
++ down_read(&current->mm->mmap_sem);
++ vma = find_vma(mm, virtp);
++ if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
+ /* this will catch, kernel-allocated, mmaped-to-usermode
+ addresses */
+ physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
++ up_read(&current->mm->mmap_sem);
+ } else {
+ /* otherwise, use get_user_pages() for general userland pages */
+ int res, nr_pages = 1;
+ struct page *pages;
+- down_read(&current->mm->mmap_sem);
+
+ res = get_user_pages(current, current->mm, virtp, nr_pages, 1,
+ 0, &pages, NULL);
+diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
+index 0edd618..8b0777f 100644
+--- a/drivers/media/video/v4l2-device.c
++++ b/drivers/media/video/v4l2-device.c
+@@ -159,31 +159,21 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
+ sd->v4l2_dev = v4l2_dev;
+ if (sd->internal_ops && sd->internal_ops->registered) {
+ err = sd->internal_ops->registered(sd);
+- if (err) {
+- module_put(sd->owner);
+- return err;
+- }
++ if (err)
++ goto error_module;
+ }
+
+ /* This just returns 0 if either of the two args is NULL */
+ err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler);
+- if (err) {
+- if (sd->internal_ops && sd->internal_ops->unregistered)
+- sd->internal_ops->unregistered(sd);
+- module_put(sd->owner);
+- return err;
+- }
++ if (err)
++ goto error_unregister;
+
+ #if defined(CONFIG_MEDIA_CONTROLLER)
+ /* Register the entity. */
+ if (v4l2_dev->mdev) {
+ err = media_device_register_entity(v4l2_dev->mdev, entity);
+- if (err < 0) {
+- if (sd->internal_ops && sd->internal_ops->unregistered)
+- sd->internal_ops->unregistered(sd);
+- module_put(sd->owner);
+- return err;
+- }
++ if (err < 0)
++ goto error_unregister;
+ }
+ #endif
+
+@@ -192,6 +182,14 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
+ spin_unlock(&v4l2_dev->lock);
+
+ return 0;
++
++error_unregister:
++ if (sd->internal_ops && sd->internal_ops->unregistered)
++ sd->internal_ops->unregistered(sd);
++error_module:
++ module_put(sd->owner);
++ sd->v4l2_dev = NULL;
++ return err;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
+
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index 1b47937..85a074f 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -232,15 +232,18 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
+
+ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
+ {
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct pltfm_imx_data *imx_data = pltfm_host->priv;
++
+ if (unlikely(reg == SDHCI_HOST_VERSION)) {
+- u16 val = readw(host->ioaddr + (reg ^ 2));
+- /*
+- * uSDHC supports SDHCI v3.0, but it's encoded as value
+- * 0x3 in host controller version register, which violates
+- * SDHCI_SPEC_300 definition. Work it around here.
+- */
+- if ((val & SDHCI_SPEC_VER_MASK) == 3)
+- return --val;
++ reg ^= 2;
++ if (is_imx6q_usdhc(imx_data)) {
++ /*
++ * The usdhc register returns a wrong host version.
++ * Correct it here.
++ */
++ return SDHCI_SPEC_300;
++ }
+ }
+
+ return readw(host->ioaddr + reg);
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index cf177b8..df5a09a 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -4559,11 +4559,13 @@ void igb_update_stats(struct igb_adapter *adapter,
+ bytes = 0;
+ packets = 0;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+- u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
++ u32 rqdpc = rd32(E1000_RQDPC(i));
+ struct igb_ring *ring = adapter->rx_ring[i];
+
+- ring->rx_stats.drops += rqdpc_tmp;
+- net_stats->rx_fifo_errors += rqdpc_tmp;
++ if (rqdpc) {
++ ring->rx_stats.drops += rqdpc;
++ net_stats->rx_fifo_errors += rqdpc;
++ }
+
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
+index 315b96e..9fdd198 100644
+--- a/drivers/net/wireless/b43/dma.h
++++ b/drivers/net/wireless/b43/dma.h
+@@ -169,7 +169,7 @@ struct b43_dmadesc_generic {
+
+ /* DMA engine tuning knobs */
+ #define B43_TXRING_SLOTS 256
+-#define B43_RXRING_SLOTS 64
++#define B43_RXRING_SLOTS 256
+ #define B43_DMA0_RX_FW598_BUFSIZE (B43_DMA0_RX_FW598_FO + IEEE80211_MAX_FRAME_LEN)
+ #define B43_DMA0_RX_FW351_BUFSIZE (B43_DMA0_RX_FW351_FO + IEEE80211_MAX_FRAME_LEN)
+
+diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
+index 7ca84c3..564218c 100644
+--- a/drivers/net/wireless/p54/p54usb.c
++++ b/drivers/net/wireless/p54/p54usb.c
+@@ -84,8 +84,8 @@ static struct usb_device_id p54u_table[] = {
+ {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
+ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
+ {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */
+- {USB_DEVICE(0x083a, 0x4503)}, /* T-Com Sinus 154 data II */
+ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
++ {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */
+ {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */
+ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */
+ {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index a99be2d0..0984dcf 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -295,6 +295,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
+ /* RTL8188CUS-VL */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
++ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x819a, rtl92cu_hal_cfg)},
+ /* 8188 Combo for BC4 */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
+
+@@ -372,9 +373,15 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+
+ MODULE_DEVICE_TABLE(usb, rtl8192c_usb_ids);
+
++static int rtl8192cu_probe(struct usb_interface *intf,
++ const struct usb_device_id *id)
++{
++ return rtl_usb_probe(intf, id, &rtl92cu_hal_cfg);
++}
++
+ static struct usb_driver rtl8192cu_driver = {
+ .name = "rtl8192cu",
+- .probe = rtl_usb_probe,
++ .probe = rtl8192cu_probe,
+ .disconnect = rtl_usb_disconnect,
+ .id_table = rtl8192c_usb_ids,
+
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index 30dd0a9..c04ee92 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -44,8 +44,12 @@
+
+ static void usbctrl_async_callback(struct urb *urb)
+ {
+- if (urb)
+- kfree(urb->context);
++ if (urb) {
++ /* free dr */
++ kfree(urb->setup_packet);
++ /* free databuf */
++ kfree(urb->transfer_buffer);
++ }
+ }
+
+ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
+@@ -57,38 +61,46 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
+ u8 reqtype;
+ struct usb_ctrlrequest *dr;
+ struct urb *urb;
+- struct rtl819x_async_write_data {
+- u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE];
+- struct usb_ctrlrequest dr;
+- } *buf;
++ const u16 databuf_maxlen = REALTEK_USB_VENQT_MAX_BUF_SIZE;
++ u8 *databuf;
++
++ if (WARN_ON_ONCE(len > databuf_maxlen))
++ len = databuf_maxlen;
+
+ pipe = usb_sndctrlpipe(udev, 0); /* write_out */
+ reqtype = REALTEK_USB_VENQT_WRITE;
+
+- buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
+- if (!buf)
++ dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
++ if (!dr)
+ return -ENOMEM;
+
++ databuf = kmalloc(databuf_maxlen, GFP_ATOMIC);
++ if (!databuf) {
++ kfree(dr);
++ return -ENOMEM;
++ }
++
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+- kfree(buf);
++ kfree(databuf);
++ kfree(dr);
+ return -ENOMEM;
+ }
+
+- dr = &buf->dr;
+-
+ dr->bRequestType = reqtype;
+ dr->bRequest = request;
+ dr->wValue = cpu_to_le16(value);
+ dr->wIndex = cpu_to_le16(index);
+ dr->wLength = cpu_to_le16(len);
+- memcpy(buf, pdata, len);
++ memcpy(databuf, pdata, len);
+ usb_fill_control_urb(urb, udev, pipe,
+- (unsigned char *)dr, buf, len,
+- usbctrl_async_callback, buf);
++ (unsigned char *)dr, databuf, len,
++ usbctrl_async_callback, NULL);
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+- if (rc < 0)
+- kfree(buf);
++ if (rc < 0) {
++ kfree(databuf);
++ kfree(dr);
++ }
+ usb_free_urb(urb);
+ return rc;
+ }
+@@ -894,7 +906,8 @@ static struct rtl_intf_ops rtl_usb_ops = {
+ };
+
+ int __devinit rtl_usb_probe(struct usb_interface *intf,
+- const struct usb_device_id *id)
++ const struct usb_device_id *id,
++ struct rtl_hal_cfg *rtl_hal_cfg)
+ {
+ int err;
+ struct ieee80211_hw *hw = NULL;
+@@ -928,7 +941,7 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
+ usb_set_intfdata(intf, hw);
+ /* init cfg & intf_ops */
+ rtlpriv->rtlhal.interface = INTF_USB;
+- rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_info);
++ rtlpriv->cfg = rtl_hal_cfg;
+ rtlpriv->intf_ops = &rtl_usb_ops;
+ rtl_dbgp_flag_init(hw);
+ /* Init IO handler */
+diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/rtlwifi/usb.h
+index d2a63fb..4dc4b1c 100644
+--- a/drivers/net/wireless/rtlwifi/usb.h
++++ b/drivers/net/wireless/rtlwifi/usb.h
+@@ -158,7 +158,8 @@ struct rtl_usb_priv {
+
+
+ int __devinit rtl_usb_probe(struct usb_interface *intf,
+- const struct usb_device_id *id);
++ const struct usb_device_id *id,
++ struct rtl_hal_cfg *rtl92cu_hal_cfg);
+ void rtl_usb_disconnect(struct usb_interface *intf);
+ int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message);
+ int rtl_usb_resume(struct usb_interface *pusb_intf);
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 5925e0b..8eaf0e2 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -132,6 +132,7 @@ static void xenvif_up(struct xenvif *vif)
+ static void xenvif_down(struct xenvif *vif)
+ {
+ disable_irq(vif->irq);
++ del_timer_sync(&vif->credit_timeout);
+ xen_netbk_deschedule_xenvif(vif);
+ xen_netbk_remove_xenvif(vif);
+ }
+@@ -362,8 +363,6 @@ void xenvif_disconnect(struct xenvif *vif)
+ atomic_dec(&vif->refcnt);
+ wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
+
+- del_timer_sync(&vif->credit_timeout);
+-
+ if (vif->irq)
+ unbind_from_irqhandler(vif->irq, vif);
+
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index b802bb3..185a0eb 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -883,13 +883,13 @@ static int netbk_count_requests(struct xenvif *vif,
+ if (frags >= work_to_do) {
+ netdev_err(vif->dev, "Need more frags\n");
+ netbk_fatal_tx_err(vif);
+- return -frags;
++ return -ENODATA;
+ }
+
+ if (unlikely(frags >= MAX_SKB_FRAGS)) {
+ netdev_err(vif->dev, "Too many frags\n");
+ netbk_fatal_tx_err(vif);
+- return -frags;
++ return -E2BIG;
+ }
+
+ memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
+@@ -897,7 +897,7 @@ static int netbk_count_requests(struct xenvif *vif,
+ if (txp->size > first->size) {
+ netdev_err(vif->dev, "Frag is bigger than frame.\n");
+ netbk_fatal_tx_err(vif);
+- return -frags;
++ return -EIO;
+ }
+
+ first->size -= txp->size;
+@@ -907,7 +907,7 @@ static int netbk_count_requests(struct xenvif *vif,
+ netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
+ txp->offset, txp->size);
+ netbk_fatal_tx_err(vif);
+- return -frags;
++ return -EINVAL;
+ }
+ } while ((txp++)->flags & XEN_NETTXF_more_data);
+ return frags;
+diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
+index 7f87bee..f53da9e 100644
+--- a/drivers/pci/remove.c
++++ b/drivers/pci/remove.c
+@@ -19,6 +19,8 @@ static void pci_free_resources(struct pci_dev *dev)
+
+ static void pci_stop_dev(struct pci_dev *dev)
+ {
++ pci_pme_active(dev, false);
++
+ if (dev->is_added) {
+ pci_proc_detach_device(dev);
+ pci_remove_sysfs_dev_files(dev);
+diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
+index 86e4a1a..6bb02ab 100644
+--- a/drivers/pcmcia/vrc4171_card.c
++++ b/drivers/pcmcia/vrc4171_card.c
+@@ -246,6 +246,7 @@ static int pccard_init(struct pcmcia_socket *sock)
+ socket = &vrc4171_sockets[slot];
+ socket->csc_irq = search_nonuse_irq();
+ socket->io_irq = search_nonuse_irq();
++ spin_lock_init(&socket->lock);
+
+ return 0;
+ }
+diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
+index f75a4c8..3a09460 100644
+--- a/drivers/platform/x86/asus-laptop.c
++++ b/drivers/platform/x86/asus-laptop.c
+@@ -820,8 +820,10 @@ static ssize_t show_infos(struct device *dev,
+ /*
+ * The HWRS method return informations about the hardware.
+ * 0x80 bit is for WLAN, 0x100 for Bluetooth.
++ * 0x40 for WWAN, 0x10 for WIMAX.
+ * The significance of others is yet to be found.
+- * If we don't find the method, we assume the device are present.
++ * We don't currently use this for device detection, and it
++ * takes several seconds to run on some systems.
+ */
+ rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp);
+ if (!ACPI_FAILURE(rv))
+@@ -1591,7 +1593,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
+ {
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *model = NULL;
+- unsigned long long bsts_result, hwrs_result;
++ unsigned long long bsts_result;
+ char *string = NULL;
+ acpi_status status;
+
+@@ -1653,17 +1655,6 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
+ if (*string)
+ pr_notice(" %s model detected\n", string);
+
+- /*
+- * The HWRS method return informations about the hardware.
+- * 0x80 bit is for WLAN, 0x100 for Bluetooth,
+- * 0x40 for WWAN, 0x10 for WIMAX.
+- * The significance of others is yet to be found.
+- */
+- status =
+- acpi_evaluate_integer(asus->handle, "HWRS", NULL, &hwrs_result);
+- if (!ACPI_FAILURE(status))
+- pr_notice(" HWRS returned %x", (int)hwrs_result);
+-
+ if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
+ asus->have_rsts = true;
+
+diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c
+index 79451f2..60cee9e 100644
+--- a/drivers/pps/clients/pps-ldisc.c
++++ b/drivers/pps/clients/pps-ldisc.c
+@@ -31,7 +31,7 @@
+ static void pps_tty_dcd_change(struct tty_struct *tty, unsigned int status,
+ struct pps_event_time *ts)
+ {
+- struct pps_device *pps = (struct pps_device *)tty->disc_data;
++ struct pps_device *pps = pps_lookup_dev(tty);
+
+ BUG_ON(pps == NULL);
+
+@@ -67,9 +67,9 @@ static int pps_tty_open(struct tty_struct *tty)
+ pr_err("cannot register PPS source \"%s\"\n", info.path);
+ return -ENOMEM;
+ }
+- tty->disc_data = pps;
++ pps->lookup_cookie = tty;
+
+- /* Should open N_TTY ldisc too */
++ /* Now open the base class N_TTY ldisc */
+ ret = alias_n_tty_open(tty);
+ if (ret < 0) {
+ pr_err("cannot open tty ldisc \"%s\"\n", info.path);
+@@ -81,7 +81,6 @@ static int pps_tty_open(struct tty_struct *tty)
+ return 0;
+
+ err_unregister:
+- tty->disc_data = NULL;
+ pps_unregister_source(pps);
+ return ret;
+ }
+@@ -90,11 +89,10 @@ static void (*alias_n_tty_close)(struct tty_struct *tty);
+
+ static void pps_tty_close(struct tty_struct *tty)
+ {
+- struct pps_device *pps = (struct pps_device *)tty->disc_data;
++ struct pps_device *pps = pps_lookup_dev(tty);
+
+ alias_n_tty_close(tty);
+
+- tty->disc_data = NULL;
+ dev_info(pps->dev, "removed\n");
+ pps_unregister_source(pps);
+ }
+diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
+index 2baadd2..e83669f 100644
+--- a/drivers/pps/pps.c
++++ b/drivers/pps/pps.c
+@@ -247,12 +247,15 @@ static int pps_cdev_open(struct inode *inode, struct file *file)
+ struct pps_device *pps = container_of(inode->i_cdev,
+ struct pps_device, cdev);
+ file->private_data = pps;
+-
++ kobject_get(&pps->dev->kobj);
+ return 0;
+ }
+
+ static int pps_cdev_release(struct inode *inode, struct file *file)
+ {
++ struct pps_device *pps = container_of(inode->i_cdev,
++ struct pps_device, cdev);
++ kobject_put(&pps->dev->kobj);
+ return 0;
+ }
+
+@@ -274,8 +277,10 @@ static void pps_device_destruct(struct device *dev)
+ {
+ struct pps_device *pps = dev_get_drvdata(dev);
+
+- /* release id here to protect others from using it while it's
+- * still in use */
++ cdev_del(&pps->cdev);
++
++ /* Now we can release the ID for re-use */
++ pr_debug("deallocating pps%d\n", pps->id);
+ mutex_lock(&pps_idr_lock);
+ idr_remove(&pps_idr, pps->id);
+ mutex_unlock(&pps_idr_lock);
+@@ -330,6 +335,7 @@ int pps_register_cdev(struct pps_device *pps)
+ if (IS_ERR(pps->dev))
+ goto del_cdev;
+
++ /* Override the release function with our own */
+ pps->dev->release = pps_device_destruct;
+
+ pr_debug("source %s got cdev (%d:%d)\n", pps->info.name,
+@@ -350,11 +356,44 @@ free_idr:
+
+ void pps_unregister_cdev(struct pps_device *pps)
+ {
++ pr_debug("unregistering pps%d\n", pps->id);
++ pps->lookup_cookie = NULL;
+ device_destroy(pps_class, pps->dev->devt);
+- cdev_del(&pps->cdev);
+ }
+
+ /*
++ * Look up a pps device by magic cookie.
++ * The cookie is usually a pointer to some enclosing device, but this
++ * code doesn't care; you should never be dereferencing it.
++ *
++ * This is a bit of a kludge that is currently used only by the PPS
++ * serial line discipline. It may need to be tweaked when a second user
++ * is found.
++ *
++ * There is no function interface for setting the lookup_cookie field.
++ * It's initialized to NULL when the pps device is created, and if a
++ * client wants to use it, just fill it in afterward.
++ *
++ * The cookie is automatically set to NULL in pps_unregister_source()
++ * so that it will not be used again, even if the pps device cannot
++ * be removed from the idr due to pending references holding the minor
++ * number in use.
++ */
++struct pps_device *pps_lookup_dev(void const *cookie)
++{
++ struct pps_device *pps;
++ unsigned id;
++
++ rcu_read_lock();
++ idr_for_each_entry(&pps_idr, pps, id)
++ if (cookie == pps->lookup_cookie)
++ break;
++ rcu_read_unlock();
++ return pps;
++}
++EXPORT_SYMBOL(pps_lookup_dev);
++
++/*
+ * Module stuff
+ */
+
+diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
+index 73816d8..1f94073 100644
+--- a/drivers/rtc/rtc-pl031.c
++++ b/drivers/rtc/rtc-pl031.c
+@@ -344,7 +344,9 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
+ /* Enable the clockwatch on ST Variants */
+ if (ldata->hw_designer == AMBA_VENDOR_ST)
+ data |= RTC_CR_CWEN;
+- writel(data | RTC_CR_EN, ldata->base + RTC_CR);
++ else
++ data |= RTC_CR_EN;
++ writel(data, ldata->base + RTC_CR);
+
+ /*
+ * On ST PL031 variants, the RTC reset value does not provide correct
+diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
+index 94f49ff..b1e8f6c 100644
+--- a/drivers/s390/kvm/kvm_virtio.c
++++ b/drivers/s390/kvm/kvm_virtio.c
+@@ -414,6 +414,26 @@ static void kvm_extint_handler(unsigned int ext_int_code,
+ }
+
+ /*
++ * For s390-virtio, we expect a page above main storage containing
++ * the virtio configuration. Try to actually load from this area
++ * in order to figure out if the host provides this page.
++ */
++static int __init test_devices_support(unsigned long addr)
++{
++ int ret = -EIO;
++
++ asm volatile(
++ "0: lura 0,%1\n"
++ "1: xgr %0,%0\n"
++ "2:\n"
++ EX_TABLE(0b,2b)
++ EX_TABLE(1b,2b)
++ : "+d" (ret)
++ : "a" (addr)
++ : "0", "cc");
++ return ret;
++}
++/*
+ * Init function for virtio
+ * devices are in a single page above top of "normal" mem
+ */
+@@ -424,21 +444,23 @@ static int __init kvm_devices_init(void)
+ if (!MACHINE_IS_KVM)
+ return -ENODEV;
+
++ if (test_devices_support(real_memory_size) < 0)
++ return -ENODEV;
++
++ rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
++ if (rc)
++ return rc;
++
++ kvm_devices = (void *) real_memory_size;
++
+ kvm_root = root_device_register("kvm_s390");
+ if (IS_ERR(kvm_root)) {
+ rc = PTR_ERR(kvm_root);
+ printk(KERN_ERR "Could not register kvm_s390 root device");
++ vmem_remove_mapping(real_memory_size, PAGE_SIZE);
+ return rc;
+ }
+
+- rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
+- if (rc) {
+- root_device_unregister(kvm_root);
+- return rc;
+- }
+-
+- kvm_devices = (void *) real_memory_size;
+-
+ INIT_WORK(&hotplug_work, hotplug_devices);
+
+ service_subclass_irq_register();
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index ab9f5ed..a023f52 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -136,6 +136,11 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
+ /* Device config is special, because it must work on
+ * an unconfigured device. */
+ if (cmd == COMEDI_DEVCONFIG) {
++ if (minor >= COMEDI_NUM_BOARD_MINORS) {
++ /* Device config not appropriate on non-board minors. */
++ rc = -ENOTTY;
++ goto done;
++ }
+ rc = do_devconfig_ioctl(dev,
+ (struct comedi_devconfig __user *)arg);
+ goto done;
+@@ -1569,7 +1574,7 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
+
+ mask = 0;
+ read_subdev = comedi_get_read_subdevice(dev_file_info);
+- if (read_subdev) {
++ if (read_subdev && read_subdev->async) {
+ poll_wait(file, &read_subdev->async->wait_head, wait);
+ if (!read_subdev->busy
+ || comedi_buf_read_n_available(read_subdev->async) > 0
+@@ -1579,7 +1584,7 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
+ }
+ }
+ write_subdev = comedi_get_write_subdevice(dev_file_info);
+- if (write_subdev) {
++ if (write_subdev && write_subdev->async) {
+ poll_wait(file, &write_subdev->async->wait_head, wait);
+ comedi_buf_write_alloc(write_subdev->async,
+ write_subdev->async->prealloc_bufsz);
+@@ -1621,7 +1626,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
+ }
+
+ s = comedi_get_write_subdevice(dev_file_info);
+- if (s == NULL) {
++ if (s == NULL || s->async == NULL) {
+ retval = -EIO;
+ goto done;
+ }
+@@ -1732,7 +1737,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
+ }
+
+ s = comedi_get_read_subdevice(dev_file_info);
+- if (s == NULL) {
++ if (s == NULL || s->async == NULL) {
+ retval = -EIO;
+ goto done;
+ }
+diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
+index 721b2be..0517a23 100644
+--- a/drivers/staging/comedi/drivers/ni_labpc.c
++++ b/drivers/staging/comedi/drivers/ni_labpc.c
+@@ -1264,7 +1264,9 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+ else
+ channel = CR_CHAN(cmd->chanlist[0]);
+ /* munge channel bits for differential / scan disabled mode */
+- if (labpc_ai_scan_mode(cmd) != MODE_SINGLE_CHAN && aref == AREF_DIFF)
++ if ((labpc_ai_scan_mode(cmd) == MODE_SINGLE_CHAN ||
++ labpc_ai_scan_mode(cmd) == MODE_SINGLE_CHAN_INTERVAL) &&
++ aref == AREF_DIFF)
+ channel *= 2;
+ devpriv->command1_bits |= ADC_CHAN_BITS(channel);
+ devpriv->command1_bits |= thisboard->ai_range_code[range];
+@@ -1280,21 +1282,6 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+ devpriv->write_byte(devpriv->command1_bits,
+ dev->iobase + COMMAND1_REG);
+ }
+- /* setup any external triggering/pacing (command4 register) */
+- devpriv->command4_bits = 0;
+- if (cmd->convert_src != TRIG_EXT)
+- devpriv->command4_bits |= EXT_CONVERT_DISABLE_BIT;
+- /* XXX should discard first scan when using interval scanning
+- * since manual says it is not synced with scan clock */
+- if (labpc_use_continuous_mode(cmd) == 0) {
+- devpriv->command4_bits |= INTERVAL_SCAN_EN_BIT;
+- if (cmd->scan_begin_src == TRIG_EXT)
+- devpriv->command4_bits |= EXT_SCAN_EN_BIT;
+- }
+- /* single-ended/differential */
+- if (aref == AREF_DIFF)
+- devpriv->command4_bits |= ADC_DIFF_BIT;
+- devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG);
+
+ devpriv->write_byte(cmd->chanlist_len,
+ dev->iobase + INTERVAL_COUNT_REG);
+@@ -1374,6 +1361,22 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+ devpriv->command3_bits &= ~ADC_FNE_INTR_EN_BIT;
+ devpriv->write_byte(devpriv->command3_bits, dev->iobase + COMMAND3_REG);
+
++ /* setup any external triggering/pacing (command4 register) */
++ devpriv->command4_bits = 0;
++ if (cmd->convert_src != TRIG_EXT)
++ devpriv->command4_bits |= EXT_CONVERT_DISABLE_BIT;
++ /* XXX should discard first scan when using interval scanning
++ * since manual says it is not synced with scan clock */
++ if (labpc_use_continuous_mode(cmd) == 0) {
++ devpriv->command4_bits |= INTERVAL_SCAN_EN_BIT;
++ if (cmd->scan_begin_src == TRIG_EXT)
++ devpriv->command4_bits |= EXT_SCAN_EN_BIT;
++ }
++ /* single-ended/differential */
++ if (aref == AREF_DIFF)
++ devpriv->command4_bits |= ADC_DIFF_BIT;
++ devpriv->write_byte(devpriv->command4_bits, dev->iobase + COMMAND4_REG);
++
+ /* startup acquisition */
+
+ /* command2 reg */
+diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
+index b5130c8..e2f5c81 100644
+--- a/drivers/staging/speakup/speakup_soft.c
++++ b/drivers/staging/speakup/speakup_soft.c
+@@ -46,7 +46,7 @@ static int misc_registered;
+ static struct var_t vars[] = {
+ { CAPS_START, .u.s = {"\x01+3p" } },
+ { CAPS_STOP, .u.s = {"\x01-3p" } },
+- { RATE, .u.n = {"\x01%ds", 5, 0, 9, 0, 0, NULL } },
++ { RATE, .u.n = {"\x01%ds", 2, 0, 9, 0, 0, NULL } },
+ { PITCH, .u.n = {"\x01%dp", 5, 0, 9, 0, 0, NULL } },
+ { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } },
+ { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } },
+diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
+index 09de99f..2594a31 100644
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -242,7 +242,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+
+ if (is_partial_io(bvec)) {
+ /* Use a temporary buffer to decompress the page */
+- uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
++ uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
+ if (!uncmem) {
+ pr_info("Error allocating temp memory!\n");
+ return -ENOMEM;
+@@ -338,7 +338,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
+ * This is a partial IO. We need to read the full page
+ * before to write the changes.
+ */
+- uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
++ uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
+ if (!uncmem) {
+ pr_info("Error allocating temp memory!\n");
+ ret = -ENOMEM;
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index a0143a0..5def359 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1439,24 +1439,18 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
+
+ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
+ struct se_portal_group *tpg,
++ struct se_node_acl *nacl,
+ u32 mapped_lun,
+- char *initiatorname,
+ int *ret)
+ {
+ struct se_lun_acl *lacl;
+- struct se_node_acl *nacl;
+
+- if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
++ if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
+ pr_err("%s InitiatorName exceeds maximum size.\n",
+ tpg->se_tpg_tfo->get_fabric_name());
+ *ret = -EOVERFLOW;
+ return NULL;
+ }
+- nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
+- if (!nacl) {
+- *ret = -EINVAL;
+- return NULL;
+- }
+ lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
+ if (!lacl) {
+ pr_err("Unable to allocate memory for struct se_lun_acl.\n");
+@@ -1467,7 +1461,8 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
+ INIT_LIST_HEAD(&lacl->lacl_list);
+ lacl->mapped_lun = mapped_lun;
+ lacl->se_lun_nacl = nacl;
+- snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
++ snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
++ nacl->initiatorname);
+
+ return lacl;
+ }
+diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
+index 09b6f87..60009bd 100644
+--- a/drivers/target/target_core_fabric_configfs.c
++++ b/drivers/target/target_core_fabric_configfs.c
+@@ -354,9 +354,17 @@ static struct config_group *target_fabric_make_mappedlun(
+ ret = -EINVAL;
+ goto out;
+ }
++ if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
++ pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG"
++ "-1: %u for Target Portal Group: %u\n", mapped_lun,
++ TRANSPORT_MAX_LUNS_PER_TPG-1,
++ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
++ ret = -EINVAL;
++ goto out;
++ }
+
+- lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
+- config_item_name(acl_ci), &ret);
++ lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl,
++ mapped_lun, &ret);
+ if (!lacl) {
+ ret = -EINVAL;
+ goto out;
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
+index d91fe44..d048e33 100644
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -117,16 +117,10 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
+ struct se_node_acl *acl;
+
+ spin_lock_irq(&tpg->acl_node_lock);
+- list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+- if (!strcmp(acl->initiatorname, initiatorname) &&
+- !acl->dynamic_node_acl) {
+- spin_unlock_irq(&tpg->acl_node_lock);
+- return acl;
+- }
+- }
++ acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+ spin_unlock_irq(&tpg->acl_node_lock);
+
+- return NULL;
++ return acl;
+ }
+
+ /* core_tpg_add_node_to_devs():
+diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
+index 90dad17..6748568 100644
+--- a/drivers/tty/serial/8250.c
++++ b/drivers/tty/serial/8250.c
+@@ -2695,7 +2695,7 @@ serial8250_verify_port(struct uart_port *port, struct serial_struct *ser)
+ if (ser->irq >= nr_irqs || ser->irq < 0 ||
+ ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
+ ser->type >= ARRAY_SIZE(uart_config) || ser->type == PORT_CIRRUS ||
+- ser->type == PORT_STARTECH)
++ ser->type == PORT_STARTECH || uart_config[ser->type].name == NULL)
+ return -EINVAL;
+ return 0;
+ }
+@@ -2705,7 +2705,7 @@ serial8250_type(struct uart_port *port)
+ {
+ int type = port->type;
+
+- if (type >= ARRAY_SIZE(uart_config))
++ if (type >= ARRAY_SIZE(uart_config) || uart_config[type].name == NULL)
+ type = 0;
+ return uart_config[type].name;
+ }
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index 9314d93..937f927 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -618,7 +618,7 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
+ if (opt & TERMIOS_WAIT) {
+ tty_wait_until_sent(tty, 0);
+ if (signal_pending(current))
+- return -EINTR;
++ return -ERESTARTSYS;
+ }
+
+ tty_set_termios(tty, &tmp_termios);
+@@ -685,7 +685,7 @@ static int set_termiox(struct tty_struct *tty, void __user *arg, int opt)
+ if (opt & TERMIOS_WAIT) {
+ tty_wait_until_sent(tty, 0);
+ if (signal_pending(current))
+- return -EINTR;
++ return -ERESTARTSYS;
+ }
+
+ mutex_lock(&tty->termios_mutex);
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index e716839..632df54 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -657,7 +657,7 @@ static inline void save_screen(struct vc_data *vc)
+ * Redrawing of screen
+ */
+
+-static void clear_buffer_attributes(struct vc_data *vc)
++void clear_buffer_attributes(struct vc_data *vc)
+ {
+ unsigned short *p = (unsigned short *)vc->vc_origin;
+ int count = vc->vc_screenbuf_size / 2;
+@@ -3016,7 +3016,7 @@ int __init vty_init(const struct file_operations *console_fops)
+
+ static struct class *vtconsole_class;
+
+-static int bind_con_driver(const struct consw *csw, int first, int last,
++static int do_bind_con_driver(const struct consw *csw, int first, int last,
+ int deflt)
+ {
+ struct module *owner = csw->owner;
+@@ -3027,7 +3027,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
+ if (!try_module_get(owner))
+ return -ENODEV;
+
+- console_lock();
++ WARN_CONSOLE_UNLOCKED();
+
+ /* check if driver is registered */
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+@@ -3112,11 +3112,22 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
+
+ retval = 0;
+ err:
+- console_unlock();
+ module_put(owner);
+ return retval;
+ };
+
++
++static int bind_con_driver(const struct consw *csw, int first, int last,
++ int deflt)
++{
++ int ret;
++
++ console_lock();
++ ret = do_bind_con_driver(csw, first, last, deflt);
++ console_unlock();
++ return ret;
++}
++
+ #ifdef CONFIG_VT_HW_CONSOLE_BINDING
+ static int con_is_graphics(const struct consw *csw, int first, int last)
+ {
+@@ -3153,6 +3164,18 @@ static int con_is_graphics(const struct consw *csw, int first, int last)
+ */
+ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ {
++ int retval;
++
++ console_lock();
++ retval = do_unbind_con_driver(csw, first, last, deflt);
++ console_unlock();
++ return retval;
++}
++EXPORT_SYMBOL(unbind_con_driver);
++
++/* unlocked version of unbind_con_driver() */
++int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
++{
+ struct module *owner = csw->owner;
+ const struct consw *defcsw = NULL;
+ struct con_driver *con_driver = NULL, *con_back = NULL;
+@@ -3161,7 +3184,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ if (!try_module_get(owner))
+ return -ENODEV;
+
+- console_lock();
++ WARN_CONSOLE_UNLOCKED();
+
+ /* check if driver is registered and if it is unbindable */
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+@@ -3174,10 +3197,8 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ }
+ }
+
+- if (retval) {
+- console_unlock();
++ if (retval)
+ goto err;
+- }
+
+ retval = -ENODEV;
+
+@@ -3193,15 +3214,11 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ }
+ }
+
+- if (retval) {
+- console_unlock();
++ if (retval)
+ goto err;
+- }
+
+- if (!con_is_bound(csw)) {
+- console_unlock();
++ if (!con_is_bound(csw))
+ goto err;
+- }
+
+ first = max(first, con_driver->first);
+ last = min(last, con_driver->last);
+@@ -3228,15 +3245,14 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
+ if (!con_is_bound(csw))
+ con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
+
+- console_unlock();
+ /* ignore return value, binding should not fail */
+- bind_con_driver(defcsw, first, last, deflt);
++ do_bind_con_driver(defcsw, first, last, deflt);
+ err:
+ module_put(owner);
+ return retval;
+
+ }
+-EXPORT_SYMBOL(unbind_con_driver);
++EXPORT_SYMBOL_GPL(do_unbind_con_driver);
+
+ static int vt_bind(struct con_driver *con)
+ {
+@@ -3508,28 +3524,18 @@ int con_debug_leave(void)
+ }
+ EXPORT_SYMBOL_GPL(con_debug_leave);
+
+-/**
+- * register_con_driver - register console driver to console layer
+- * @csw: console driver
+- * @first: the first console to take over, minimum value is 0
+- * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
+- *
+- * DESCRIPTION: This function registers a console driver which can later
+- * bind to a range of consoles specified by @first and @last. It will
+- * also initialize the console driver by calling con_startup().
+- */
+-int register_con_driver(const struct consw *csw, int first, int last)
++static int do_register_con_driver(const struct consw *csw, int first, int last)
+ {
+ struct module *owner = csw->owner;
+ struct con_driver *con_driver;
+ const char *desc;
+ int i, retval = 0;
+
++ WARN_CONSOLE_UNLOCKED();
++
+ if (!try_module_get(owner))
+ return -ENODEV;
+
+- console_lock();
+-
+ for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
+ con_driver = &registered_con_driver[i];
+
+@@ -3582,10 +3588,29 @@ int register_con_driver(const struct consw *csw, int first, int last)
+ }
+
+ err:
+- console_unlock();
+ module_put(owner);
+ return retval;
+ }
++
++/**
++ * register_con_driver - register console driver to console layer
++ * @csw: console driver
++ * @first: the first console to take over, minimum value is 0
++ * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
++ *
++ * DESCRIPTION: This function registers a console driver which can later
++ * bind to a range of consoles specified by @first and @last. It will
++ * also initialize the console driver by calling con_startup().
++ */
++int register_con_driver(const struct consw *csw, int first, int last)
++{
++ int retval;
++
++ console_lock();
++ retval = do_register_con_driver(csw, first, last);
++ console_unlock();
++ return retval;
++}
+ EXPORT_SYMBOL(register_con_driver);
+
+ /**
+@@ -3601,9 +3626,18 @@ EXPORT_SYMBOL(register_con_driver);
+ */
+ int unregister_con_driver(const struct consw *csw)
+ {
+- int i, retval = -ENODEV;
++ int retval;
+
+ console_lock();
++ retval = do_unregister_con_driver(csw);
++ console_unlock();
++ return retval;
++}
++EXPORT_SYMBOL(unregister_con_driver);
++
++int do_unregister_con_driver(const struct consw *csw)
++{
++ int i, retval = -ENODEV;
+
+ /* cannot unregister a bound driver */
+ if (con_is_bound(csw))
+@@ -3629,27 +3663,53 @@ int unregister_con_driver(const struct consw *csw)
+ }
+ }
+ err:
+- console_unlock();
+ return retval;
+ }
+-EXPORT_SYMBOL(unregister_con_driver);
++EXPORT_SYMBOL_GPL(do_unregister_con_driver);
+
+ /*
+ * If we support more console drivers, this function is used
+ * when a driver wants to take over some existing consoles
+ * and become default driver for newly opened ones.
+ *
+- * take_over_console is basically a register followed by unbind
++ * take_over_console is basically a register followed by unbind
++ */
++int do_take_over_console(const struct consw *csw, int first, int last, int deflt)
++{
++ int err;
++
++ err = do_register_con_driver(csw, first, last);
++ /*
++ * If we get an busy error we still want to bind the console driver
++ * and return success, as we may have unbound the console driver
++ * but not unregistered it.
++ */
++ if (err == -EBUSY)
++ err = 0;
++ if (!err)
++ do_bind_con_driver(csw, first, last, deflt);
++
++ return err;
++}
++EXPORT_SYMBOL_GPL(do_take_over_console);
++
++/*
++ * If we support more console drivers, this function is used
++ * when a driver wants to take over some existing consoles
++ * and become default driver for newly opened ones.
++ *
++ * take_over_console is basically a register followed by unbind
+ */
+ int take_over_console(const struct consw *csw, int first, int last, int deflt)
+ {
+ int err;
+
+ err = register_con_driver(csw, first, last);
+- /* if we get an busy error we still want to bind the console driver
++ /*
++ * If we get an busy error we still want to bind the console driver
+ * and return success, as we may have unbound the console driver
+-  * but not unregistered it.
+- */
++ * but not unregistered it.
++ */
+ if (err == -EBUSY)
+ err = 0;
+ if (!err)
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index c77f0d6..9f3003e 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -541,22 +541,10 @@ int usb_match_device(struct usb_device *dev, const struct usb_device_id *id)
+ }
+
+ /* returns 0 if no match, 1 if match */
+-int usb_match_one_id(struct usb_interface *interface,
+- const struct usb_device_id *id)
++int usb_match_one_id_intf(struct usb_device *dev,
++ struct usb_host_interface *intf,
++ const struct usb_device_id *id)
+ {
+- struct usb_host_interface *intf;
+- struct usb_device *dev;
+-
+- /* proc_connectinfo in devio.c may call us with id == NULL. */
+- if (id == NULL)
+- return 0;
+-
+- intf = interface->cur_altsetting;
+- dev = interface_to_usbdev(interface);
+-
+- if (!usb_match_device(dev, id))
+- return 0;
+-
+ /* The interface class, subclass, and protocol should never be
+ * checked for a match if the device class is Vendor Specific,
+ * unless the match record specifies the Vendor ID. */
+@@ -581,6 +569,26 @@ int usb_match_one_id(struct usb_interface *interface,
+
+ return 1;
+ }
++
++/* returns 0 if no match, 1 if match */
++int usb_match_one_id(struct usb_interface *interface,
++ const struct usb_device_id *id)
++{
++ struct usb_host_interface *intf;
++ struct usb_device *dev;
++
++ /* proc_connectinfo in devio.c may call us with id == NULL. */
++ if (id == NULL)
++ return 0;
++
++ intf = interface->cur_altsetting;
++ dev = interface_to_usbdev(interface);
++
++ if (!usb_match_device(dev, id))
++ return 0;
++
++ return usb_match_one_id_intf(dev, intf, id);
++}
+ EXPORT_SYMBOL_GPL(usb_match_one_id);
+
+ /**
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 0ff8e9a..2564d8d 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1883,7 +1883,7 @@ static int usb_enumerate_device(struct usb_device *udev)
+ if (err < 0) {
+ dev_err(&udev->dev, "can't read configurations, error %d\n",
+ err);
+- goto fail;
++ return err;
+ }
+ }
+ if (udev->wusb == 1 && udev->authorized == 0) {
+@@ -1899,8 +1899,12 @@ static int usb_enumerate_device(struct usb_device *udev)
+ udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber);
+ }
+ err = usb_enumerate_device_otg(udev);
+-fail:
+- return err;
++ if (err < 0)
++ return err;
++
++ usb_detect_interface_quirks(udev);
++
++ return 0;
+ }
+
+
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 3f08c09..0aaa4f1 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -15,17 +15,22 @@
+ #include <linux/usb/quirks.h>
+ #include "usb.h"
+
+-/* List of quirky USB devices. Please keep this list ordered by:
++/* Lists of quirky USB devices, split in device quirks and interface quirks.
++ * Device quirks are applied at the very beginning of the enumeration process,
++ * right after reading the device descriptor. They can thus only match on device
++ * information.
++ *
++ * Interface quirks are applied after reading all the configuration descriptors.
++ * They can match on both device and interface information.
++ *
++ * Note that the DELAY_INIT and HONOR_BNUMINTERFACES quirks do not make sense as
++ * interface quirks, as they only influence the enumeration process which is run
++ * before processing the interface quirks.
++ *
++ * Please keep the lists ordered by:
+ * 1) Vendor ID
+ * 2) Product ID
+ * 3) Class ID
+- *
+- * as we want specific devices to be overridden first, and only after that, any
+- * class specific quirks.
+- *
+- * Right now the logic aborts if it finds a valid device in the table, we might
+- * want to change that in the future if it turns out that a whole class of
+- * devices is broken...
+ */
+ static const struct usb_device_id usb_quirk_list[] = {
+ /* CBM - Flash disk */
+@@ -41,53 +46,23 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Microsoft LifeCam-VX700 v2.0 */
+ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+
+- /* Logitech Webcam C200 */
+- { USB_DEVICE(0x046d, 0x0802), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Webcam C250 */
+- { USB_DEVICE(0x046d, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Webcam C300 */
+- { USB_DEVICE(0x046d, 0x0805), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Webcam B/C500 */
+- { USB_DEVICE(0x046d, 0x0807), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Webcam C600 */
+- { USB_DEVICE(0x046d, 0x0808), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Webcam Pro 9000 */
+- { USB_DEVICE(0x046d, 0x0809), .driver_info = USB_QUIRK_RESET_RESUME },
++ /* Logitech Quickcam Fusion */
++ { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
+
+- /* Logitech Webcam C905 */
+- { USB_DEVICE(0x046d, 0x080a), .driver_info = USB_QUIRK_RESET_RESUME },
++ /* Logitech Quickcam Orbit MP */
++ { USB_DEVICE(0x046d, 0x08c2), .driver_info = USB_QUIRK_RESET_RESUME },
+
+- /* Logitech Webcam C210 */
+- { USB_DEVICE(0x046d, 0x0819), .driver_info = USB_QUIRK_RESET_RESUME },
++ /* Logitech Quickcam Pro for Notebook */
++ { USB_DEVICE(0x046d, 0x08c3), .driver_info = USB_QUIRK_RESET_RESUME },
+
+- /* Logitech Webcam C260 */
+- { USB_DEVICE(0x046d, 0x081a), .driver_info = USB_QUIRK_RESET_RESUME },
++ /* Logitech Quickcam Pro 5000 */
++ { USB_DEVICE(0x046d, 0x08c5), .driver_info = USB_QUIRK_RESET_RESUME },
+
+- /* Logitech Webcam C310 */
+- { USB_DEVICE(0x046d, 0x081b), .driver_info = USB_QUIRK_RESET_RESUME },
++ /* Logitech Quickcam OEM Dell Notebook */
++ { USB_DEVICE(0x046d, 0x08c6), .driver_info = USB_QUIRK_RESET_RESUME },
+
+- /* Logitech Webcam C910 */
+- { USB_DEVICE(0x046d, 0x0821), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Webcam C160 */
+- { USB_DEVICE(0x046d, 0x0824), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Webcam C270 */
+- { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Quickcam Pro 9000 */
+- { USB_DEVICE(0x046d, 0x0990), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Quickcam E3500 */
+- { USB_DEVICE(0x046d, 0x09a4), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+- /* Logitech Quickcam Vision Pro */
+- { USB_DEVICE(0x046d, 0x09a6), .driver_info = USB_QUIRK_RESET_RESUME },
++ /* Logitech Quickcam OEM Cisco VT Camera II */
++ { USB_DEVICE(0x046d, 0x08c7), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Logitech Harmony 700-series */
+ { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT },
+@@ -163,16 +138,57 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { } /* terminating entry must be last */
+ };
+
+-static const struct usb_device_id *find_id(struct usb_device *udev)
++static const struct usb_device_id usb_interface_quirk_list[] = {
++ /* Logitech UVC Cameras */
++ { USB_VENDOR_AND_INTERFACE_INFO(0x046d, USB_CLASS_VIDEO, 1, 0),
++ .driver_info = USB_QUIRK_RESET_RESUME },
++
++ { } /* terminating entry must be last */
++};
++
++static bool usb_match_any_interface(struct usb_device *udev,
++ const struct usb_device_id *id)
+ {
+- const struct usb_device_id *id = usb_quirk_list;
++ unsigned int i;
+
+- for (; id->idVendor || id->bDeviceClass || id->bInterfaceClass ||
+- id->driver_info; id++) {
+- if (usb_match_device(udev, id))
+- return id;
++ for (i = 0; i < udev->descriptor.bNumConfigurations; ++i) {
++ struct usb_host_config *cfg = &udev->config[i];
++ unsigned int j;
++
++ for (j = 0; j < cfg->desc.bNumInterfaces; ++j) {
++ struct usb_interface_cache *cache;
++ struct usb_host_interface *intf;
++
++ cache = cfg->intf_cache[j];
++ if (cache->num_altsetting == 0)
++ continue;
++
++ intf = &cache->altsetting[0];
++ if (usb_match_one_id_intf(udev, intf, id))
++ return true;
++ }
++ }
++
++ return false;
++}
++
++static u32 __usb_detect_quirks(struct usb_device *udev,
++ const struct usb_device_id *id)
++{
++ u32 quirks = 0;
++
++ for (; id->match_flags; id++) {
++ if (!usb_match_device(udev, id))
++ continue;
++
++ if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_INFO) &&
++ !usb_match_any_interface(udev, id))
++ continue;
++
++ quirks |= (u32)(id->driver_info);
+ }
+- return NULL;
++
++ return quirks;
+ }
+
+ /*
+@@ -180,14 +196,10 @@ static const struct usb_device_id *find_id(struct usb_device *udev)
+ */
+ void usb_detect_quirks(struct usb_device *udev)
+ {
+- const struct usb_device_id *id = usb_quirk_list;
+-
+- id = find_id(udev);
+- if (id)
+- udev->quirks = (u32)(id->driver_info);
++ udev->quirks = __usb_detect_quirks(udev, usb_quirk_list);
+ if (udev->quirks)
+ dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
+- udev->quirks);
++ udev->quirks);
+
+ /* For the present, all devices default to USB-PERSIST enabled */
+ #if 0 /* was: #ifdef CONFIG_PM */
+@@ -204,3 +216,16 @@ void usb_detect_quirks(struct usb_device *udev)
+ udev->persist_enabled = 1;
+ #endif /* CONFIG_PM */
+ }
++
++void usb_detect_interface_quirks(struct usb_device *udev)
++{
++ u32 quirks;
++
++ quirks = __usb_detect_quirks(udev, usb_interface_quirk_list);
++ if (quirks == 0)
++ return;
++
++ dev_dbg(&udev->dev, "USB interface quirks for this device: %x\n",
++ quirks);
++ udev->quirks |= quirks;
++}
+diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
+index 45e8479..3e1159b 100644
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -24,6 +24,7 @@ extern void usb_disable_device(struct usb_device *dev, int skip_ep0);
+ extern int usb_deauthorize_device(struct usb_device *);
+ extern int usb_authorize_device(struct usb_device *);
+ extern void usb_detect_quirks(struct usb_device *udev);
++extern void usb_detect_interface_quirks(struct usb_device *udev);
+ extern int usb_remove_device(struct usb_device *udev);
+
+ extern int usb_get_device_descriptor(struct usb_device *dev,
+@@ -35,6 +36,9 @@ extern int usb_set_configuration(struct usb_device *dev, int configuration);
+ extern int usb_choose_configuration(struct usb_device *udev);
+
+ extern void usb_kick_khubd(struct usb_device *dev);
++extern int usb_match_one_id_intf(struct usb_device *dev,
++ struct usb_host_interface *intf,
++ const struct usb_device_id *id);
+ extern int usb_match_device(struct usb_device *dev,
+ const struct usb_device_id *id);
+ extern void usb_forced_unbind_intf(struct usb_interface *intf);
+diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
+index e39b029..d4159b8 100644
+--- a/drivers/usb/host/ehci-omap.c
++++ b/drivers/usb/host/ehci-omap.c
+@@ -337,7 +337,7 @@ static const struct hc_driver ehci_omap_hc_driver = {
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+ };
+
+-MODULE_ALIAS("platform:omap-ehci");
++MODULE_ALIAS("platform:ehci-omap");
+ MODULE_AUTHOR("Texas Instruments, Inc.");
+ MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
+
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index d644a66..71c4696 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1916,24 +1916,22 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
+ {
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+- mutex_lock(&port->serial->disc_mutex);
+- if (!port->serial->disconnected) {
+- /* Disable flow control */
+- if (!on && usb_control_msg(port->serial->dev,
++ /* Disable flow control */
++ if (!on) {
++ if (usb_control_msg(port->serial->dev,
+ usb_sndctrlpipe(port->serial->dev, 0),
+ FTDI_SIO_SET_FLOW_CTRL_REQUEST,
+ FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
+ 0, priv->interface, NULL, 0,
+ WDR_TIMEOUT) < 0) {
+- dev_err(&port->dev, "error from flowcontrol urb\n");
++ dev_err(&port->dev, "error from flowcontrol urb\n");
+ }
+- /* drop RTS and DTR */
+- if (on)
+- set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+- else
+- clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+ }
+- mutex_unlock(&port->serial->disc_mutex);
++ /* drop RTS and DTR */
++ if (on)
++ set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
++ else
++ clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+ }
+
+ /*
+diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
+index d3addb2..de0bb8e 100644
+--- a/drivers/usb/serial/mct_u232.c
++++ b/drivers/usb/serial/mct_u232.c
+@@ -558,19 +558,15 @@ static void mct_u232_dtr_rts(struct usb_serial_port *port, int on)
+ unsigned int control_state;
+ struct mct_u232_private *priv = usb_get_serial_port_data(port);
+
+- mutex_lock(&port->serial->disc_mutex);
+- if (!port->serial->disconnected) {
+- /* drop DTR and RTS */
+- spin_lock_irq(&priv->lock);
+- if (on)
+- priv->control_state |= TIOCM_DTR | TIOCM_RTS;
+- else
+- priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
+- control_state = priv->control_state;
+- spin_unlock_irq(&priv->lock);
+- mct_u232_set_modem_ctrl(port->serial, control_state);
+- }
+- mutex_unlock(&port->serial->disc_mutex);
++ spin_lock_irq(&priv->lock);
++ if (on)
++ priv->control_state |= TIOCM_DTR | TIOCM_RTS;
++ else
++ priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
++ control_state = priv->control_state;
++ spin_unlock_irq(&priv->lock);
++
++ mct_u232_set_modem_ctrl(port->serial, control_state);
+ }
+
+ static void mct_u232_close(struct usb_serial_port *port)
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 52cd814..24a3ea6 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -479,6 +479,7 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
+
+ static const struct option_blacklist_info alcatel_x200_blacklist = {
+ .sendsetup = BIT(0) | BIT(1),
++ .reserved = BIT(4),
+ };
+
+ static const struct option_blacklist_info zte_0037_blacklist = {
+@@ -575,8 +576,14 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
+ { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
++ { USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
+@@ -1215,7 +1222,14 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+ .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+ },
+- { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
++ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052),
++ .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index e1f1ebd..a7fa673 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -891,19 +891,13 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
+
+ static void sierra_dtr_rts(struct usb_serial_port *port, int on)
+ {
+- struct usb_serial *serial = port->serial;
+ struct sierra_port_private *portdata;
+
+ portdata = usb_get_serial_port_data(port);
+ portdata->rts_state = on;
+ portdata->dtr_state = on;
+
+- if (serial->dev) {
+- mutex_lock(&serial->disc_mutex);
+- if (!serial->disconnected)
+- sierra_send_setup(port);
+- mutex_unlock(&serial->disc_mutex);
+- }
++ sierra_send_setup(port);
+ }
+
+ static int sierra_startup(struct usb_serial *serial)
+diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
+index 87362e4..fff7f17 100644
+--- a/drivers/usb/serial/ssu100.c
++++ b/drivers/usb/serial/ssu100.c
+@@ -533,19 +533,16 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
+
+ dbg("%s\n", __func__);
+
+- mutex_lock(&port->serial->disc_mutex);
+- if (!port->serial->disconnected) {
+- /* Disable flow control */
+- if (!on &&
+- ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
++ /* Disable flow control */
++ if (!on) {
++ if (ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
+ dev_err(&port->dev, "error from flowcontrol urb\n");
+- /* drop RTS and DTR */
+- if (on)
+- set_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
+- else
+- clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
+ }
+- mutex_unlock(&port->serial->disc_mutex);
++ /* drop RTS and DTR */
++ if (on)
++ set_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
++ else
++ clear_mctrl(dev, TIOCM_DTR | TIOCM_RTS);
+ }
+
+ static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index e5206de..dc1ce62 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -697,9 +697,20 @@ static int serial_carrier_raised(struct tty_port *port)
+ static void serial_dtr_rts(struct tty_port *port, int on)
+ {
+ struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
+- struct usb_serial_driver *drv = p->serial->type;
+- if (drv->dtr_rts)
++ struct usb_serial *serial = p->serial;
++ struct usb_serial_driver *drv = serial->type;
++
++ if (!drv->dtr_rts)
++ return;
++ /*
++ * Work-around bug in the tty-layer which can result in dtr_rts
++ * being called after a disconnect (and tty_unregister_device
++ * has returned). Remove once bug has been squashed.
++ */
++ mutex_lock(&serial->disc_mutex);
++ if (!serial->disconnected)
+ drv->dtr_rts(p, on);
++ mutex_unlock(&serial->disc_mutex);
+ }
+
+ static const struct tty_port_operations serial_port_ops = {
+diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
+index d555ca9..6c92301 100644
+--- a/drivers/usb/serial/usb_wwan.c
++++ b/drivers/usb/serial/usb_wwan.c
+@@ -41,7 +41,6 @@ static int debug;
+
+ void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
+ {
+- struct usb_serial *serial = port->serial;
+ struct usb_wwan_port_private *portdata;
+
+ struct usb_wwan_intf_private *intfdata;
+@@ -54,12 +53,11 @@ void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
+ return;
+
+ portdata = usb_get_serial_port_data(port);
+- mutex_lock(&serial->disc_mutex);
++ /* FIXME: locking */
+ portdata->rts_state = on;
+ portdata->dtr_state = on;
+- if (serial->dev)
+- intfdata->send_setup(port);
+- mutex_unlock(&serial->disc_mutex);
++
++ intfdata->send_setup(port);
+ }
+ EXPORT_SYMBOL(usb_wwan_dtr_rts);
+
+diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
+index 16b0bf0..7ab9046 100644
+--- a/drivers/usb/storage/initializers.c
++++ b/drivers/usb/storage/initializers.c
+@@ -147,7 +147,7 @@ static int usb_stor_huawei_dongles_pid(struct us_data *us)
+ int idProduct;
+
+ idesc = &us->pusb_intf->cur_altsetting->desc;
+- idProduct = us->pusb_dev->descriptor.idProduct;
++ idProduct = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
+ /* The first port is CDROM,
+ * means the dongle in the single port mode,
+ * and a switch command is required to be sent. */
+@@ -169,7 +169,7 @@ int usb_stor_huawei_init(struct us_data *us)
+ int result = 0;
+
+ if (usb_stor_huawei_dongles_pid(us)) {
+- if (us->pusb_dev->descriptor.idProduct >= 0x1446)
++ if (le16_to_cpu(us->pusb_dev->descriptor.idProduct) >= 0x1446)
+ result = usb_stor_huawei_scsi_init(us);
+ else
+ result = usb_stor_huawei_feature_init(us);
+diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
+index 2c85530..65a6a75 100644
+--- a/drivers/usb/storage/unusual_cypress.h
++++ b/drivers/usb/storage/unusual_cypress.h
+@@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
+ "Cypress ISD-300LP",
+ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+
+-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999,
++UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219,
+ "Super Top",
+ "USB 2.0 SATA BRIDGE",
+ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index ae66278..be32b1b 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -1073,7 +1073,7 @@ static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
+ }
+ _iov = iov + ret;
+ size = reg->memory_size - addr + reg->guest_phys_addr;
+- _iov->iov_len = min((u64)len, size);
++ _iov->iov_len = min((u64)len - s, size);
+ _iov->iov_base = (void __user *)(unsigned long)
+ (reg->userspace_addr + addr - reg->guest_phys_addr);
+ s += size;
+diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
+index 66bc74d..b35c857 100644
+--- a/drivers/video/backlight/adp8860_bl.c
++++ b/drivers/video/backlight/adp8860_bl.c
+@@ -791,7 +791,7 @@ static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message)
+
+ static int adp8860_i2c_resume(struct i2c_client *client)
+ {
+- adp8860_set_bits(client, ADP8860_MDCR, NSTBY);
++ adp8860_set_bits(client, ADP8860_MDCR, NSTBY | BLEN);
+
+ return 0;
+ }
+diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
+index 6c68a68..25a9b3a 100644
+--- a/drivers/video/backlight/adp8870_bl.c
++++ b/drivers/video/backlight/adp8870_bl.c
+@@ -965,7 +965,7 @@ static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message)
+
+ static int adp8870_i2c_resume(struct i2c_client *client)
+ {
+- adp8870_set_bits(client, ADP8870_MDCR, NSTBY);
++ adp8870_set_bits(client, ADP8870_MDCR, NSTBY | BLEN);
+
+ return 0;
+ }
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index bf9a9b7..9b8bcab 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -530,6 +530,33 @@ static int search_for_mapped_con(void)
+ return retval;
+ }
+
++static int do_fbcon_takeover(int show_logo)
++{
++ int err, i;
++
++ if (!num_registered_fb)
++ return -ENODEV;
++
++ if (!show_logo)
++ logo_shown = FBCON_LOGO_DONTSHOW;
++
++ for (i = first_fb_vc; i <= last_fb_vc; i++)
++ con2fb_map[i] = info_idx;
++
++ err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc,
++ fbcon_is_default);
++
++ if (err) {
++ for (i = first_fb_vc; i <= last_fb_vc; i++)
++ con2fb_map[i] = -1;
++ info_idx = -1;
++ } else {
++ fbcon_has_console_bind = 1;
++ }
++
++ return err;
++}
++
+ static int fbcon_takeover(int show_logo)
+ {
+ int err, i;
+@@ -991,7 +1018,7 @@ static const char *fbcon_startup(void)
+ }
+
+ /* Setup default font */
+- if (!p->fontdata) {
++ if (!p->fontdata && !vc->vc_font.data) {
+ if (!fontname[0] || !(font = find_font(fontname)))
+ font = get_default_font(info->var.xres,
+ info->var.yres,
+@@ -1001,6 +1028,8 @@ static const char *fbcon_startup(void)
+ vc->vc_font.height = font->height;
+ vc->vc_font.data = (void *)(p->fontdata = font->data);
+ vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */
++ } else {
++ p->fontdata = vc->vc_font.data;
+ }
+
+ cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
+@@ -1160,9 +1189,9 @@ static void fbcon_init(struct vc_data *vc, int init)
+ ops->p = &fb_display[fg_console];
+ }
+
+-static void fbcon_free_font(struct display *p)
++static void fbcon_free_font(struct display *p, bool freefont)
+ {
+- if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
++ if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
+ kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
+ p->fontdata = NULL;
+ p->userfont = 0;
+@@ -1174,8 +1203,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ struct fb_info *info;
+ struct fbcon_ops *ops;
+ int idx;
++ bool free_font = true;
+
+- fbcon_free_font(p);
+ idx = con2fb_map[vc->vc_num];
+
+ if (idx == -1)
+@@ -1186,6 +1215,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ if (!info)
+ goto finished;
+
++ if (info->flags & FBINFO_MISC_FIRMWARE)
++ free_font = false;
+ ops = info->fbcon_par;
+
+ if (!ops)
+@@ -1197,6 +1228,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ ops->flags &= ~FBCON_FLAGS_INIT;
+ finished:
+
++ fbcon_free_font(p, free_font);
++
+ if (!con_is_bound(&fb_con))
+ fbcon_exit();
+
+@@ -2978,7 +3011,7 @@ static int fbcon_unbind(void)
+ {
+ int ret;
+
+- ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
++ ret = do_unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
+ fbcon_is_default);
+
+ if (!ret)
+@@ -3051,7 +3084,7 @@ static int fbcon_fb_unregistered(struct fb_info *info)
+ primary_device = -1;
+
+ if (!num_registered_fb)
+- unregister_con_driver(&fb_con);
++ do_unregister_con_driver(&fb_con);
+
+ return 0;
+ }
+@@ -3116,7 +3149,7 @@ static int fbcon_fb_registered(struct fb_info *info)
+ }
+
+ if (info_idx != -1)
+- ret = fbcon_takeover(1);
++ ret = do_fbcon_takeover(1);
+ } else {
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ if (con2fb_map_boot[i] == idx)
+diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
+index d449a74..5855d17 100644
+--- a/drivers/video/console/vgacon.c
++++ b/drivers/video/console/vgacon.c
+@@ -1064,7 +1064,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+ unsigned short video_port_status = vga_video_port_reg + 6;
+ int font_select = 0x00, beg, i;
+ char *charmap;
+-
++ bool clear_attribs = false;
+ if (vga_video_type != VIDEO_TYPE_EGAM) {
+ charmap = (char *) VGA_MAP_MEM(colourmap, 0);
+ beg = 0x0e;
+@@ -1169,12 +1169,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+
+ /* if 512 char mode is already enabled don't re-enable it. */
+ if ((set) && (ch512 != vga_512_chars)) {
+- /* attribute controller */
+- for (i = 0; i < MAX_NR_CONSOLES; i++) {
+- struct vc_data *c = vc_cons[i].d;
+- if (c && c->vc_sw == &vga_con)
+- c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
+- }
+ vga_512_chars = ch512;
+ /* 256-char: enable intensity bit
+ 512-char: disable intensity bit */
+@@ -1185,8 +1179,22 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+ it means, but it works, and it appears necessary */
+ inb_p(video_port_status);
+ vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0);
++ clear_attribs = true;
+ }
+ raw_spin_unlock_irq(&vga_lock);
++
++ if (clear_attribs) {
++ for (i = 0; i < MAX_NR_CONSOLES; i++) {
++ struct vc_data *c = vc_cons[i].d;
++ if (c && c->vc_sw == &vga_con) {
++ /* force hi font mask to 0, so we always clear
++ the bit on either transition */
++ c->vc_hi_font_mask = 0x00;
++ clear_buffer_attributes(c);
++ c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
++ }
++ }
++ }
+ return 0;
+ }
+
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index 7a41220..c133dde 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1628,7 +1628,9 @@ static int do_register_framebuffer(struct fb_info *fb_info)
+ event.info = fb_info;
+ if (!lock_fb_info(fb_info))
+ return -ENODEV;
++ console_lock();
+ fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
++ console_unlock();
+ unlock_fb_info(fb_info);
+ return 0;
+ }
+@@ -1644,8 +1646,10 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
+
+ if (!lock_fb_info(fb_info))
+ return -ENODEV;
++ console_lock();
+ event.info = fb_info;
+ ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
++ console_unlock();
+ unlock_fb_info(fb_info);
+
+ if (ret)
+@@ -1660,7 +1664,9 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
+ num_registered_fb--;
+ fb_cleanup_device(fb_info);
+ event.info = fb_info;
++ console_lock();
+ fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
++ console_unlock();
+
+ /* this may free fb info */
+ put_fb_info(fb_info);
+@@ -1831,11 +1837,8 @@ int fb_new_modelist(struct fb_info *info)
+ err = 1;
+
+ if (!list_empty(&info->modelist)) {
+- if (!lock_fb_info(info))
+- return -ENODEV;
+ event.info = info;
+ err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
+- unlock_fb_info(info);
+ }
+
+ return err;
+diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
+index 67afa9c..303fb9f 100644
+--- a/drivers/video/fbsysfs.c
++++ b/drivers/video/fbsysfs.c
+@@ -175,6 +175,8 @@ static ssize_t store_modes(struct device *device,
+ if (i * sizeof(struct fb_videomode) != count)
+ return -EINVAL;
+
++ if (!lock_fb_info(fb_info))
++ return -ENODEV;
+ console_lock();
+ list_splice(&fb_info->modelist, &old_list);
+ fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
+@@ -186,6 +188,7 @@ static ssize_t store_modes(struct device *device,
+ fb_destroy_modelist(&old_list);
+
+ console_unlock();
++ unlock_fb_info(fb_info);
+
+ return 0;
+ }
+diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
+index dbc13e9..c93d59e 100644
+--- a/drivers/xen/evtchn.c
++++ b/drivers/xen/evtchn.c
+@@ -269,6 +269,14 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
+ u->name, (void *)(unsigned long)port);
+ if (rc >= 0)
+ rc = 0;
++ else {
++ /* bind failed, should close the port now */
++ struct evtchn_close close;
++ close.port = port;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
++ BUG();
++ set_port_user(port, NULL);
++ }
+
+ return rc;
+ }
+@@ -277,6 +285,8 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port)
+ {
+ int irq = irq_from_evtchn(port);
+
++ BUG_ON(irq < 0);
++
+ unbind_from_irqhandler(irq, (void *)(unsigned long)port);
+
+ set_port_user(port, NULL);
+diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
+index 63616d7..d07c4cd 100644
+--- a/drivers/xen/xen-pciback/pciback_ops.c
++++ b/drivers/xen/xen-pciback/pciback_ops.c
+@@ -8,6 +8,7 @@
+ #include <linux/bitops.h>
+ #include <xen/events.h>
+ #include <linux/sched.h>
++#include <linux/ratelimit.h>
+ #include "pciback.h"
+
+ int verbose_request;
+@@ -135,7 +136,6 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev, struct xen_pci_op *op)
+ {
+ struct xen_pcibk_dev_data *dev_data;
+- int otherend = pdev->xdev->otherend_id;
+ int status;
+
+ if (unlikely(verbose_request))
+@@ -144,8 +144,9 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
+ status = pci_enable_msi(dev);
+
+ if (status) {
+- printk(KERN_ERR "error enable msi for guest %x status %x\n",
+- otherend, status);
++ pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI for guest %u: err %d\n",
++ pci_name(dev), pdev->xdev->otherend_id,
++ status);
+ op->value = 0;
+ return XEN_PCI_ERR_op_failed;
+ }
+@@ -223,10 +224,10 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
+ pci_name(dev), i,
+ op->msix_entries[i].vector);
+ }
+- } else {
+- printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n",
+- pci_name(dev), result);
+- }
++ } else
++ pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI-X for guest %u: err %d!\n",
++ pci_name(dev), pdev->xdev->otherend_id,
++ result);
+ kfree(entries);
+
+ op->value = result;
+diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
+index b8e8b0a..4a1b984 100644
+--- a/fs/binfmt_em86.c
++++ b/fs/binfmt_em86.c
+@@ -42,7 +42,6 @@ static int load_em86(struct linux_binprm *bprm,struct pt_regs *regs)
+ return -ENOEXEC;
+ }
+
+- bprm->recursion_depth++; /* Well, the bang-shell is implicit... */
+ allow_write_access(bprm->file);
+ fput(bprm->file);
+ bprm->file = NULL;
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index ca52e92..7423cb9 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -116,10 +116,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ if (!enabled)
+ goto _ret;
+
+- retval = -ENOEXEC;
+- if (bprm->recursion_depth > BINPRM_MAX_RECURSION)
+- goto _ret;
+-
+ /* to keep locking time low, we copy the interpreter string */
+ read_lock(&entries_lock);
+ fmt = check_file(bprm);
+@@ -199,8 +195,6 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ if (retval < 0)
+ goto _error;
+
+- bprm->recursion_depth++;
+-
+ retval = search_binary_handler (bprm, regs);
+ if (retval < 0)
+ goto _error;
+diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
+index e39c18a..211ede0 100644
+--- a/fs/binfmt_script.c
++++ b/fs/binfmt_script.c
+@@ -22,15 +22,13 @@ static int load_script(struct linux_binprm *bprm,struct pt_regs *regs)
+ char interp[BINPRM_BUF_SIZE];
+ int retval;
+
+- if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!') ||
+- (bprm->recursion_depth > BINPRM_MAX_RECURSION))
++ if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
+ return -ENOEXEC;
+ /*
+ * This section does the #! interpretation.
+ * Sorta complicated, but hopefully it will work. -TYT
+ */
+
+- bprm->recursion_depth++;
+ allow_write_access(bprm->file);
+ fput(bprm->file);
+ bprm->file = NULL;
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 9b98987..613edd8 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -82,13 +82,14 @@ sector_t blkdev_max_block(struct block_device *bdev)
+ }
+
+ /* Kill _all_ buffers and pagecache , dirty or not.. */
+-static void kill_bdev(struct block_device *bdev)
++void kill_bdev(struct block_device *bdev)
+ {
+ if (bdev->bd_inode->i_mapping->nrpages == 0)
+ return;
+ invalidate_bh_lrus();
+ truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
+ }
++EXPORT_SYMBOL(kill_bdev);
+
+ int set_blocksize(struct block_device *bdev, int size)
+ {
+@@ -1024,6 +1025,7 @@ int revalidate_disk(struct gendisk *disk)
+
+ mutex_lock(&bdev->bd_mutex);
+ check_disk_size_change(disk, bdev);
++ bdev->bd_invalidated = 0;
+ mutex_unlock(&bdev->bd_mutex);
+ bdput(bdev);
+ return ret;
+diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
+index 0e3c092..b4d2438 100644
+--- a/fs/cachefiles/rdwr.c
++++ b/fs/cachefiles/rdwr.c
+@@ -918,7 +918,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
+ * own time */
+ dget(object->backer);
+ mntget(cache->mnt);
+- file = dentry_open(object->backer, cache->mnt, O_RDWR,
++ file = dentry_open(object->backer, cache->mnt, O_RDWR | O_LARGEFILE,
+ cache->cache_cred);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+diff --git a/fs/direct-io.c b/fs/direct-io.c
+index d740ab6..ac401d2 100644
+--- a/fs/direct-io.c
++++ b/fs/direct-io.c
+@@ -304,9 +304,9 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is
+ dio->end_io(dio->iocb, offset, transferred,
+ dio->private, ret, is_async);
+ } else {
++ inode_dio_done(dio->inode);
+ if (is_async)
+ aio_complete(dio->iocb, ret, 0);
+- inode_dio_done(dio->inode);
+ }
+
+ return ret;
+diff --git a/fs/exec.c b/fs/exec.c
+index c27fa0d..312e297 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1385,6 +1385,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
+ struct linux_binfmt *fmt;
+ pid_t old_pid;
+
++ /* This allows 4 levels of binfmt rewrites before failing hard. */
++ if (depth > 5)
++ return -ELOOP;
++
+ retval = security_bprm_check(bprm);
+ if (retval)
+ return retval;
+@@ -1408,12 +1412,8 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
+ if (!try_module_get(fmt->module))
+ continue;
+ read_unlock(&binfmt_lock);
++ bprm->recursion_depth = depth + 1;
+ retval = fn(bprm, regs);
+- /*
+- * Restore the depth counter to its starting value
+- * in this call, so we don't have to rely on every
+- * load_binary function to restore it on return.
+- */
+ bprm->recursion_depth = depth;
+ if (retval >= 0) {
+ if (depth == 0)
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index d6970f7..484ffee 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -420,11 +420,16 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
+
+ free_clusters = percpu_counter_read_positive(fcc);
+ dirty_clusters = percpu_counter_read_positive(dcc);
+- root_clusters = EXT4_B2C(sbi, ext4_r_blocks_count(sbi->s_es));
++
++ /*
++ * r_blocks_count should always be multiple of the cluster ratio so
++ * we are safe to do a plane bit shift only.
++ */
++ root_clusters = ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
+
+ if (free_clusters - (nclusters + root_clusters + dirty_clusters) <
+ EXT4_FREECLUSTERS_WATERMARK) {
+- free_clusters = EXT4_C2B(sbi, percpu_counter_sum_positive(fcc));
++ free_clusters = percpu_counter_sum_positive(fcc);
+ dirty_clusters = percpu_counter_sum_positive(dcc);
+ }
+ /* Check whether we have space after accounting for current
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index fbb92e6..b48e0dc 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -45,6 +45,17 @@
+
+ #include <trace/events/ext4.h>
+
++/*
++ * used by extent splitting.
++ */
++#define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \
++ due to ENOSPC */
++#define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */
++#define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */
++
++#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
++#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
++
+ static int ext4_split_extent(handle_t *handle,
+ struct inode *inode,
+ struct ext4_ext_path *path,
+@@ -52,6 +63,13 @@ static int ext4_split_extent(handle_t *handle,
+ int split_flag,
+ int flags);
+
++static int ext4_split_extent_at(handle_t *handle,
++ struct inode *inode,
++ struct ext4_ext_path *path,
++ ext4_lblk_t split,
++ int split_flag,
++ int flags);
++
+ static int ext4_ext_truncate_extend_restart(handle_t *handle,
+ struct inode *inode,
+ int needed)
+@@ -636,6 +654,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
+ struct ext4_extent_header *eh;
+ struct buffer_head *bh;
+ short int depth, i, ppos = 0, alloc = 0;
++ int ret;
+
+ eh = ext_inode_hdr(inode);
+ depth = ext_depth(inode);
+@@ -665,12 +684,15 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
+ path[ppos].p_ext = NULL;
+
+ bh = sb_getblk(inode->i_sb, path[ppos].p_block);
+- if (unlikely(!bh))
++ if (unlikely(!bh)) {
++ ret = -ENOMEM;
+ goto err;
++ }
+ if (!bh_uptodate_or_lock(bh)) {
+ trace_ext4_ext_load_extent(inode, block,
+ path[ppos].p_block);
+- if (bh_submit_read(bh) < 0) {
++ ret = bh_submit_read(bh);
++ if (ret < 0) {
+ put_bh(bh);
+ goto err;
+ }
+@@ -683,13 +705,15 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
+ put_bh(bh);
+ EXT4_ERROR_INODE(inode,
+ "ppos %d > depth %d", ppos, depth);
++ ret = -EIO;
+ goto err;
+ }
+ path[ppos].p_bh = bh;
+ path[ppos].p_hdr = eh;
+ i--;
+
+- if (need_to_validate && ext4_ext_check(inode, eh, i))
++ ret = need_to_validate ? ext4_ext_check(inode, eh, i) : 0;
++ if (ret < 0)
+ goto err;
+ }
+
+@@ -711,7 +735,7 @@ err:
+ ext4_ext_drop_refs(path);
+ if (alloc)
+ kfree(path);
+- return ERR_PTR(-EIO);
++ return ERR_PTR(ret);
+ }
+
+ /*
+@@ -866,7 +890,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
+ }
+ bh = sb_getblk(inode->i_sb, newblock);
+ if (!bh) {
+- err = -EIO;
++ err = -ENOMEM;
+ goto cleanup;
+ }
+ lock_buffer(bh);
+@@ -938,7 +962,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
+ newblock = ablocks[--a];
+ bh = sb_getblk(inode->i_sb, newblock);
+ if (!bh) {
+- err = -EIO;
++ err = -ENOMEM;
+ goto cleanup;
+ }
+ lock_buffer(bh);
+@@ -1049,11 +1073,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
+ return err;
+
+ bh = sb_getblk(inode->i_sb, newblock);
+- if (!bh) {
+- err = -EIO;
+- ext4_std_error(inode->i_sb, err);
+- return err;
+- }
++ if (!bh)
++ return -ENOMEM;
+ lock_buffer(bh);
+
+ err = ext4_journal_get_create_access(handle, bh);
+@@ -2321,7 +2342,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
+ struct ext4_extent *ex;
+
+ /* the header must be checked already in ext4_ext_remove_space() */
+- ext_debug("truncate since %u in leaf\n", start);
++ ext_debug("truncate since %u in leaf to %u\n", start, end);
+ if (!path[depth].p_hdr)
+ path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
+ eh = path[depth].p_hdr;
+@@ -2356,7 +2377,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
+ ext_debug(" border %u:%u\n", a, b);
+
+ /* If this extent is beyond the end of the hole, skip it */
+- if (end <= ex_ee_block) {
++ if (end < ex_ee_block) {
+ ex--;
+ ex_ee_block = le32_to_cpu(ex->ee_block);
+ ex_ee_len = ext4_ext_get_actual_len(ex);
+@@ -2495,16 +2516,17 @@ ext4_ext_more_to_rm(struct ext4_ext_path *path)
+ return 1;
+ }
+
+-static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
++static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
++ ext4_lblk_t end)
+ {
+ struct super_block *sb = inode->i_sb;
+ int depth = ext_depth(inode);
+- struct ext4_ext_path *path;
++ struct ext4_ext_path *path = NULL;
+ ext4_fsblk_t partial_cluster = 0;
+ handle_t *handle;
+- int i, err;
++ int i = 0, err;
+
+- ext_debug("truncate since %u\n", start);
++ ext_debug("truncate since %u to %u\n", start, end);
+
+ /* probably first extent we're gonna free will be last in block */
+ handle = ext4_journal_start(inode, depth + 1);
+@@ -2517,29 +2539,96 @@ again:
+ trace_ext4_ext_remove_space(inode, start, depth);
+
+ /*
++ * Check if we are removing extents inside the extent tree. If that
++ * is the case, we are going to punch a hole inside the extent tree
++ * so we have to check whether we need to split the extent covering
++ * the last block to remove so we can easily remove the part of it
++ * in ext4_ext_rm_leaf().
++ */
++ if (end < EXT_MAX_BLOCKS - 1) {
++ struct ext4_extent *ex;
++ ext4_lblk_t ee_block;
++
++ /* find extent for this block */
++ path = ext4_ext_find_extent(inode, end, NULL);
++ if (IS_ERR(path)) {
++ ext4_journal_stop(handle);
++ return PTR_ERR(path);
++ }
++ depth = ext_depth(inode);
++ ex = path[depth].p_ext;
++ if (!ex) {
++ ext4_ext_drop_refs(path);
++ kfree(path);
++ path = NULL;
++ goto cont;
++ }
++
++ ee_block = le32_to_cpu(ex->ee_block);
++
++ /*
++ * See if the last block is inside the extent, if so split
++ * the extent at 'end' block so we can easily remove the
++ * tail of the first part of the split extent in
++ * ext4_ext_rm_leaf().
++ */
++ if (end >= ee_block &&
++ end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
++ int split_flag = 0;
++
++ if (ext4_ext_is_uninitialized(ex))
++ split_flag = EXT4_EXT_MARK_UNINIT1 |
++ EXT4_EXT_MARK_UNINIT2;
++
++ /*
++ * Split the extent in two so that 'end' is the last
++ * block in the first new extent
++ */
++ err = ext4_split_extent_at(handle, inode, path,
++ end + 1, split_flag,
++ EXT4_GET_BLOCKS_PRE_IO |
++ EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
++
++ if (err < 0)
++ goto out;
++ }
++ }
++cont:
++
++ /*
+ * We start scanning from right side, freeing all the blocks
+ * after i_size and walking into the tree depth-wise.
+ */
+ depth = ext_depth(inode);
+- path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
+- if (path == NULL) {
+- ext4_journal_stop(handle);
+- return -ENOMEM;
+- }
+- path[0].p_depth = depth;
+- path[0].p_hdr = ext_inode_hdr(inode);
+- if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
+- err = -EIO;
+- goto out;
++ if (path) {
++ int k = i = depth;
++ while (--k > 0)
++ path[k].p_block =
++ le16_to_cpu(path[k].p_hdr->eh_entries)+1;
++ } else {
++ path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
++ GFP_NOFS);
++ if (path == NULL) {
++ ext4_journal_stop(handle);
++ return -ENOMEM;
++ }
++ path[0].p_depth = depth;
++ path[0].p_hdr = ext_inode_hdr(inode);
++ i = 0;
++
++ if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
++ err = -EIO;
++ goto out;
++ }
+ }
+- i = err = 0;
++ err = 0;
+
+ while (i >= 0 && err == 0) {
+ if (i == depth) {
+ /* this is leaf block */
+ err = ext4_ext_rm_leaf(handle, inode, path,
+ &partial_cluster, start,
+- EXT_MAX_BLOCKS - 1);
++ end);
+ /* root level has p_bh == NULL, brelse() eats this */
+ brelse(path[i].p_bh);
+ path[i].p_bh = NULL;
+@@ -2646,8 +2735,10 @@ again:
+ out:
+ ext4_ext_drop_refs(path);
+ kfree(path);
+- if (err == -EAGAIN)
++ if (err == -EAGAIN) {
++ path = NULL;
+ goto again;
++ }
+ ext4_journal_stop(handle);
+
+ return err;
+@@ -2722,17 +2813,6 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
+ }
+
+ /*
+- * used by extent splitting.
+- */
+-#define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \
+- due to ENOSPC */
+-#define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */
+-#define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */
+-
+-#define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
+-#define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
+-
+-/*
+ * ext4_split_extent_at() splits an extent at given block.
+ *
+ * @handle: the journal handle
+@@ -4274,7 +4354,7 @@ void ext4_ext_truncate(struct inode *inode)
+
+ last_block = (inode->i_size + sb->s_blocksize - 1)
+ >> EXT4_BLOCK_SIZE_BITS(sb);
+- err = ext4_ext_remove_space(inode, last_block);
++ err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
+
+ /* In a multi-transaction truncate, we only make the final
+ * transaction synchronous.
+@@ -4751,14 +4831,12 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
+ {
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct super_block *sb = inode->i_sb;
+- struct ext4_ext_cache cache_ex;
+- ext4_lblk_t first_block, last_block, num_blocks, iblock, max_blocks;
++ ext4_lblk_t first_block, stop_block;
+ struct address_space *mapping = inode->i_mapping;
+- struct ext4_map_blocks map;
+ handle_t *handle;
+ loff_t first_page, last_page, page_len;
+ loff_t first_page_offset, last_page_offset;
+- int ret, credits, blocks_released, err = 0;
++ int credits, err = 0;
+
+ /* No need to punch hole beyond i_size */
+ if (offset >= inode->i_size)
+@@ -4774,10 +4852,6 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
+ offset;
+ }
+
+- first_block = (offset + sb->s_blocksize - 1) >>
+- EXT4_BLOCK_SIZE_BITS(sb);
+- last_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
+-
+ first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ last_page = (offset + length) >> PAGE_CACHE_SHIFT;
+
+@@ -4856,7 +4930,6 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
+ }
+ }
+
+-
+ /*
+ * If i_size is contained in the last page, we need to
+ * unmap and zero the partial page after i_size
+@@ -4876,73 +4949,22 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
+ }
+ }
+
++ first_block = (offset + sb->s_blocksize - 1) >>
++ EXT4_BLOCK_SIZE_BITS(sb);
++ stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
++
+ /* If there are no blocks to remove, return now */
+- if (first_block >= last_block)
++ if (first_block >= stop_block)
+ goto out;
+
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_ext_invalidate_cache(inode);
+ ext4_discard_preallocations(inode);
+
+- /*
+- * Loop over all the blocks and identify blocks
+- * that need to be punched out
+- */
+- iblock = first_block;
+- blocks_released = 0;
+- while (iblock < last_block) {
+- max_blocks = last_block - iblock;
+- num_blocks = 1;
+- memset(&map, 0, sizeof(map));
+- map.m_lblk = iblock;
+- map.m_len = max_blocks;
+- ret = ext4_ext_map_blocks(handle, inode, &map,
+- EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
+-
+- if (ret > 0) {
+- blocks_released += ret;
+- num_blocks = ret;
+- } else if (ret == 0) {
+- /*
+- * If map blocks could not find the block,
+- * then it is in a hole. If the hole was
+- * not already cached, then map blocks should
+- * put it in the cache. So we can get the hole
+- * out of the cache
+- */
+- memset(&cache_ex, 0, sizeof(cache_ex));
+- if ((ext4_ext_check_cache(inode, iblock, &cache_ex)) &&
+- !cache_ex.ec_start) {
+-
+- /* The hole is cached */
+- num_blocks = cache_ex.ec_block +
+- cache_ex.ec_len - iblock;
++ err = ext4_ext_remove_space(inode, first_block, stop_block - 1);
+
+- } else {
+- /* The block could not be identified */
+- err = -EIO;
+- break;
+- }
+- } else {
+- /* Map blocks error */
+- err = ret;
+- break;
+- }
+-
+- if (num_blocks == 0) {
+- /* This condition should never happen */
+- ext_debug("Block lookup failed");
+- err = -EIO;
+- break;
+- }
+-
+- iblock += num_blocks;
+- }
+-
+- if (blocks_released > 0) {
+- ext4_ext_invalidate_cache(inode);
+- ext4_discard_preallocations(inode);
+- }
++ ext4_ext_invalidate_cache(inode);
++ ext4_discard_preallocations(inode);
+
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index 3cfc73f..26d6dbf 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -146,6 +146,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
+ struct super_block *sb = inode->i_sb;
+ Indirect *p = chain;
+ struct buffer_head *bh;
++ int ret = -EIO;
+
+ *err = 0;
+ /* i_data is not going away, no lock needed */
+@@ -154,8 +155,10 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
+ goto no_block;
+ while (--depth) {
+ bh = sb_getblk(sb, le32_to_cpu(p->key));
+- if (unlikely(!bh))
++ if (unlikely(!bh)) {
++ ret = -ENOMEM;
+ goto failure;
++ }
+
+ if (!bh_uptodate_or_lock(bh)) {
+ if (bh_submit_read(bh) < 0) {
+@@ -177,7 +180,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
+ return NULL;
+
+ failure:
+- *err = -EIO;
++ *err = ret;
+ no_block:
+ return p;
+ }
+@@ -471,7 +474,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
+ */
+ bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+ if (unlikely(!bh)) {
+- err = -EIO;
++ err = -ENOMEM;
+ goto failed;
+ }
+
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 8424dda..4b2bb75 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -661,7 +661,7 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
+
+ bh = sb_getblk(inode->i_sb, map.m_pblk);
+ if (!bh) {
+- *errp = -EIO;
++ *errp = -ENOMEM;
+ return NULL;
+ }
+ if (map.m_flags & EXT4_MAP_NEW) {
+@@ -2795,9 +2795,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
+ if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+ ext4_free_io_end(io_end);
+ out:
++ inode_dio_done(inode);
+ if (is_async)
+ aio_complete(iocb, ret, 0);
+- inode_dio_done(inode);
+ return;
+ }
+
+@@ -3575,11 +3575,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
+ iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
+
+ bh = sb_getblk(sb, block);
+- if (!bh) {
+- EXT4_ERROR_INODE_BLOCK(inode, block,
+- "unable to read itable block");
+- return -EIO;
+- }
++ if (!bh)
++ return -ENOMEM;
+ if (!buffer_uptodate(bh)) {
+ lock_buffer(bh);
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 1d07c12..553ff71 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4178,7 +4178,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
+ /* The max size of hash table is PREALLOC_TB_SIZE */
+ order = PREALLOC_TB_SIZE - 1;
+ /* Add the prealloc space to lg */
+- rcu_read_lock();
++ spin_lock(&lg->lg_prealloc_lock);
+ list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
+ pa_inode_list) {
+ spin_lock(&tmp_pa->pa_lock);
+@@ -4202,12 +4202,12 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
+ if (!added)
+ list_add_tail_rcu(&pa->pa_inode_list,
+ &lg->lg_prealloc_list[order]);
+- rcu_read_unlock();
++ spin_unlock(&lg->lg_prealloc_lock);
+
+ /* Now trim the list to be not more than 8 elements */
+ if (lg_prealloc_count > 8) {
+ ext4_mb_discard_lg_preallocations(sb, lg,
+- order, lg_prealloc_count);
++ order, lg_prealloc_count);
+ return;
+ }
+ return ;
+diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
+index 7ea4ba4..f3358ab 100644
+--- a/fs/ext4/mmp.c
++++ b/fs/ext4/mmp.c
+@@ -41,6 +41,8 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
+ * is not blocked in the elevator. */
+ if (!*bh)
+ *bh = sb_getblk(sb, mmp_block);
++ if (!*bh)
++ return -ENOMEM;
+ if (*bh) {
+ get_bh(*bh);
+ lock_buffer(*bh);
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index 24feb1c..54f566d 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -108,14 +108,13 @@ int ext4_end_io_nolock(ext4_io_end_t *io)
+ inode->i_ino, offset, size, ret);
+ }
+
+- if (io->iocb)
+- aio_complete(io->iocb, io->result, 0);
+-
+- if (io->flag & EXT4_IO_END_DIRECT)
+- inode_dio_done(inode);
+ /* Wake up anyone waiting on unwritten extent conversion */
+ if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten))
+ wake_up_all(ext4_ioend_wq(io->inode));
++ if (io->flag & EXT4_IO_END_DIRECT)
++ inode_dio_done(inode);
++ if (io->iocb)
++ aio_complete(io->iocb, io->result, 0);
+ return ret;
+ }
+
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 4eac337..33129c0 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -142,7 +142,7 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
+
+ bh = sb_getblk(sb, blk);
+ if (!bh)
+- return ERR_PTR(-EIO);
++ return ERR_PTR(-ENOMEM);
+ if ((err = ext4_journal_get_write_access(handle, bh))) {
+ brelse(bh);
+ bh = ERR_PTR(err);
+@@ -220,7 +220,7 @@ static int setup_new_group_blocks(struct super_block *sb,
+
+ gdb = sb_getblk(sb, block);
+ if (!gdb) {
+- err = -EIO;
++ err = -ENOMEM;
+ goto exit_journal;
+ }
+ if ((err = ext4_journal_get_write_access(handle, gdb))) {
+@@ -694,7 +694,7 @@ static void update_backups(struct super_block *sb,
+
+ bh = sb_getblk(sb, group * bpg + blk_off);
+ if (!bh) {
+- err = -EIO;
++ err = -ENOMEM;
+ break;
+ }
+ ext4_debug("update metadata backup %#04lx\n",
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 4410ae7..d5498b2 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -496,7 +496,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
+ error = ext4_handle_dirty_metadata(handle, inode, bh);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+- dquot_free_block(inode, 1);
++ dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
+ ea_bdebug(bh, "refcount now=%d; releasing",
+ le32_to_cpu(BHDR(bh)->h_refcount));
+ }
+@@ -785,7 +785,8 @@ inserted:
+ else {
+ /* The old block is released after updating
+ the inode. */
+- error = dquot_alloc_block(inode, 1);
++ error = dquot_alloc_block(inode,
++ EXT4_C2B(EXT4_SB(sb), 1));
+ if (error)
+ goto cleanup;
+ error = ext4_journal_get_write_access(handle,
+@@ -839,16 +840,17 @@ inserted:
+
+ new_bh = sb_getblk(sb, block);
+ if (!new_bh) {
++ error = -ENOMEM;
+ getblk_failed:
+ ext4_free_blocks(handle, inode, NULL, block, 1,
+ EXT4_FREE_BLOCKS_METADATA);
+- error = -EIO;
+ goto cleanup;
+ }
+ lock_buffer(new_bh);
+ error = ext4_journal_get_create_access(handle, new_bh);
+ if (error) {
+ unlock_buffer(new_bh);
++ error = -EIO;
+ goto getblk_failed;
+ }
+ memcpy(new_bh->b_data, s->base, new_bh->b_size);
+@@ -880,7 +882,7 @@ cleanup:
+ return error;
+
+ cleanup_dquot:
+- dquot_free_block(inode, 1);
++ dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
+ goto cleanup;
+
+ bad_block:
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
+index 8392cb8..a3a0987 100644
+--- a/fs/lockd/clntproc.c
++++ b/fs/lockd/clntproc.c
+@@ -551,6 +551,9 @@ again:
+ status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
+ if (status < 0)
+ break;
++ /* Resend the blocking lock request after a server reboot */
++ if (resp->status == nlm_lck_denied_grace_period)
++ continue;
+ if (resp->status != nlm_lck_blocked)
+ break;
+ }
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index 1aaa0ee..b17a81c 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -1101,6 +1101,7 @@ static const struct nfs_pageio_ops bl_pg_write_ops = {
+ static struct pnfs_layoutdriver_type blocklayout_type = {
+ .id = LAYOUT_BLOCK_VOLUME,
+ .name = "LAYOUT_BLOCK_VOLUME",
++ .owner = THIS_MODULE,
+ .read_pagelist = bl_read_pagelist,
+ .write_pagelist = bl_write_pagelist,
+ .alloc_layout_hdr = bl_alloc_layout_hdr,
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 2f98c53..6d7c53d 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5891,7 +5891,8 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
+ status = nfs4_wait_for_completion_rpc_task(task);
+ if (status == 0)
+ status = task->tk_status;
+- if (status == 0)
++ /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
++ if (status == 0 && lgp->res.layoutp->len)
+ status = pnfs_layout_process(lgp);
+ rpc_put_task(task);
+ dprintk("<-- %s status=%d\n", __func__, status);
+diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
+index a03ee52..c1897f7 100644
+--- a/fs/nfs/objlayout/objio_osd.c
++++ b/fs/nfs/objlayout/objio_osd.c
+@@ -569,6 +569,7 @@ static struct pnfs_layoutdriver_type objlayout_type = {
+ .flags = PNFS_LAYOUTRET_ON_SETATTR |
+ PNFS_LAYOUTRET_ON_ERROR,
+
++ .owner = THIS_MODULE,
+ .alloc_layout_hdr = objlayout_alloc_layout_hdr,
+ .free_layout_hdr = objlayout_free_layout_hdr,
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 8b197d2..7d189dc 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1009,6 +1009,8 @@ free_client(struct nfs4_client *clp)
+ put_group_info(clp->cl_cred.cr_group_info);
+ kfree(clp->cl_principal);
+ kfree(clp->cl_name.data);
++ idr_remove_all(&clp->cl_stateids);
++ idr_destroy(&clp->cl_stateids);
+ kfree(clp);
+ }
+
+diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
+index 8445fbc..6f292dd 100644
+--- a/fs/notify/inotify/inotify_user.c
++++ b/fs/notify/inotify/inotify_user.c
+@@ -579,8 +579,6 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
+
+ /* don't allow invalid bits: we don't want flags set */
+ mask = inotify_arg_to_mask(arg);
+- if (unlikely(!(mask & IN_ALL_EVENTS)))
+- return -EINVAL;
+
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark)
+@@ -632,8 +630,6 @@ static int inotify_new_watch(struct fsnotify_group *group,
+
+ /* don't allow invalid bits: we don't want flags set */
+ mask = inotify_arg_to_mask(arg);
+- if (unlikely(!(mask & IN_ALL_EVENTS)))
+- return -EINVAL;
+
+ tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+ if (unlikely(!tmp_i_mark))
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index 78b68af..4402b18 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -593,9 +593,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
+ level = ocfs2_iocb_rw_locked_level(iocb);
+ ocfs2_rw_unlock(inode, level);
+
++ inode_dio_done(inode);
+ if (is_async)
+ aio_complete(iocb, ret, 0);
+- inode_dio_done(inode);
+ }
+
+ /*
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 81a4cd2..231eab2 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -2545,6 +2545,7 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
+ * everything is up to the caller :) */
+ status = ocfs2_should_refresh_lock_res(lockres);
+ if (status < 0) {
++ ocfs2_cluster_unlock(osb, lockres, level);
+ mlog_errno(status);
+ goto bail;
+ }
+@@ -2553,8 +2554,10 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
+
+ ocfs2_complete_lock_res_refresh(lockres, status);
+
+- if (status < 0)
++ if (status < 0) {
++ ocfs2_cluster_unlock(osb, lockres, level);
+ mlog_errno(status);
++ }
+ ocfs2_track_lock_refresh(lockres);
+ }
+ bail:
+diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
+index f169da4..b7e74b5 100644
+--- a/fs/ocfs2/suballoc.c
++++ b/fs/ocfs2/suballoc.c
+@@ -642,7 +642,7 @@ ocfs2_block_group_alloc_discontig(handle_t *handle,
+ * cluster groups will be staying in cache for the duration of
+ * this operation.
+ */
+- ac->ac_allow_chain_relink = 0;
++ ac->ac_disable_chain_relink = 1;
+
+ /* Claim the first region */
+ status = ocfs2_block_group_claim_bits(osb, handle, ac, min_bits,
+@@ -1823,7 +1823,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
+ * Do this *after* figuring out how many bits we're taking out
+ * of our target group.
+ */
+- if (ac->ac_allow_chain_relink &&
++ if (!ac->ac_disable_chain_relink &&
+ (prev_group_bh) &&
+ (ocfs2_block_group_reasonably_empty(bg, res->sr_bits))) {
+ status = ocfs2_relink_block_group(handle, alloc_inode,
+@@ -1928,7 +1928,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
+
+ victim = ocfs2_find_victim_chain(cl);
+ ac->ac_chain = victim;
+- ac->ac_allow_chain_relink = 1;
+
+ status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
+ res, &bits_left);
+@@ -1947,7 +1946,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
+ * searching each chain in order. Don't allow chain relinking
+ * because we only calculate enough journal credits for one
+ * relink per alloc. */
+- ac->ac_allow_chain_relink = 0;
++ ac->ac_disable_chain_relink = 1;
+ for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
+ if (i == victim)
+ continue;
+diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
+index b8afabf..a36d0aa 100644
+--- a/fs/ocfs2/suballoc.h
++++ b/fs/ocfs2/suballoc.h
+@@ -49,7 +49,7 @@ struct ocfs2_alloc_context {
+
+ /* these are used by the chain search */
+ u16 ac_chain;
+- int ac_allow_chain_relink;
++ int ac_disable_chain_relink;
+ group_search_t *ac_group_search;
+
+ u64 ac_last_group;
+diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
+index aa9e877..0d5ea9c 100644
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -7189,7 +7189,7 @@ int ocfs2_init_security_and_acl(struct inode *dir,
+ struct buffer_head *dir_bh = NULL;
+
+ ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
+- if (!ret) {
++ if (ret) {
+ mlog_errno(ret);
+ goto leave;
+ }
+diff --git a/fs/partitions/check.c b/fs/partitions/check.c
+index 6b5fcc5..1ef15cc 100644
+--- a/fs/partitions/check.c
++++ b/fs/partitions/check.c
+@@ -399,11 +399,11 @@ void delete_partition(struct gendisk *disk, int partno)
+ if (!part)
+ return;
+
+- blk_free_devt(part_devt(part));
+ rcu_assign_pointer(ptbl->part[partno], NULL);
+ rcu_assign_pointer(ptbl->last_lookup, NULL);
+ kobject_put(part->holder_dir);
+ device_del(part_to_dev(part));
++ blk_free_devt(part_devt(part));
+
+ hd_struct_put(part);
+ }
+diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
+index 57bbf90..45d18d1 100644
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -72,6 +72,27 @@ static char *reason_str[] = {
+ "Oops", "Panic", "Kexec", "Restart", "Halt", "Poweroff", "Emergency"
+ };
+
++bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
++{
++ /*
++ * In case of NMI path, pstore shouldn't be blocked
++ * regardless of reason.
++ */
++ if (in_nmi())
++ return true;
++
++ switch (reason) {
++ /* In panic case, other cpus are stopped by smp_send_stop(). */
++ case KMSG_DUMP_PANIC:
++ /* Emergency restart shouldn't be blocked by spin lock. */
++ case KMSG_DUMP_EMERG:
++ return true;
++ default:
++ return false;
++ }
++}
++EXPORT_SYMBOL_GPL(pstore_cannot_block_path);
++
+ /*
+ * callback from kmsg_dump. (s2,l2) has the most recently
+ * written bytes, older bytes are in (s1,l1). Save as much
+@@ -97,10 +118,12 @@ static void pstore_dump(struct kmsg_dumper *dumper,
+ else
+ why = "Unknown";
+
+- if (in_nmi()) {
+- is_locked = spin_trylock(&psinfo->buf_lock);
+- if (!is_locked)
+- pr_err("pstore dump routine blocked in NMI, may corrupt error record\n");
++ if (pstore_cannot_block_path(reason)) {
++ is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
++ if (!is_locked) {
++ pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
++ , in_nmi() ? "NMI" : why);
++ }
+ } else
+ spin_lock_irqsave(&psinfo->buf_lock, flags);
+ oopscount++;
+@@ -131,9 +154,9 @@ static void pstore_dump(struct kmsg_dumper *dumper,
+ total += l1_cpy + l2_cpy;
+ part++;
+ }
+- if (in_nmi()) {
++ if (pstore_cannot_block_path(reason)) {
+ if (is_locked)
+- spin_unlock(&psinfo->buf_lock);
++ spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+ } else
+ spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+ }
+diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
+index c542c73..f9c90b5 100644
+--- a/fs/ubifs/orphan.c
++++ b/fs/ubifs/orphan.c
+@@ -130,13 +130,14 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
+ else if (inum > o->inum)
+ p = p->rb_right;
+ else {
+- if (o->dnext) {
++ if (o->del) {
+ spin_unlock(&c->orphan_lock);
+ dbg_gen("deleted twice ino %lu",
+ (unsigned long)inum);
+ return;
+ }
+ if (o->cnext) {
++ o->del = 1;
+ o->dnext = c->orph_dnext;
+ c->orph_dnext = o;
+ spin_unlock(&c->orphan_lock);
+@@ -447,6 +448,7 @@ static void erase_deleted(struct ubifs_info *c)
+ orphan = dnext;
+ dnext = orphan->dnext;
+ ubifs_assert(!orphan->new);
++ ubifs_assert(orphan->del);
+ rb_erase(&orphan->rb, &c->orph_tree);
+ list_del(&orphan->list);
+ c->tot_orphans -= 1;
+@@ -536,6 +538,7 @@ static int insert_dead_orphan(struct ubifs_info *c, ino_t inum)
+ rb_link_node(&orphan->rb, parent, p);
+ rb_insert_color(&orphan->rb, &c->orph_tree);
+ list_add_tail(&orphan->list, &c->orph_list);
++ orphan->del = 1;
+ orphan->dnext = c->orph_dnext;
+ c->orph_dnext = orphan;
+ dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum,
+diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
+index 8bbc99e..a39fce5 100644
+--- a/fs/ubifs/ubifs.h
++++ b/fs/ubifs/ubifs.h
+@@ -908,6 +908,7 @@ struct ubifs_budget_req {
+ * @dnext: next orphan to delete
+ * @inum: inode number
+ * @new: %1 => added since the last commit, otherwise %0
++ * @del: %1 => delete pending, otherwise %0
+ */
+ struct ubifs_orphan {
+ struct rb_node rb;
+@@ -917,6 +918,7 @@ struct ubifs_orphan {
+ struct ubifs_orphan *dnext;
+ ino_t inum;
+ int new;
++ unsigned del:1;
+ };
+
+ /**
+diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
+index da64e15..6cdabb4 100644
+--- a/include/linux/auto_fs.h
++++ b/include/linux/auto_fs.h
+@@ -31,25 +31,16 @@
+ #define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION
+
+ /*
+- * Architectures where both 32- and 64-bit binaries can be executed
+- * on 64-bit kernels need this. This keeps the structure format
+- * uniform, and makes sure the wait_queue_token isn't too big to be
+- * passed back down to the kernel.
+- *
+- * This assumes that on these architectures:
+- * mode 32 bit 64 bit
+- * -------------------------
+- * int 32 bit 32 bit
+- * long 32 bit 64 bit
+- *
+- * If so, 32-bit user-space code should be backwards compatible.
++ * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
++ * back to the kernel via ioctl from userspace. On architectures where 32- and
++ * 64-bit userspace binaries can be executed it's important that the size of
++ * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we
++ * do not break the binary ABI interface by changing the structure size.
+ */
+-
+-#if defined(__sparc__) || defined(__mips__) || defined(__x86_64__) \
+- || defined(__powerpc__) || defined(__s390__)
+-typedef unsigned int autofs_wqt_t;
+-#else
++#if defined(__ia64__) || defined(__alpha__) /* pure 64bit architectures */
+ typedef unsigned long autofs_wqt_t;
++#else
++typedef unsigned int autofs_wqt_t;
+ #endif
+
+ /* Packet types */
+diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
+index f606406..acd8d4b 100644
+--- a/include/linux/binfmts.h
++++ b/include/linux/binfmts.h
+@@ -67,8 +67,6 @@ struct linux_binprm {
+ #define BINPRM_FLAGS_EXECFD_BIT 1
+ #define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
+
+-#define BINPRM_MAX_RECURSION 4
+-
+ /* Function parameter for binfmt->coredump */
+ struct coredump_params {
+ long signr;
+diff --git a/include/linux/console.h b/include/linux/console.h
+index 7453cfd..6ae6a15 100644
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -77,7 +77,9 @@ extern const struct consw prom_con; /* SPARC PROM console */
+ int con_is_bound(const struct consw *csw);
+ int register_con_driver(const struct consw *csw, int first, int last);
+ int unregister_con_driver(const struct consw *csw);
++int do_unregister_con_driver(const struct consw *csw);
+ int take_over_console(const struct consw *sw, int first, int last, int deflt);
++int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
+ void give_up_console(const struct consw *sw);
+ #ifdef CONFIG_HW_CONSOLE
+ int con_debug_enter(struct vc_data *vc);
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 29b6353..a276817 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2103,6 +2103,7 @@ extern void bd_forget(struct inode *inode);
+ extern void bdput(struct block_device *);
+ extern void invalidate_bdev(struct block_device *);
+ extern int sync_blockdev(struct block_device *bdev);
++extern void kill_bdev(struct block_device *);
+ extern struct super_block *freeze_bdev(struct block_device *);
+ extern void emergency_thaw_all(void);
+ extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
+@@ -2110,6 +2111,7 @@ extern int fsync_bdev(struct block_device *);
+ #else
+ static inline void bd_forget(struct inode *inode) {}
+ static inline int sync_blockdev(struct block_device *bdev) { return 0; }
++static inline void kill_bdev(struct block_device *bdev) {}
+ static inline void invalidate_bdev(struct block_device *bdev) {}
+
+ static inline struct super_block *freeze_bdev(struct block_device *sb)
+diff --git a/include/linux/idr.h b/include/linux/idr.h
+index 255491c..52a9da2 100644
+--- a/include/linux/idr.h
++++ b/include/linux/idr.h
+@@ -152,4 +152,15 @@ void ida_simple_remove(struct ida *ida, unsigned int id);
+
+ void __init idr_init_cache(void);
+
++/**
++ * idr_for_each_entry - iterate over an idr's elements of a given type
++ * @idp: idr handle
++ * @entry: the type * to use as cursor
++ * @id: id entry's key
++ */
++#define idr_for_each_entry(idp, entry, id) \
++ for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
++ entry != NULL; \
++ ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
++
+ #endif /* __IDR_H__ */
+diff --git a/include/linux/kmod.h b/include/linux/kmod.h
+index b16f653..f8d4b27 100644
+--- a/include/linux/kmod.h
++++ b/include/linux/kmod.h
+@@ -54,6 +54,8 @@ enum umh_wait {
+ UMH_WAIT_PROC = 1, /* wait for the process to complete */
+ };
+
++#define UMH_KILLABLE 4 /* wait for EXEC/PROC killable */
++
+ struct subprocess_info {
+ struct work_struct work;
+ struct completion *complete;
+diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
+index 1d1b1e1..ee2baf0 100644
+--- a/include/linux/mmu_notifier.h
++++ b/include/linux/mmu_notifier.h
+@@ -4,6 +4,7 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/mm_types.h>
++#include <linux/srcu.h>
+
+ struct mmu_notifier;
+ struct mmu_notifier_ops;
+diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
+index 9404854..ce2ab3d 100644
+--- a/include/linux/pps_kernel.h
++++ b/include/linux/pps_kernel.h
+@@ -43,7 +43,7 @@ struct pps_source_info {
+ int event, void *data); /* PPS echo function */
+
+ struct module *owner;
+- struct device *dev;
++ struct device *dev; /* Parent device for device_create */
+ };
+
+ struct pps_event_time {
+@@ -69,6 +69,7 @@ struct pps_device {
+ wait_queue_head_t queue; /* PPS event queue */
+
+ unsigned int id; /* PPS source unique ID */
++ void const *lookup_cookie; /* pps_lookup_dev only */
+ struct cdev cdev;
+ struct device *dev;
+ struct fasync_struct *async_queue; /* fasync method */
+@@ -82,16 +83,26 @@ struct pps_device {
+ extern struct device_attribute pps_attrs[];
+
+ /*
++ * Internal functions.
++ *
++ * These are not actually part of the exported API, but this is a
++ * convenient header file to put them in.
++ */
++
++extern int pps_register_cdev(struct pps_device *pps);
++extern void pps_unregister_cdev(struct pps_device *pps);
++
++/*
+ * Exported functions
+ */
+
+ extern struct pps_device *pps_register_source(
+ struct pps_source_info *info, int default_params);
+ extern void pps_unregister_source(struct pps_device *pps);
+-extern int pps_register_cdev(struct pps_device *pps);
+-extern void pps_unregister_cdev(struct pps_device *pps);
+ extern void pps_event(struct pps_device *pps,
+ struct pps_event_time *ts, int event, void *data);
++/* Look up a pps device by magic cookie */
++struct pps_device *pps_lookup_dev(void const *cookie);
+
+ static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
+ struct timespec ts)
+diff --git a/include/linux/pstore.h b/include/linux/pstore.h
+index 2ca8cde..9b16969 100644
+--- a/include/linux/pstore.h
++++ b/include/linux/pstore.h
+@@ -22,6 +22,8 @@
+ #ifndef _LINUX_PSTORE_H
+ #define _LINUX_PSTORE_H
+
++#include <linux/kmsg_dump.h>
++
+ /* types */
+ enum pstore_type_id {
+ PSTORE_TYPE_DMESG = 0,
+@@ -50,6 +52,7 @@ struct pstore_info {
+
+ #ifdef CONFIG_PSTORE
+ extern int pstore_register(struct pstore_info *);
++extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
+ extern int pstore_write(enum pstore_type_id type, char *buf, size_t size);
+ #else
+ static inline int
+@@ -57,6 +60,11 @@ pstore_register(struct pstore_info *psi)
+ {
+ return -ENODEV;
+ }
++static inline bool
++pstore_cannot_block_path(enum kmsg_dump_reason reason)
++{
++ return false;
++}
+ static inline int
+ pstore_write(enum pstore_type_id type, char *buf, size_t size)
+ {
+diff --git a/include/linux/quota.h b/include/linux/quota.h
+index cb78556..1162580 100644
+--- a/include/linux/quota.h
++++ b/include/linux/quota.h
+@@ -413,6 +413,7 @@ struct quota_module_name {
+ #define INIT_QUOTA_MODULE_NAMES {\
+ {QFMT_VFS_OLD, "quota_v1"},\
+ {QFMT_VFS_V0, "quota_v2"},\
++ {QFMT_VFS_V1, "quota_v2"},\
+ {0, NULL}}
+
+ #endif /* __KERNEL__ */
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index 8bec265..bae516e 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -47,8 +47,8 @@
+ #define PORT_U6_16550A 19 /* ST-Ericsson U6xxx internal UART */
+ #define PORT_TEGRA 20 /* NVIDIA Tegra internal UART */
+ #define PORT_XR17D15X 21 /* Exar XR17D15x UART */
+-#define PORT_BRCM_TRUMANAGE 22
+-#define PORT_MAX_8250 22 /* max port ID */
++#define PORT_BRCM_TRUMANAGE 25
++#define PORT_MAX_8250 25 /* max port ID */
+
+ /*
+ * ARM specific type numbers. These are not currently guaranteed
+diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h
+index a54b825..6f8b026 100644
+--- a/include/linux/usb/audio.h
++++ b/include/linux/usb/audio.h
+@@ -384,14 +384,16 @@ static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_de
+ int protocol)
+ {
+ __u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
+- return desc->baSourceID[desc->bNrInPins + control_size];
++ return *(uac_processing_unit_bmControls(desc, protocol)
++ + control_size);
+ }
+
+ static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc,
+ int protocol)
+ {
+ __u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
+- return &desc->baSourceID[desc->bNrInPins + control_size + 1];
++ return uac_processing_unit_bmControls(desc, protocol)
++ + control_size + 1;
+ }
+
+ /* 4.5.2 Class-Specific AS Interface Descriptor */
+diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
+index c2164fa..644921f 100644
+--- a/include/linux/vt_kern.h
++++ b/include/linux/vt_kern.h
+@@ -47,6 +47,7 @@ int con_set_cmap(unsigned char __user *cmap);
+ int con_get_cmap(unsigned char __user *cmap);
+ void scrollback(struct vc_data *vc, int lines);
+ void scrollfront(struct vc_data *vc, int lines);
++void clear_buffer_attributes(struct vc_data *vc);
+ void update_region(struct vc_data *vc, unsigned long start, int count);
+ void redraw_screen(struct vc_data *vc, int is_switch);
+ #define update_screen(x) redraw_screen(x, 0)
+@@ -131,6 +132,8 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new);
+ int vt_waitactive(int n);
+ void change_console(struct vc_data *new_vc);
+ void reset_vc(struct vc_data *vc);
++extern int do_unbind_con_driver(const struct consw *csw, int first, int last,
++ int deflt);
+ extern int unbind_con_driver(const struct consw *csw, int first, int last,
+ int deflt);
+ int vty_init(const struct file_operations *console_fops);
+diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
+index e46674d..f9ce2fa 100644
+--- a/include/net/inet6_hashtables.h
++++ b/include/net/inet6_hashtables.h
+@@ -28,16 +28,16 @@
+
+ struct inet_hashinfo;
+
+-/* I have no idea if this is a good hash for v6 or not. -DaveM */
+ static inline unsigned int inet6_ehashfn(struct net *net,
+ const struct in6_addr *laddr, const u16 lport,
+ const struct in6_addr *faddr, const __be16 fport)
+ {
+- u32 ports = (lport ^ (__force u16)fport);
++ u32 ports = (((u32)lport) << 16) | (__force u32)fport;
+
+ return jhash_3words((__force u32)laddr->s6_addr32[3],
+- (__force u32)faddr->s6_addr32[3],
+- ports, inet_ehash_secret + net_hash_mix(net));
++ ipv6_addr_jhash(faddr),
++ ports,
++ inet_ehash_secret + net_hash_mix(net));
+ }
+
+ static inline int inet6_sk_ehashfn(const struct sock *sk)
+diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
+index f941964..ee4ee91 100644
+--- a/include/net/inet_sock.h
++++ b/include/net/inet_sock.h
+@@ -199,6 +199,7 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
+ extern int inet_sk_rebuild_header(struct sock *sk);
+
+ extern u32 inet_ehash_secret;
++extern u32 ipv6_hash_secret;
+ extern void build_ehash_secret(void);
+
+ static inline unsigned int inet_ehashfn(struct net *net,
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index a366a8a..4d549cf 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -15,6 +15,7 @@
+
+ #include <linux/ipv6.h>
+ #include <linux/hardirq.h>
++#include <linux/jhash.h>
+ #include <net/if_inet6.h>
+ #include <net/ndisc.h>
+ #include <net/flow.h>
+@@ -386,6 +387,17 @@ struct ip6_create_arg {
+ void ip6_frag_init(struct inet_frag_queue *q, void *a);
+ int ip6_frag_match(struct inet_frag_queue *q, void *a);
+
++/* more secured version of ipv6_addr_hash() */
++static inline u32 ipv6_addr_jhash(const struct in6_addr *a)
++{
++ u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
++
++ return jhash_3words(v,
++ (__force u32)a->s6_addr32[2],
++ (__force u32)a->s6_addr32[3],
++ ipv6_hash_secret);
++}
++
+ static inline int ipv6_addr_any(const struct in6_addr *a)
+ {
+ return (a->s6_addr32[0] | a->s6_addr32[1] |
+diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h
+index 2be31ff..6f30e70 100644
+--- a/include/target/target_core_device.h
++++ b/include/target/target_core_device.h
+@@ -50,7 +50,7 @@ extern struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *
+ extern int core_dev_del_lun(struct se_portal_group *, u32);
+ extern struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
+ extern struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
+- u32, char *, int *);
++ struct se_node_acl *, u32, int *);
+ extern int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
+ struct se_lun_acl *, u32, u32);
+ extern int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index b6cacf1..c0739f8 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -361,12 +361,20 @@ static void __put_css_set(struct css_set *cg, int taskexit)
+ struct cgroup *cgrp = link->cgrp;
+ list_del(&link->cg_link_list);
+ list_del(&link->cgrp_link_list);
++
++ /*
++ * We may not be holding cgroup_mutex, and if cgrp->count is
++ * dropped to 0 the cgroup can be destroyed at any time, hence
++ * rcu_read_lock is used to keep it alive.
++ */
++ rcu_read_lock();
+ if (atomic_dec_and_test(&cgrp->count) &&
+ notify_on_release(cgrp)) {
+ if (taskexit)
+ set_bit(CGRP_RELEASABLE, &cgrp->flags);
+ check_for_release(cgrp);
+ }
++ rcu_read_unlock();
+
+ kfree(link);
+ }
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 84a524b..835eee6 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -2507,8 +2507,16 @@ void cpuset_print_task_mems_allowed(struct task_struct *tsk)
+
+ dentry = task_cs(tsk)->css.cgroup->dentry;
+ spin_lock(&cpuset_buffer_lock);
+- snprintf(cpuset_name, CPUSET_NAME_LEN,
+- dentry ? (const char *)dentry->d_name.name : "/");
++
++ if (!dentry) {
++ strcpy(cpuset_name, "/");
++ } else {
++ spin_lock(&dentry->d_lock);
++ strlcpy(cpuset_name, (const char *)dentry->d_name.name,
++ CPUSET_NAME_LEN);
++ spin_unlock(&dentry->d_lock);
++ }
++
+ nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
+ tsk->mems_allowed);
+ printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index 6db7a5e..cdd5607 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -640,21 +640,9 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
+ * and expiry check is done in the hrtimer_interrupt or in the softirq.
+ */
+ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+- struct hrtimer_clock_base *base,
+- int wakeup)
++ struct hrtimer_clock_base *base)
+ {
+- if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
+- if (wakeup) {
+- raw_spin_unlock(&base->cpu_base->lock);
+- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+- raw_spin_lock(&base->cpu_base->lock);
+- } else
+- __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+-
+- return 1;
+- }
+-
+- return 0;
++ return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
+ }
+
+ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+@@ -735,8 +723,7 @@ static inline int hrtimer_switch_to_hres(void) { return 0; }
+ static inline void
+ hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
+ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+- struct hrtimer_clock_base *base,
+- int wakeup)
++ struct hrtimer_clock_base *base)
+ {
+ return 0;
+ }
+@@ -995,8 +982,21 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ *
+ * XXX send_remote_softirq() ?
+ */
+- if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
+- hrtimer_enqueue_reprogram(timer, new_base, wakeup);
++ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
++ && hrtimer_enqueue_reprogram(timer, new_base)) {
++ if (wakeup) {
++ /*
++ * We need to drop cpu_base->lock to avoid a
++ * lock ordering issue vs. rq->lock.
++ */
++ raw_spin_unlock(&new_base->cpu_base->lock);
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++ local_irq_restore(flags);
++ return ret;
++ } else {
++ __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++ }
++ }
+
+ unlock_hrtimer_base(timer, &flags);
+
+diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
+index dc813a9..63633a3 100644
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -80,13 +80,11 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
+
+ /*
+ * All handlers must agree on IRQF_SHARED, so we test just the
+- * first. Check for action->next as well.
++ * first.
+ */
+ action = desc->action;
+ if (!action || !(action->flags & IRQF_SHARED) ||
+- (action->flags & __IRQF_TIMER) ||
+- (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
+- !action->next)
++ (action->flags & __IRQF_TIMER))
+ goto out;
+
+ /* Already running on another processor */
+@@ -104,6 +102,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
+ do {
+ if (handle_irq_event(desc) == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
++ /* Make sure that there is still a valid action */
+ action = desc->action;
+ } while ((desc->istate & IRQS_PENDING) && action);
+ desc->istate &= ~IRQS_POLL_INPROGRESS;
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index a4bea97..d6fe08a 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -58,6 +58,43 @@ static DEFINE_SPINLOCK(umh_sysctl_lock);
+ */
+ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
+
++static void free_modprobe_argv(struct subprocess_info *info)
++{
++ kfree(info->argv[3]); /* check call_modprobe() */
++ kfree(info->argv);
++}
++
++static int call_modprobe(char *module_name, int wait)
++{
++ static char *envp[] = {
++ "HOME=/",
++ "TERM=linux",
++ "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
++ NULL
++ };
++
++ char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
++ if (!argv)
++ goto out;
++
++ module_name = kstrdup(module_name, GFP_KERNEL);
++ if (!module_name)
++ goto free_argv;
++
++ argv[0] = modprobe_path;
++ argv[1] = "-q";
++ argv[2] = "--";
++ argv[3] = module_name; /* check free_modprobe_argv() */
++ argv[4] = NULL;
++
++ return call_usermodehelper_fns(modprobe_path, argv, envp,
++ wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
++free_argv:
++ kfree(argv);
++out:
++ return -ENOMEM;
++}
++
+ /**
+ * __request_module - try to load a kernel module
+ * @wait: wait (or not) for the operation to complete
+@@ -79,11 +116,6 @@ int __request_module(bool wait, const char *fmt, ...)
+ char module_name[MODULE_NAME_LEN];
+ unsigned int max_modprobes;
+ int ret;
+- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
+- static char *envp[] = { "HOME=/",
+- "TERM=linux",
+- "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+- NULL };
+ static atomic_t kmod_concurrent = ATOMIC_INIT(0);
+ #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
+ static int kmod_loop_msg;
+@@ -126,9 +158,7 @@ int __request_module(bool wait, const char *fmt, ...)
+
+ trace_module_request(module_name, wait, _RET_IP_);
+
+- ret = call_usermodehelper_fns(modprobe_path, argv, envp,
+- wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC,
+- NULL, NULL, NULL);
++ ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
+
+ atomic_dec(&kmod_concurrent);
+ return ret;
+@@ -186,7 +216,7 @@ static int ____call_usermodehelper(void *data)
+ /* Exec failed? */
+ fail:
+ sub_info->retval = retval;
+- do_exit(0);
++ return 0;
+ }
+
+ void call_usermodehelper_freeinfo(struct subprocess_info *info)
+@@ -197,6 +227,19 @@ void call_usermodehelper_freeinfo(struct subprocess_info *info)
+ }
+ EXPORT_SYMBOL(call_usermodehelper_freeinfo);
+
++static void umh_complete(struct subprocess_info *sub_info)
++{
++ struct completion *comp = xchg(&sub_info->complete, NULL);
++ /*
++ * See call_usermodehelper_exec(). If xchg() returns NULL
++ * we own sub_info, the UMH_KILLABLE caller has gone away.
++ */
++ if (comp)
++ complete(comp);
++ else
++ call_usermodehelper_freeinfo(sub_info);
++}
++
+ /* Keventd can't block, but this (a child) can. */
+ static int wait_for_helper(void *data)
+ {
+@@ -233,7 +276,7 @@ static int wait_for_helper(void *data)
+ sub_info->retval = ret;
+ }
+
+- complete(sub_info->complete);
++ umh_complete(sub_info);
+ return 0;
+ }
+
+@@ -245,6 +288,9 @@ static void __call_usermodehelper(struct work_struct *work)
+ enum umh_wait wait = sub_info->wait;
+ pid_t pid;
+
++ if (wait != UMH_NO_WAIT)
++ wait &= ~UMH_KILLABLE;
++
+ /* CLONE_VFORK: wait until the usermode helper has execve'd
+ * successfully We need the data structures to stay around
+ * until that is done. */
+@@ -267,7 +313,7 @@ static void __call_usermodehelper(struct work_struct *work)
+ case UMH_WAIT_EXEC:
+ if (pid < 0)
+ sub_info->retval = pid;
+- complete(sub_info->complete);
++ umh_complete(sub_info);
+ }
+ }
+
+@@ -435,9 +481,21 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
+ queue_work(khelper_wq, &sub_info->work);
+ if (wait == UMH_NO_WAIT) /* task has freed sub_info */
+ goto unlock;
++
++ if (wait & UMH_KILLABLE) {
++ retval = wait_for_completion_killable(&done);
++ if (!retval)
++ goto wait_done;
++
++ /* umh_complete() will see NULL and free sub_info */
++ if (xchg(&sub_info->complete, NULL))
++ goto unlock;
++ /* fallthrough, umh_complete() was already called */
++ }
++
+ wait_for_completion(&done);
++wait_done:
+ retval = sub_info->retval;
+-
+ out:
+ call_usermodehelper_freeinfo(sub_info);
+ unlock:
+diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
+index e7cb76d..962c291 100644
+--- a/kernel/posix-cpu-timers.c
++++ b/kernel/posix-cpu-timers.c
+@@ -1450,8 +1450,10 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
+ while (!signal_pending(current)) {
+ if (timer.it.cpu.expires.sched == 0) {
+ /*
+- * Our timer fired and was reset.
++ * Our timer fired and was reset, below
++ * deletion can not fail.
+ */
++ posix_cpu_timer_del(&timer);
+ spin_unlock_irq(&timer.it_lock);
+ return 0;
+ }
+@@ -1469,9 +1471,26 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
+ * We were interrupted by a signal.
+ */
+ sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
+- posix_cpu_timer_set(&timer, 0, &zero_it, it);
++ error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
++ if (!error) {
++ /*
++ * Timer is now unarmed, deletion can not fail.
++ */
++ posix_cpu_timer_del(&timer);
++ }
+ spin_unlock_irq(&timer.it_lock);
+
++ while (error == TIMER_RETRY) {
++ /*
++ * We need to handle case when timer was or is in the
++ * middle of firing. In other cases we already freed
++ * resources.
++ */
++ spin_lock_irq(&timer.it_lock);
++ error = posix_cpu_timer_del(&timer);
++ spin_unlock_irq(&timer.it_lock);
++ }
++
+ if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
+ /*
+ * It actually did fire already.
+diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
+index 69185ae..e885be1 100644
+--- a/kernel/posix-timers.c
++++ b/kernel/posix-timers.c
+@@ -639,6 +639,13 @@ static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
+ {
+ struct k_itimer *timr;
+
++ /*
++ * timer_t could be any type >= int and we want to make sure any
++ * @timer_id outside positive int range fails lookup.
++ */
++ if ((unsigned long long)timer_id > INT_MAX)
++ return NULL;
++
+ rcu_read_lock();
+ timr = idr_find(&posix_timers_id, (int)timer_id);
+ if (timr) {
+diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
+index a650694..9f9aa32 100644
+--- a/kernel/sysctl_binary.c
++++ b/kernel/sysctl_binary.c
+@@ -1194,9 +1194,10 @@ static ssize_t bin_dn_node_address(struct file *file,
+
+ /* Convert the decnet address to binary */
+ result = -EIO;
+- nodep = strchr(buf, '.') + 1;
++ nodep = strchr(buf, '.');
+ if (!nodep)
+ goto out;
++ ++nodep;
+
+ area = simple_strtoul(buf, NULL, 10);
+ node = simple_strtoul(nodep, NULL, 10);
+diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl
+index eb51d76..3f42652 100644
+--- a/kernel/timeconst.pl
++++ b/kernel/timeconst.pl
+@@ -369,10 +369,8 @@ if ($hz eq '--can') {
+ die "Usage: $0 HZ\n";
+ }
+
+- @val = @{$canned_values{$hz}};
+- if (!defined(@val)) {
+- @val = compute_values($hz);
+- }
++ $cv = $canned_values{$hz};
++ @val = defined($cv) ? @$cv : compute_values($hz);
+ output($hz, @val);
+ }
+ exit 0;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 4b1a96b..6c880e8 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3454,37 +3454,51 @@ static void ftrace_init_module(struct module *mod,
+ ftrace_process_locs(mod, start, end);
+ }
+
+-static int ftrace_module_notify(struct notifier_block *self,
+- unsigned long val, void *data)
++static int ftrace_module_notify_enter(struct notifier_block *self,
++ unsigned long val, void *data)
+ {
+ struct module *mod = data;
+
+- switch (val) {
+- case MODULE_STATE_COMING:
++ if (val == MODULE_STATE_COMING)
+ ftrace_init_module(mod, mod->ftrace_callsites,
+ mod->ftrace_callsites +
+ mod->num_ftrace_callsites);
+- break;
+- case MODULE_STATE_GOING:
++ return 0;
++}
++
++static int ftrace_module_notify_exit(struct notifier_block *self,
++ unsigned long val, void *data)
++{
++ struct module *mod = data;
++
++ if (val == MODULE_STATE_GOING)
+ ftrace_release_mod(mod);
+- break;
+- }
+
+ return 0;
+ }
+ #else
+-static int ftrace_module_notify(struct notifier_block *self,
+- unsigned long val, void *data)
++static int ftrace_module_notify_enter(struct notifier_block *self,
++ unsigned long val, void *data)
++{
++ return 0;
++}
++static int ftrace_module_notify_exit(struct notifier_block *self,
++ unsigned long val, void *data)
+ {
+ return 0;
+ }
+ #endif /* CONFIG_MODULES */
+
+-struct notifier_block ftrace_module_nb = {
+- .notifier_call = ftrace_module_notify,
++struct notifier_block ftrace_module_enter_nb = {
++ .notifier_call = ftrace_module_notify_enter,
+ .priority = INT_MAX, /* Run before anything that can use kprobes */
+ };
+
++struct notifier_block ftrace_module_exit_nb = {
++ .notifier_call = ftrace_module_notify_exit,
++ .priority = INT_MIN, /* Run after anything that can remove kprobes */
++};
++
+ extern unsigned long __start_mcount_loc[];
+ extern unsigned long __stop_mcount_loc[];
+
+@@ -3516,9 +3530,13 @@ void __init ftrace_init(void)
+ __start_mcount_loc,
+ __stop_mcount_loc);
+
+- ret = register_module_notifier(&ftrace_module_nb);
++ ret = register_module_notifier(&ftrace_module_enter_nb);
++ if (ret)
++ pr_warning("Failed to register trace ftrace module enter notifier\n");
++
++ ret = register_module_notifier(&ftrace_module_exit_nb);
+ if (ret)
+- pr_warning("Failed to register trace ftrace module notifier\n");
++ pr_warning("Failed to register trace ftrace module exit notifier\n");
+
+ set_ftrace_early_filters();
+
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 7bf068a..0ad2420 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -128,6 +128,7 @@ struct worker {
+ };
+
+ struct work_struct *current_work; /* L: work being processed */
++ work_func_t current_func; /* L: current_work's fn */
+ struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
+ struct list_head scheduled; /* L: scheduled works */
+ struct task_struct *task; /* I: worker task */
+@@ -843,7 +844,8 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry(worker, tmp, bwh, hentry)
+- if (worker->current_work == work)
++ if (worker->current_work == work &&
++ worker->current_func == work->func)
+ return worker;
+ return NULL;
+ }
+@@ -853,9 +855,27 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
+ * @gcwq: gcwq of interest
+ * @work: work to find worker for
+ *
+- * Find a worker which is executing @work on @gcwq. This function is
+- * identical to __find_worker_executing_work() except that this
+- * function calculates @bwh itself.
++ * Find a worker which is executing @work on @gcwq by searching
++ * @gcwq->busy_hash which is keyed by the address of @work. For a worker
++ * to match, its current execution should match the address of @work and
++ * its work function. This is to avoid unwanted dependency between
++ * unrelated work executions through a work item being recycled while still
++ * being executed.
++ *
++ * This is a bit tricky. A work item may be freed once its execution
++ * starts and nothing prevents the freed area from being recycled for
++ * another work item. If the same work item address ends up being reused
++ * before the original execution finishes, workqueue will identify the
++ * recycled work item as currently executing and make it wait until the
++ * current execution finishes, introducing an unwanted dependency.
++ *
++ * This function checks the work item address, work function and workqueue
++ * to avoid false positives. Note that this isn't complete as one may
++ * construct a work function which can introduce dependency onto itself
++ * through a recycled work item. Well, if somebody wants to shoot oneself
++ * in the foot that badly, there's only so much we can do, and if such
++ * deadlock actually occurs, it should be easy to locate the culprit work
++ * function.
+ *
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock).
+@@ -1816,7 +1836,6 @@ __acquires(&gcwq->lock)
+ struct global_cwq *gcwq = cwq->gcwq;
+ struct hlist_head *bwh = busy_worker_head(gcwq, work);
+ bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
+- work_func_t f = work->func;
+ int work_color;
+ struct worker *collision;
+ #ifdef CONFIG_LOCKDEP
+@@ -1845,6 +1864,7 @@ __acquires(&gcwq->lock)
+ debug_work_deactivate(work);
+ hlist_add_head(&worker->hentry, bwh);
+ worker->current_work = work;
++ worker->current_func = work->func;
+ worker->current_cwq = cwq;
+ work_color = get_work_color(work);
+
+@@ -1882,7 +1902,7 @@ __acquires(&gcwq->lock)
+ lock_map_acquire_read(&cwq->wq->lockdep_map);
+ lock_map_acquire(&lockdep_map);
+ trace_workqueue_execute_start(work);
+- f(work);
++ worker->current_func(work);
+ /*
+ * While we must be careful to not use "work" after this, the trace
+ * point will only record its address.
+@@ -1892,11 +1912,10 @@ __acquires(&gcwq->lock)
+ lock_map_release(&cwq->wq->lockdep_map);
+
+ if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
+- printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
+- "%s/0x%08x/%d\n",
+- current->comm, preempt_count(), task_pid_nr(current));
+- printk(KERN_ERR " last function: ");
+- print_symbol("%s\n", (unsigned long)f);
++ pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
++ " last function: %pf\n",
++ current->comm, preempt_count(), task_pid_nr(current),
++ worker->current_func);
+ debug_show_held_locks(current);
+ dump_stack();
+ }
+@@ -1910,6 +1929,7 @@ __acquires(&gcwq->lock)
+ /* we're done with it, release */
+ hlist_del_init(&worker->hentry);
+ worker->current_work = NULL;
++ worker->current_func = NULL;
+ worker->current_cwq = NULL;
+ cwq_dec_nr_in_flight(cwq, work_color, false);
+ }
+diff --git a/lib/idr.c b/lib/idr.c
+index ed055b2..aadc525 100644
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -39,6 +39,14 @@
+ static struct kmem_cache *idr_layer_cache;
+ static DEFINE_SPINLOCK(simple_ida_lock);
+
++/* the maximum ID which can be allocated given idr->layers */
++static int idr_max(int layers)
++{
++ int bits = min_t(int, layers * IDR_BITS, MAX_ID_SHIFT);
++
++ return (1 << bits) - 1;
++}
++
+ static struct idr_layer *get_from_free_list(struct idr *idp)
+ {
+ struct idr_layer *p;
+@@ -223,7 +231,7 @@ build_up:
+ * Add a new layer to the top of the tree if the requested
+ * id is larger than the currently allocated space.
+ */
+- while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
++ while (id > idr_max(layers)) {
+ layers++;
+ if (!p->count) {
+ /* special case: if the tree is currently empty,
+@@ -265,7 +273,7 @@ build_up:
+
+ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
+ {
+- struct idr_layer *pa[MAX_LEVEL];
++ struct idr_layer *pa[MAX_LEVEL + 1];
+ int id;
+
+ id = idr_get_empty_slot(idp, starting_id, pa);
+@@ -357,7 +365,7 @@ static void idr_remove_warning(int id)
+ static void sub_remove(struct idr *idp, int shift, int id)
+ {
+ struct idr_layer *p = idp->top;
+- struct idr_layer **pa[MAX_LEVEL];
++ struct idr_layer **pa[MAX_LEVEL + 1];
+ struct idr_layer ***paa = &pa[0];
+ struct idr_layer *to_free;
+ int n;
+@@ -451,16 +459,16 @@ void idr_remove_all(struct idr *idp)
+ int n, id, max;
+ int bt_mask;
+ struct idr_layer *p;
+- struct idr_layer *pa[MAX_LEVEL];
++ struct idr_layer *pa[MAX_LEVEL + 1];
+ struct idr_layer **paa = &pa[0];
+
+ n = idp->layers * IDR_BITS;
+ p = idp->top;
+ rcu_assign_pointer(idp->top, NULL);
+- max = 1 << n;
++ max = idr_max(idp->layers);
+
+ id = 0;
+- while (id < max) {
++ while (id >= 0 && id <= max) {
+ while (n > IDR_BITS && p) {
+ n -= IDR_BITS;
+ *paa++ = p;
+@@ -519,7 +527,7 @@ void *idr_find(struct idr *idp, int id)
+ /* Mask off upper bits we don't use for the search. */
+ id &= MAX_ID_MASK;
+
+- if (id >= (1 << n))
++ if (id > idr_max(p->layer + 1))
+ return NULL;
+ BUG_ON(n == 0);
+
+@@ -555,15 +563,15 @@ int idr_for_each(struct idr *idp,
+ {
+ int n, id, max, error = 0;
+ struct idr_layer *p;
+- struct idr_layer *pa[MAX_LEVEL];
++ struct idr_layer *pa[MAX_LEVEL + 1];
+ struct idr_layer **paa = &pa[0];
+
+ n = idp->layers * IDR_BITS;
+ p = rcu_dereference_raw(idp->top);
+- max = 1 << n;
++ max = idr_max(idp->layers);
+
+ id = 0;
+- while (id < max) {
++ while (id >= 0 && id <= max) {
+ while (n > 0 && p) {
+ n -= IDR_BITS;
+ *paa++ = p;
+@@ -595,23 +603,25 @@ EXPORT_SYMBOL(idr_for_each);
+ * Returns pointer to registered object with id, which is next number to
+ * given id. After being looked up, *@nextidp will be updated for the next
+ * iteration.
++ *
++ * This function can be called under rcu_read_lock(), given that the leaf
++ * pointers lifetimes are correctly managed.
+ */
+-
+ void *idr_get_next(struct idr *idp, int *nextidp)
+ {
+- struct idr_layer *p, *pa[MAX_LEVEL];
++ struct idr_layer *p, *pa[MAX_LEVEL + 1];
+ struct idr_layer **paa = &pa[0];
+ int id = *nextidp;
+ int n, max;
+
+ /* find first ent */
+- n = idp->layers * IDR_BITS;
+- max = 1 << n;
+ p = rcu_dereference_raw(idp->top);
+ if (!p)
+ return NULL;
++ n = (p->layer + 1) * IDR_BITS;
++ max = idr_max(p->layer + 1);
+
+- while (id < max) {
++ while (id >= 0 && id <= max) {
+ while (n > 0 && p) {
+ n -= IDR_BITS;
+ *paa++ = p;
+@@ -623,7 +633,14 @@ void *idr_get_next(struct idr *idp, int *nextidp)
+ return p;
+ }
+
+- id += 1 << n;
++ /*
++ * Proceed to the next layer at the current level. Unlike
++ * idr_for_each(), @id isn't guaranteed to be aligned to
++ * layer boundary at this point and adding 1 << n may
++ * incorrectly skip IDs. Make sure we jump to the
++ * beginning of the next layer using round_up().
++ */
++ id = round_up(id + 1, 1 << n);
+ while (n < fls(id)) {
+ n += IDR_BITS;
+ p = *--paa;
+@@ -778,7 +795,7 @@ EXPORT_SYMBOL(ida_pre_get);
+ */
+ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
+ {
+- struct idr_layer *pa[MAX_LEVEL];
++ struct idr_layer *pa[MAX_LEVEL + 1];
+ struct ida_bitmap *bitmap;
+ unsigned long flags;
+ int idr_id = starting_id / IDA_BITMAP_BITS;
+diff --git a/mm/fadvise.c b/mm/fadvise.c
+index 8d723c9..35b2bb0 100644
+--- a/mm/fadvise.c
++++ b/mm/fadvise.c
+@@ -17,6 +17,7 @@
+ #include <linux/fadvise.h>
+ #include <linux/writeback.h>
+ #include <linux/syscalls.h>
++#include <linux/swap.h>
+
+ #include <asm/unistd.h>
+
+@@ -123,9 +124,22 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
+ start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT;
+ end_index = (endbyte >> PAGE_CACHE_SHIFT);
+
+- if (end_index >= start_index)
+- invalidate_mapping_pages(mapping, start_index,
++ if (end_index >= start_index) {
++ unsigned long count = invalidate_mapping_pages(mapping,
++ start_index, end_index);
++
++ /*
++ * If fewer pages were invalidated than expected then
++ * it is possible that some of the pages were on
++ * a per-cpu pagevec for a remote CPU. Drain all
++ * pagevecs and try again.
++ */
++ if (count < (end_index - start_index + 1)) {
++ lru_add_drain_all();
++ invalidate_mapping_pages(mapping, start_index,
+ end_index);
++ }
++ }
+ break;
+ default:
+ ret = -EINVAL;
+diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
+index 862b608..8d1ca2d 100644
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -14,10 +14,14 @@
+ #include <linux/export.h>
+ #include <linux/mm.h>
+ #include <linux/err.h>
++#include <linux/srcu.h>
+ #include <linux/rcupdate.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+
++/* global SRCU for all MMs */
++static struct srcu_struct srcu;
++
+ /*
+ * This function can't run concurrently against mmu_notifier_register
+ * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
+@@ -25,58 +29,61 @@
+ * in parallel despite there being no task using this mm any more,
+ * through the vmas outside of the exit_mmap context, such as with
+ * vmtruncate. This serializes against mmu_notifier_unregister with
+- * the mmu_notifier_mm->lock in addition to RCU and it serializes
+- * against the other mmu notifiers with RCU. struct mmu_notifier_mm
++ * the mmu_notifier_mm->lock in addition to SRCU and it serializes
++ * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
+ * can't go away from under us as exit_mmap holds an mm_count pin
+ * itself.
+ */
+ void __mmu_notifier_release(struct mm_struct *mm)
+ {
+ struct mmu_notifier *mn;
+- struct hlist_node *n;
++ int id;
+
+ /*
+- * RCU here will block mmu_notifier_unregister until
+- * ->release returns.
++ * srcu_read_lock() here will block synchronize_srcu() in
++ * mmu_notifier_unregister() until all registered
++ * ->release() callouts this function makes have
++ * returned.
+ */
+- rcu_read_lock();
+- hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
+- /*
+- * if ->release runs before mmu_notifier_unregister it
+- * must be handled as it's the only way for the driver
+- * to flush all existing sptes and stop the driver
+- * from establishing any more sptes before all the
+- * pages in the mm are freed.
+- */
+- if (mn->ops->release)
+- mn->ops->release(mn, mm);
+- rcu_read_unlock();
+-
++ id = srcu_read_lock(&srcu);
+ spin_lock(&mm->mmu_notifier_mm->lock);
+ while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
+ mn = hlist_entry(mm->mmu_notifier_mm->list.first,
+ struct mmu_notifier,
+ hlist);
++
+ /*
+- * We arrived before mmu_notifier_unregister so
+- * mmu_notifier_unregister will do nothing other than
+- * to wait ->release to finish and
+- * mmu_notifier_unregister to return.
++ * Unlink. This will prevent mmu_notifier_unregister()
++ * from also making the ->release() callout.
+ */
+ hlist_del_init_rcu(&mn->hlist);
++ spin_unlock(&mm->mmu_notifier_mm->lock);
++
++ /*
++ * Clear sptes. (see 'release' description in mmu_notifier.h)
++ */
++ if (mn->ops->release)
++ mn->ops->release(mn, mm);
++
++ spin_lock(&mm->mmu_notifier_mm->lock);
+ }
+ spin_unlock(&mm->mmu_notifier_mm->lock);
+
+ /*
+- * synchronize_rcu here prevents mmu_notifier_release to
+- * return to exit_mmap (which would proceed freeing all pages
+- * in the mm) until the ->release method returns, if it was
+- * invoked by mmu_notifier_unregister.
+- *
+- * The mmu_notifier_mm can't go away from under us because one
+- * mm_count is hold by exit_mmap.
++ * All callouts to ->release() which we have done are complete.
++ * Allow synchronize_srcu() in mmu_notifier_unregister() to complete
++ */
++ srcu_read_unlock(&srcu, id);
++
++ /*
++ * mmu_notifier_unregister() may have unlinked a notifier and may
++ * still be calling out to it. Additionally, other notifiers
++ * may have been active via vmtruncate() et. al. Block here
++ * to ensure that all notifier callouts for this mm have been
++ * completed and the sptes are really cleaned up before returning
++ * to exit_mmap().
+ */
+- synchronize_rcu();
++ synchronize_srcu(&srcu);
+ }
+
+ /*
+@@ -89,14 +96,14 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
+- int young = 0;
++ int young = 0, id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->clear_flush_young)
+ young |= mn->ops->clear_flush_young(mn, mm, address);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+
+ return young;
+ }
+@@ -106,9 +113,9 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
+- int young = 0;
++ int young = 0, id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->test_young) {
+ young = mn->ops->test_young(mn, mm, address);
+@@ -116,7 +123,7 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
+ break;
+ }
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+
+ return young;
+ }
+@@ -126,8 +133,9 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
++ int id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->change_pte)
+ mn->ops->change_pte(mn, mm, address, pte);
+@@ -138,7 +146,7 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
+ else if (mn->ops->invalidate_page)
+ mn->ops->invalidate_page(mn, mm, address);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+ }
+
+ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+@@ -146,13 +154,14 @@ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
++ int id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->invalidate_page)
+ mn->ops->invalidate_page(mn, mm, address);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+ }
+
+ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+@@ -160,13 +169,14 @@ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
++ int id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->invalidate_range_start)
+ mn->ops->invalidate_range_start(mn, mm, start, end);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+ }
+
+ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+@@ -174,13 +184,14 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+ {
+ struct mmu_notifier *mn;
+ struct hlist_node *n;
++ int id;
+
+- rcu_read_lock();
++ id = srcu_read_lock(&srcu);
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+ if (mn->ops->invalidate_range_end)
+ mn->ops->invalidate_range_end(mn, mm, start, end);
+ }
+- rcu_read_unlock();
++ srcu_read_unlock(&srcu, id);
+ }
+
+ static int do_mmu_notifier_register(struct mmu_notifier *mn,
+@@ -192,6 +203,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
+
+ BUG_ON(atomic_read(&mm->mm_users) <= 0);
+
++ /*
++ * Verify that mmu_notifier_init() already run and the global srcu is
++ * initialized.
++ */
++ BUG_ON(!srcu.per_cpu_ref);
++
+ ret = -ENOMEM;
+ mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+ if (unlikely(!mmu_notifier_mm))
+@@ -274,8 +291,8 @@ void __mmu_notifier_mm_destroy(struct mm_struct *mm)
+ /*
+ * This releases the mm_count pin automatically and frees the mm
+ * structure if it was the last user of it. It serializes against
+- * running mmu notifiers with RCU and against mmu_notifier_unregister
+- * with the unregister lock + RCU. All sptes must be dropped before
++ * running mmu notifiers with SRCU and against mmu_notifier_unregister
++ * with the unregister lock + SRCU. All sptes must be dropped before
+ * calling mmu_notifier_unregister. ->release or any other notifier
+ * method may be invoked concurrently with mmu_notifier_unregister,
+ * and only after mmu_notifier_unregister returned we're guaranteed
+@@ -285,35 +302,43 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+ {
+ BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
++ spin_lock(&mm->mmu_notifier_mm->lock);
+ if (!hlist_unhashed(&mn->hlist)) {
+- /*
+- * RCU here will force exit_mmap to wait ->release to finish
+- * before freeing the pages.
+- */
+- rcu_read_lock();
++ int id;
+
+ /*
+- * exit_mmap will block in mmu_notifier_release to
+- * guarantee ->release is called before freeing the
+- * pages.
++ * Ensure we synchronize up with __mmu_notifier_release().
+ */
++ id = srcu_read_lock(&srcu);
++
++ hlist_del_rcu(&mn->hlist);
++ spin_unlock(&mm->mmu_notifier_mm->lock);
++
+ if (mn->ops->release)
+ mn->ops->release(mn, mm);
+- rcu_read_unlock();
+
+- spin_lock(&mm->mmu_notifier_mm->lock);
+- hlist_del_rcu(&mn->hlist);
++ /*
++ * Allow __mmu_notifier_release() to complete.
++ */
++ srcu_read_unlock(&srcu, id);
++ } else
+ spin_unlock(&mm->mmu_notifier_mm->lock);
+- }
+
+ /*
+- * Wait any running method to finish, of course including
+- * ->release if it was run by mmu_notifier_relase instead of us.
++ * Wait for any running method to finish, including ->release() if it
++ * was run by __mmu_notifier_release() instead of us.
+ */
+- synchronize_rcu();
++ synchronize_srcu(&srcu);
+
+ BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
+ mmdrop(mm);
+ }
+ EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
++
++static int __init mmu_notifier_init(void)
++{
++ return init_srcu_struct(&srcu);
++}
++
++module_init(mmu_notifier_init);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 4d3a697..5c028e2 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4253,10 +4253,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
+ * round what is now in bits to nearest long in bits, then return it in
+ * bytes.
+ */
+-static unsigned long __init usemap_size(unsigned long zonesize)
++static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
+ {
+ unsigned long usemapsize;
+
++ zonesize += zone_start_pfn & (pageblock_nr_pages-1);
+ usemapsize = roundup(zonesize, pageblock_nr_pages);
+ usemapsize = usemapsize >> pageblock_order;
+ usemapsize *= NR_PAGEBLOCK_BITS;
+@@ -4266,17 +4267,19 @@ static unsigned long __init usemap_size(unsigned long zonesize)
+ }
+
+ static void __init setup_usemap(struct pglist_data *pgdat,
+- struct zone *zone, unsigned long zonesize)
++ struct zone *zone,
++ unsigned long zone_start_pfn,
++ unsigned long zonesize)
+ {
+- unsigned long usemapsize = usemap_size(zonesize);
++ unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
+ zone->pageblock_flags = NULL;
+ if (usemapsize)
+ zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
+ usemapsize);
+ }
+ #else
+-static inline void setup_usemap(struct pglist_data *pgdat,
+- struct zone *zone, unsigned long zonesize) {}
++static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
++ unsigned long zone_start_pfn, unsigned long zonesize) {}
+ #endif /* CONFIG_SPARSEMEM */
+
+ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+@@ -4401,7 +4404,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
+ continue;
+
+ set_pageblock_order();
+- setup_usemap(pgdat, zone, size);
++ setup_usemap(pgdat, zone, zone_start_pfn, size);
+ ret = init_currently_empty_zone(zone, zone_start_pfn,
+ size, MEMMAP_EARLY);
+ BUG_ON(ret);
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 12b9e80..a78acf0 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2121,6 +2121,7 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
+ unsigned long inodes;
+ int error = -EINVAL;
+
++ config.mpol = NULL;
+ if (shmem_parse_options(data, &config, true))
+ return error;
+
+@@ -2145,8 +2146,13 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
+ sbinfo->max_inodes = config.max_inodes;
+ sbinfo->free_inodes = config.max_inodes - inodes;
+
+- mpol_put(sbinfo->mpol);
+- sbinfo->mpol = config.mpol; /* transfers initial ref */
++ /*
++ * Preserve previous mempolicy unless mpol remount option was specified.
++ */
++ if (config.mpol) {
++ mpol_put(sbinfo->mpol);
++ sbinfo->mpol = config.mpol; /* transfers initial ref */
++ }
+ out:
+ spin_unlock(&sbinfo->stat_lock);
+ return error;
+diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
+index e16aade..718cbe8 100644
+--- a/net/bridge/br_stp_bpdu.c
++++ b/net/bridge/br_stp_bpdu.c
+@@ -16,6 +16,7 @@
+ #include <linux/etherdevice.h>
+ #include <linux/llc.h>
+ #include <linux/slab.h>
++#include <linux/pkt_sched.h>
+ #include <net/net_namespace.h>
+ #include <net/llc.h>
+ #include <net/llc_pdu.h>
+@@ -40,6 +41,7 @@ static void br_send_bpdu(struct net_bridge_port *p,
+
+ skb->dev = p->dev;
+ skb->protocol = htons(ETH_P_802_2);
++ skb->priority = TC_PRIO_CONTROL;
+
+ skb_reserve(skb, LLC_RESERVE);
+ memcpy(__skb_put(skb, length), data, length);
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 1b5096a..5d228de 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -226,8 +226,12 @@ EXPORT_SYMBOL(inet_listen);
+ u32 inet_ehash_secret __read_mostly;
+ EXPORT_SYMBOL(inet_ehash_secret);
+
++u32 ipv6_hash_secret __read_mostly;
++EXPORT_SYMBOL(ipv6_hash_secret);
++
+ /*
+- * inet_ehash_secret must be set exactly once
++ * inet_ehash_secret must be set exactly once, and to a non nul value
++ * ipv6_hash_secret must be set exactly once.
+ */
+ void build_ehash_secret(void)
+ {
+@@ -237,7 +241,8 @@ void build_ehash_secret(void)
+ get_random_bytes(&rnd, sizeof(rnd));
+ } while (rnd == 0);
+
+- cmpxchg(&inet_ehash_secret, 0, rnd);
++ if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
++ get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
+ }
+ EXPORT_SYMBOL(build_ehash_secret);
+
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 43d4c3b..294a380 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -321,8 +321,8 @@ void ping_err(struct sk_buff *skb, u32 info)
+ struct iphdr *iph = (struct iphdr *)skb->data;
+ struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
+ struct inet_sock *inet_sock;
+- int type = icmph->type;
+- int code = icmph->code;
++ int type = icmp_hdr(skb)->type;
++ int code = icmp_hdr(skb)->code;
+ struct net *net = dev_net(skb->dev);
+ struct sock *sk;
+ int harderr;
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index 3282453..9acee9d 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -816,7 +816,6 @@ static void svc_age_temp_xprts(unsigned long closure)
+ struct svc_serv *serv = (struct svc_serv *)closure;
+ struct svc_xprt *xprt;
+ struct list_head *le, *next;
+- LIST_HEAD(to_be_aged);
+
+ dprintk("svc_age_temp_xprts\n");
+
+@@ -837,25 +836,15 @@ static void svc_age_temp_xprts(unsigned long closure)
+ if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
+ test_bit(XPT_BUSY, &xprt->xpt_flags))
+ continue;
+- svc_xprt_get(xprt);
+- list_move(le, &to_be_aged);
++ list_del_init(le);
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ set_bit(XPT_DETACHED, &xprt->xpt_flags);
+- }
+- spin_unlock_bh(&serv->sv_lock);
+-
+- while (!list_empty(&to_be_aged)) {
+- le = to_be_aged.next;
+- /* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
+- list_del_init(le);
+- xprt = list_entry(le, struct svc_xprt, xpt_list);
+-
+ dprintk("queuing xprt %p for closing\n", xprt);
+
+ /* a thread will dequeue and close it soon */
+ svc_xprt_enqueue(xprt);
+- svc_xprt_put(xprt);
+ }
++ spin_unlock_bh(&serv->sv_lock);
+
+ mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
+ }
+diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
+index 193ce81..42b876d 100644
+--- a/sound/drivers/aloop.c
++++ b/sound/drivers/aloop.c
+@@ -287,12 +287,14 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
+ loopback_active_notify(dpcm);
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++ case SNDRV_PCM_TRIGGER_SUSPEND:
+ spin_lock(&cable->lock);
+ cable->pause |= stream;
+ spin_unlock(&cable->lock);
+ loopback_timer_stop(dpcm);
+ break;
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++ case SNDRV_PCM_TRIGGER_RESUME:
+ spin_lock(&cable->lock);
+ dpcm->last_jiffies = jiffies;
+ cable->pause &= ~stream;
+@@ -552,7 +554,8 @@ static snd_pcm_uframes_t loopback_pointer(struct snd_pcm_substream *substream)
+ static struct snd_pcm_hardware loopback_pcm_hardware =
+ {
+ .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP |
+- SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE),
++ SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE |
++ SNDRV_PCM_INFO_RESUME),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |
+ SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE |
+ SNDRV_PCM_FMTBIT_FLOAT_LE | SNDRV_PCM_FMTBIT_FLOAT_BE),
+diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
+index ef85ac5..be662c9 100644
+--- a/sound/pci/ali5451/ali5451.c
++++ b/sound/pci/ali5451/ali5451.c
+@@ -1435,7 +1435,7 @@ static snd_pcm_uframes_t snd_ali_pointer(struct snd_pcm_substream *substream)
+
+ spin_lock(&codec->reg_lock);
+ if (!pvoice->running) {
+- spin_unlock_irq(&codec->reg_lock);
++ spin_unlock(&codec->reg_lock);
+ return 0;
+ }
+ outb(pvoice->number, ALI_REG(codec, ALI_GC_CIR));
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index bde2615..3c8bc6e 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -918,8 +918,12 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ if (!static_hdmi_pcm && eld->eld_valid) {
+ snd_hdmi_eld_update_pcm_info(eld, hinfo);
+ if (hinfo->channels_min > hinfo->channels_max ||
+- !hinfo->rates || !hinfo->formats)
++ !hinfo->rates || !hinfo->formats) {
++ per_cvt->assigned = 0;
++ hinfo->nid = 0;
++ snd_hda_spdif_ctls_unassign(codec, pin_idx);
+ return -ENODEV;
++ }
+ }
+
+ /* Store the updated parameters */
+@@ -983,6 +987,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
+ codec->addr, pin_nid, eld->monitor_present, eld_valid);
+
++ eld->eld_valid = false;
+ if (eld_valid) {
+ if (!snd_hdmi_get_eld(eld, codec, pin_nid))
+ snd_hdmi_show_eld(eld);
+diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
+index 21bcb47..62075a5 100644
+--- a/sound/pci/rme32.c
++++ b/sound/pci/rme32.c
+@@ -1017,7 +1017,7 @@ static int snd_rme32_capture_close(struct snd_pcm_substream *substream)
+ spin_lock_irq(&rme32->lock);
+ rme32->capture_substream = NULL;
+ rme32->capture_periodsize = 0;
+- spin_unlock(&rme32->lock);
++ spin_unlock_irq(&rme32->lock);
+ return 0;
+ }
+
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 32d2a21..4e25148 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -1624,7 +1624,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ /* .vendor_name = "Roland", */
+ /* .product_name = "A-PRO", */
+- .ifnum = 1,
++ .ifnum = 0,
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .data = & (const struct snd_usb_midi_endpoint_info) {
+ .out_cables = 0x0003,
diff --git a/1040_linux-3.2.41.patch b/1040_linux-3.2.41.patch
new file mode 100644
index 00000000..0d27fcb9
--- /dev/null
+++ b/1040_linux-3.2.41.patch
@@ -0,0 +1,3865 @@
+diff --git a/Documentation/devicetree/bindings/tty/serial/of-serial.txt b/Documentation/devicetree/bindings/tty/serial/of-serial.txt
+index b8b27b0..3f89cbd 100644
+--- a/Documentation/devicetree/bindings/tty/serial/of-serial.txt
++++ b/Documentation/devicetree/bindings/tty/serial/of-serial.txt
+@@ -10,6 +10,9 @@ Required properties:
+ - "ns16850"
+ - "nvidia,tegra20-uart"
+ - "ibm,qpace-nwp-serial"
++ - "altr,16550-FIFO32"
++ - "altr,16550-FIFO64"
++ - "altr,16550-FIFO128"
+ - "serial" if the port type is unknown.
+ - reg : offset and length of the register set for the device.
+ - interrupts : should contain uart interrupt.
+diff --git a/Makefile b/Makefile
+index 47af1e9..95e6220 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 40
++SUBLEVEL = 41
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
+index 1049319..510456d 100644
+--- a/arch/arm/kernel/perf_event_v7.c
++++ b/arch/arm/kernel/perf_event_v7.c
+@@ -720,7 +720,7 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ /*
+ * PMXEVTYPER: Event selection reg
+ */
+-#define ARMV7_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
++#define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
+ #define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
+
+ /*
+diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
+index c335c76..a125c4b 100644
+--- a/arch/arm/mm/alignment.c
++++ b/arch/arm/mm/alignment.c
+@@ -749,7 +749,6 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ unsigned long instr = 0, instrptr;
+ int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
+ unsigned int type;
+- mm_segment_t fs;
+ unsigned int fault;
+ u16 tinstr = 0;
+ int isize = 4;
+@@ -760,16 +759,15 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+
+ instrptr = instruction_pointer(regs);
+
+- fs = get_fs();
+- set_fs(KERNEL_DS);
+ if (thumb_mode(regs)) {
+- fault = __get_user(tinstr, (u16 *)(instrptr & ~1));
++ u16 *ptr = (u16 *)(instrptr & ~1);
++ fault = probe_kernel_address(ptr, tinstr);
+ if (!fault) {
+ if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
+ IS_T32(tinstr)) {
+ /* Thumb-2 32-bit */
+ u16 tinst2 = 0;
+- fault = __get_user(tinst2, (u16 *)(instrptr+2));
++ fault = probe_kernel_address(ptr + 1, tinst2);
+ instr = (tinstr << 16) | tinst2;
+ thumb2_32b = 1;
+ } else {
+@@ -778,8 +776,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ }
+ }
+ } else
+- fault = __get_user(instr, (u32 *)instrptr);
+- set_fs(fs);
++ fault = probe_kernel_address(instrptr, instr);
+
+ if (fault) {
+ type = TYPE_FAULT;
+diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
+index 7c815b2..111691c 100644
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -409,7 +409,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
+ * If there isn't a second FP instruction, exit now. Note that
+ * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
+ */
+- if (fpexc ^ (FPEXC_EX | FPEXC_FP2V))
++ if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
+ goto exit;
+
+ /*
+diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
+index 21165a4..66ea9b8 100644
+--- a/arch/powerpc/include/asm/eeh.h
++++ b/arch/powerpc/include/asm/eeh.h
+@@ -61,7 +61,6 @@ void __init pci_addr_cache_build(void);
+ */
+ void eeh_add_device_tree_early(struct device_node *);
+ void eeh_add_device_tree_late(struct pci_bus *);
+-void eeh_add_sysfs_files(struct pci_bus *);
+
+ /**
+ * eeh_remove_device_recursive - undo EEH for device & children.
+@@ -106,8 +105,6 @@ static inline void eeh_add_device_tree_early(struct device_node *dn) { }
+
+ static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
+
+-static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }
+-
+ static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
+ #define EEH_POSSIBLE_ERROR(val, type) (0)
+ #define EEH_IO_ERROR_VALUE(size) (-1UL)
+diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
+index b10beef..e1612df 100644
+--- a/arch/powerpc/kernel/of_platform.c
++++ b/arch/powerpc/kernel/of_platform.c
+@@ -91,9 +91,6 @@ static int __devinit of_pci_phb_probe(struct platform_device *dev)
+ /* Add probed PCI devices to the device model */
+ pci_bus_add_devices(phb->bus);
+
+- /* sysfs files should only be added after devices are added */
+- eeh_add_sysfs_files(phb->bus);
+-
+ return 0;
+ }
+
+diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
+index a3cd949..458ed3b 100644
+--- a/arch/powerpc/kernel/pci-common.c
++++ b/arch/powerpc/kernel/pci-common.c
+@@ -1536,14 +1536,11 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
+ pcibios_allocate_bus_resources(bus);
+ pcibios_claim_one_bus(bus);
+
+- /* Fixup EEH */
+- eeh_add_device_tree_late(bus);
+-
+ /* Add new devices to global lists. Register in proc, sysfs. */
+ pci_bus_add_devices(bus);
+
+- /* sysfs files should only be added after devices are added */
+- eeh_add_sysfs_files(bus);
++ /* Fixup EEH */
++ eeh_add_device_tree_late(bus);
+ }
+ EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
+
+diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
+index 389e06b..5658690 100644
+--- a/arch/powerpc/platforms/pseries/eeh.c
++++ b/arch/powerpc/platforms/pseries/eeh.c
+@@ -1238,6 +1238,7 @@ static void eeh_add_device_late(struct pci_dev *dev)
+ pdn->pcidev = dev;
+
+ pci_addr_cache_insert_device(dev);
++ eeh_sysfs_add_device(dev);
+ }
+
+ void eeh_add_device_tree_late(struct pci_bus *bus)
+@@ -1256,29 +1257,6 @@ void eeh_add_device_tree_late(struct pci_bus *bus)
+ EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
+
+ /**
+- * eeh_add_sysfs_files - Add EEH sysfs files for the indicated PCI bus
+- * @bus: PCI bus
+- *
+- * This routine must be used to add EEH sysfs files for PCI
+- * devices which are attached to the indicated PCI bus. The PCI bus
+- * is added after system boot through hotplug or dlpar.
+- */
+-void eeh_add_sysfs_files(struct pci_bus *bus)
+-{
+- struct pci_dev *dev;
+-
+- list_for_each_entry(dev, &bus->devices, bus_list) {
+- eeh_sysfs_add_device(dev);
+- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+- struct pci_bus *subbus = dev->subordinate;
+- if (subbus)
+- eeh_add_sysfs_files(subbus);
+- }
+- }
+-}
+-EXPORT_SYMBOL_GPL(eeh_add_sysfs_files);
+-
+-/**
+ * eeh_remove_device - undo EEH setup for the indicated pci device
+ * @dev: pci device to be removed
+ *
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index 4c262f6..1e1caf56 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -162,6 +162,9 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ struct msi_desc *msidesc;
+ int *v;
+
++ if (type == PCI_CAP_ID_MSI && nvec > 1)
++ return 1;
++
+ v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+@@ -220,6 +223,9 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ struct msi_desc *msidesc;
+ struct msi_msg msg;
+
++ if (type == PCI_CAP_ID_MSI && nvec > 1)
++ return 1;
++
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+ __read_msi_msg(msidesc, &msg);
+ pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
+@@ -263,6 +269,9 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+ int ret = 0;
+ struct msi_desc *msidesc;
+
++ if (type == PCI_CAP_ID_MSI && nvec > 1)
++ return 1;
++
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+ struct physdev_map_pirq map_irq;
+ domid_t domid;
+diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
+index a0f768c..9f73037 100644
+--- a/crypto/ablkcipher.c
++++ b/crypto/ablkcipher.c
+@@ -388,9 +388,9 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ struct crypto_report_blkcipher rblkcipher;
+
+- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "ablkcipher");
+- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
+- alg->cra_ablkcipher.geniv ?: "<default>");
++ strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
++ strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
++ sizeof(rblkcipher.geniv));
+
+ rblkcipher.blocksize = alg->cra_blocksize;
+ rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
+@@ -469,9 +469,9 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ struct crypto_report_blkcipher rblkcipher;
+
+- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "givcipher");
+- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
+- alg->cra_ablkcipher.geniv ?: "<built-in>");
++ strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
++ strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
++ sizeof(rblkcipher.geniv));
+
+ rblkcipher.blocksize = alg->cra_blocksize;
+ rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
+diff --git a/crypto/aead.c b/crypto/aead.c
+index 04add3dc..479b7d1 100644
+--- a/crypto/aead.c
++++ b/crypto/aead.c
+@@ -117,9 +117,8 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
+ struct crypto_report_aead raead;
+ struct aead_alg *aead = &alg->cra_aead;
+
+- snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "aead");
+- snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s",
+- aead->geniv ?: "<built-in>");
++ strncpy(raead.type, "aead", sizeof(raead.type));
++ strncpy(raead.geniv, aead->geniv ?: "<built-in>", sizeof(raead.geniv));
+
+ raead.blocksize = alg->cra_blocksize;
+ raead.maxauthsize = aead->maxauthsize;
+@@ -203,8 +202,8 @@ static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
+ struct crypto_report_aead raead;
+ struct aead_alg *aead = &alg->cra_aead;
+
+- snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "nivaead");
+- snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s", aead->geniv);
++ strncpy(raead.type, "nivaead", sizeof(raead.type));
++ strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv));
+
+ raead.blocksize = alg->cra_blocksize;
+ raead.maxauthsize = aead->maxauthsize;
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index ac93c99..7fe1752 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -404,7 +404,7 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ struct crypto_report_hash rhash;
+
+- snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "ahash");
++ strncpy(rhash.type, "ahash", sizeof(rhash.type));
+
+ rhash.blocksize = alg->cra_blocksize;
+ rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
+diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
+index 1e61d1a..04f0f38 100644
+--- a/crypto/blkcipher.c
++++ b/crypto/blkcipher.c
+@@ -499,9 +499,9 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ struct crypto_report_blkcipher rblkcipher;
+
+- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "blkcipher");
+- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
+- alg->cra_blkcipher.geniv ?: "<default>");
++ strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
++ strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
++ sizeof(rblkcipher.geniv));
+
+ rblkcipher.blocksize = alg->cra_blocksize;
+ rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
+diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
+index 0605a2b..5b63b8d 100644
+--- a/crypto/crypto_user.c
++++ b/crypto/crypto_user.c
+@@ -71,7 +71,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ struct crypto_report_cipher rcipher;
+
+- snprintf(rcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "cipher");
++ strncpy(rcipher.type, "cipher", sizeof(rcipher.type));
+
+ rcipher.blocksize = alg->cra_blocksize;
+ rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
+@@ -90,8 +90,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ struct crypto_report_comp rcomp;
+
+- snprintf(rcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "compression");
+-
++ strncpy(rcomp.type, "compression", sizeof(rcomp.type));
+ NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS,
+ sizeof(struct crypto_report_comp), &rcomp);
+
+@@ -104,12 +103,14 @@ nla_put_failure:
+ static int crypto_report_one(struct crypto_alg *alg,
+ struct crypto_user_alg *ualg, struct sk_buff *skb)
+ {
+- memcpy(&ualg->cru_name, &alg->cra_name, sizeof(ualg->cru_name));
+- memcpy(&ualg->cru_driver_name, &alg->cra_driver_name,
+- sizeof(ualg->cru_driver_name));
+- memcpy(&ualg->cru_module_name, module_name(alg->cra_module),
+- CRYPTO_MAX_ALG_NAME);
+-
++ strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
++ strncpy(ualg->cru_driver_name, alg->cra_driver_name,
++ sizeof(ualg->cru_driver_name));
++ strncpy(ualg->cru_module_name, module_name(alg->cra_module),
++ sizeof(ualg->cru_module_name));
++
++ ualg->cru_type = 0;
++ ualg->cru_mask = 0;
+ ualg->cru_flags = alg->cra_flags;
+ ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
+
+@@ -118,8 +119,7 @@ static int crypto_report_one(struct crypto_alg *alg,
+ if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
+ struct crypto_report_larval rl;
+
+- snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval");
+-
++ strncpy(rl.type, "larval", sizeof(rl.type));
+ NLA_PUT(skb, CRYPTOCFGA_REPORT_LARVAL,
+ sizeof(struct crypto_report_larval), &rl);
+
+diff --git a/crypto/pcompress.c b/crypto/pcompress.c
+index 2e458e5..6f2a361 100644
+--- a/crypto/pcompress.c
++++ b/crypto/pcompress.c
+@@ -53,8 +53,7 @@ static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ struct crypto_report_comp rpcomp;
+
+- snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp");
+-
++ strncpy(rpcomp.type, "pcomp", sizeof(rpcomp.type));
+ NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS,
+ sizeof(struct crypto_report_comp), &rpcomp);
+
+diff --git a/crypto/rng.c b/crypto/rng.c
+index 64f864f..1966c1d 100644
+--- a/crypto/rng.c
++++ b/crypto/rng.c
+@@ -65,7 +65,7 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
+ {
+ struct crypto_report_rng rrng;
+
+- snprintf(rrng.type, CRYPTO_MAX_ALG_NAME, "%s", "rng");
++ strncpy(rrng.type, "rng", sizeof(rrng.type));
+
+ rrng.seedsize = alg->cra_rng.seedsize;
+
+diff --git a/crypto/shash.c b/crypto/shash.c
+index 9100912..f507294 100644
+--- a/crypto/shash.c
++++ b/crypto/shash.c
+@@ -530,7 +530,8 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
+ struct crypto_report_hash rhash;
+ struct shash_alg *salg = __crypto_shash_alg(alg);
+
+- snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "shash");
++ strncpy(rhash.type, "shash", sizeof(rhash.type));
++
+ rhash.blocksize = alg->cra_blocksize;
+ rhash.digestsize = salg->digestsize;
+
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 62c1325..87acc23 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -262,6 +262,46 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
++ { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */
++ { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */
++ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
++ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
++ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
++ { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
++ { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f25), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f26), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */
++ { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */
++ { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
++ { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
++ { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x8d0e), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x8d62), board_ahci }, /* Wellsburg AHCI */
++ { PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
+
+ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 1e888c9..8c6787a 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1262,11 +1262,9 @@ static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
+ /* the width of sector_t may be narrow for bit-shift */
+ sz = sec;
+ sz <<= 9;
+- mutex_lock(&bdev->bd_mutex);
+ bd_set_size(bdev, sz);
+ /* let user-space know about the new size */
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
+- mutex_unlock(&bdev->bd_mutex);
+
+ out:
+ return err;
+@@ -1836,11 +1834,15 @@ static int __init loop_init(void)
+ max_part = (1UL << part_shift) - 1;
+ }
+
+- if ((1UL << part_shift) > DISK_MAX_PARTS)
+- return -EINVAL;
++ if ((1UL << part_shift) > DISK_MAX_PARTS) {
++ err = -EINVAL;
++ goto misc_out;
++ }
+
+- if (max_loop > 1UL << (MINORBITS - part_shift))
+- return -EINVAL;
++ if (max_loop > 1UL << (MINORBITS - part_shift)) {
++ err = -EINVAL;
++ goto misc_out;
++ }
+
+ /*
+ * If max_loop is specified, create that many devices upfront.
+@@ -1858,8 +1860,10 @@ static int __init loop_init(void)
+ range = 1UL << MINORBITS;
+ }
+
+- if (register_blkdev(LOOP_MAJOR, "loop"))
+- return -EIO;
++ if (register_blkdev(LOOP_MAJOR, "loop")) {
++ err = -EIO;
++ goto misc_out;
++ }
+
+ blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
+ THIS_MODULE, loop_probe, NULL, NULL);
+@@ -1872,6 +1876,10 @@ static int __init loop_init(void)
+
+ printk(KERN_INFO "loop: module loaded\n");
+ return 0;
++
++misc_out:
++ misc_deregister(&loop_misc);
++ return err;
+ }
+
+ static int loop_exit_cb(int id, void *ptr, void *data)
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index 1bafb40..69ae597 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -40,6 +40,7 @@
+ #include <linux/init.h>
+ #include <linux/miscdevice.h>
+ #include <linux/delay.h>
++#include <linux/slab.h>
+ #include <asm/uaccess.h>
+
+
+@@ -52,8 +53,12 @@ static struct hwrng *current_rng;
+ static LIST_HEAD(rng_list);
+ static DEFINE_MUTEX(rng_mutex);
+ static int data_avail;
+-static u8 rng_buffer[SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES]
+- __cacheline_aligned;
++static u8 *rng_buffer;
++
++static size_t rng_buffer_size(void)
++{
++ return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
++}
+
+ static inline int hwrng_init(struct hwrng *rng)
+ {
+@@ -116,7 +121,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+
+ if (!data_avail) {
+ bytes_read = rng_get_data(current_rng, rng_buffer,
+- sizeof(rng_buffer),
++ rng_buffer_size(),
+ !(filp->f_flags & O_NONBLOCK));
+ if (bytes_read < 0) {
+ err = bytes_read;
+@@ -307,6 +312,14 @@ int hwrng_register(struct hwrng *rng)
+
+ mutex_lock(&rng_mutex);
+
++ /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
++ err = -ENOMEM;
++ if (!rng_buffer) {
++ rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
++ if (!rng_buffer)
++ goto out_unlock;
++ }
++
+ /* Must not register two RNGs with the same name. */
+ err = -EEXIST;
+ list_for_each_entry(tmp, &rng_list, list) {
+diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
+index fd699cc..86ed591 100644
+--- a/drivers/char/hw_random/virtio-rng.c
++++ b/drivers/char/hw_random/virtio-rng.c
+@@ -89,14 +89,22 @@ static int virtrng_probe(struct virtio_device *vdev)
+ {
+ int err;
+
++ if (vq) {
++ /* We only support one device for now */
++ return -EBUSY;
++ }
+ /* We expect a single virtqueue. */
+ vq = virtio_find_single_vq(vdev, random_recv_done, "input");
+- if (IS_ERR(vq))
+- return PTR_ERR(vq);
++ if (IS_ERR(vq)) {
++ err = PTR_ERR(vq);
++ vq = NULL;
++ return err;
++ }
+
+ err = hwrng_register(&virtio_hwrng);
+ if (err) {
+ vdev->config->del_vqs(vdev);
++ vq = NULL;
+ return err;
+ }
+
+@@ -108,6 +116,7 @@ static void __devexit virtrng_remove(struct virtio_device *vdev)
+ vdev->config->reset(vdev);
+ hwrng_unregister(&virtio_hwrng);
+ vdev->config->del_vqs(vdev);
++ vq = NULL;
+ }
+
+ static struct virtio_device_id id_table[] = {
+diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
+index 77e1e6c..46bbf43 100644
+--- a/drivers/connector/cn_proc.c
++++ b/drivers/connector/cn_proc.c
+@@ -303,6 +303,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
+ if (msg->len != sizeof(*mc_op))
+ return;
+
++ /* Can only change if privileged. */
++ if (!capable(CAP_NET_ADMIN)) {
++ err = EPERM;
++ goto out;
++ }
++
+ mc_op = (enum proc_cn_mcast_op*)msg->data;
+ switch (*mc_op) {
+ case PROC_CN_MCAST_LISTEN:
+@@ -315,6 +321,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
+ err = EINVAL;
+ break;
+ }
++
++out:
+ cn_proc_ack(err, msg->seq, msg->ack);
+ }
+
+diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
+index c5072a9..4bf374d 100644
+--- a/drivers/cpufreq/cpufreq_stats.c
++++ b/drivers/cpufreq/cpufreq_stats.c
+@@ -330,6 +330,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
+ cpufreq_update_policy(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
++ case CPU_DOWN_PREPARE_FROZEN:
+ cpufreq_stats_free_sysfs(cpu);
+ break;
+ case CPU_DEAD:
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 982f1f5..4cd392d 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -442,7 +442,6 @@ static int __init dmi_present(const char __iomem *p)
+ static int __init smbios_present(const char __iomem *p)
+ {
+ u8 buf[32];
+- int offset = 0;
+
+ memcpy_fromio(buf, p, 32);
+ if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) {
+@@ -461,9 +460,9 @@ static int __init smbios_present(const char __iomem *p)
+ dmi_ver = 0x0206;
+ break;
+ }
+- offset = 16;
++ return memcmp(p + 16, "_DMI_", 5) || dmi_present(p + 16);
+ }
+- return dmi_present(buf + offset);
++ return 1;
+ }
+
+ void __init dmi_scan_machine(void)
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index 5d5a868..81346ae 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -393,10 +393,11 @@ static efi_status_t
+ get_var_data(struct efivars *efivars, struct efi_variable *var)
+ {
+ efi_status_t status;
++ unsigned long flags;
+
+- spin_lock(&efivars->lock);
++ spin_lock_irqsave(&efivars->lock, flags);
+ status = get_var_data_locked(efivars, var);
+- spin_unlock(&efivars->lock);
++ spin_unlock_irqrestore(&efivars->lock, flags);
+
+ if (status != EFI_SUCCESS) {
+ printk(KERN_WARNING "efivars: get_variable() failed 0x%lx!\n",
+@@ -405,6 +406,30 @@ get_var_data(struct efivars *efivars, struct efi_variable *var)
+ return status;
+ }
+
++static efi_status_t
++check_var_size_locked(struct efivars *efivars, u32 attributes,
++ unsigned long size)
++{
++ u64 storage_size, remaining_size, max_size;
++ efi_status_t status;
++ const struct efivar_operations *fops = efivars->ops;
++
++ if (!efivars->ops->query_variable_info)
++ return EFI_UNSUPPORTED;
++
++ status = fops->query_variable_info(attributes, &storage_size,
++ &remaining_size, &max_size);
++
++ if (status != EFI_SUCCESS)
++ return status;
++
++ if (!storage_size || size > remaining_size || size > max_size ||
++ (remaining_size - size) < (storage_size / 2))
++ return EFI_OUT_OF_RESOURCES;
++
++ return status;
++}
++
+ static ssize_t
+ efivar_guid_read(struct efivar_entry *entry, char *buf)
+ {
+@@ -525,14 +550,19 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
+ return -EINVAL;
+ }
+
+- spin_lock(&efivars->lock);
+- status = efivars->ops->set_variable(new_var->VariableName,
+- &new_var->VendorGuid,
+- new_var->Attributes,
+- new_var->DataSize,
+- new_var->Data);
++ spin_lock_irq(&efivars->lock);
++
++ status = check_var_size_locked(efivars, new_var->Attributes,
++ new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
+
+- spin_unlock(&efivars->lock);
++ if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
++ status = efivars->ops->set_variable(new_var->VariableName,
++ &new_var->VendorGuid,
++ new_var->Attributes,
++ new_var->DataSize,
++ new_var->Data);
++
++ spin_unlock_irq(&efivars->lock);
+
+ if (status != EFI_SUCCESS) {
+ printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
+@@ -637,13 +667,43 @@ efivar_unregister(struct efivar_entry *var)
+ kobject_put(&var->kobj);
+ }
+
++static int efi_status_to_err(efi_status_t status)
++{
++ int err;
++
++ switch (status) {
++ case EFI_INVALID_PARAMETER:
++ err = -EINVAL;
++ break;
++ case EFI_OUT_OF_RESOURCES:
++ err = -ENOSPC;
++ break;
++ case EFI_DEVICE_ERROR:
++ err = -EIO;
++ break;
++ case EFI_WRITE_PROTECTED:
++ err = -EROFS;
++ break;
++ case EFI_SECURITY_VIOLATION:
++ err = -EACCES;
++ break;
++ case EFI_NOT_FOUND:
++ err = -ENOENT;
++ break;
++ default:
++ err = -EINVAL;
++ }
++
++ return err;
++}
++
+ #ifdef CONFIG_PSTORE
+
+ static int efi_pstore_open(struct pstore_info *psi)
+ {
+ struct efivars *efivars = psi->data;
+
+- spin_lock(&efivars->lock);
++ spin_lock_irq(&efivars->lock);
+ efivars->walk_entry = list_first_entry(&efivars->list,
+ struct efivar_entry, list);
+ return 0;
+@@ -653,7 +713,7 @@ static int efi_pstore_close(struct pstore_info *psi)
+ {
+ struct efivars *efivars = psi->data;
+
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+ return 0;
+ }
+
+@@ -706,11 +766,28 @@ static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+ struct efivars *efivars = psi->data;
+ struct efivar_entry *entry, *found = NULL;
+ int i, ret = 0;
++ efi_status_t status = EFI_NOT_FOUND;
++ unsigned long flags;
+
+ sprintf(stub_name, "dump-type%u-%u-", type, part);
+ sprintf(name, "%s%lu", stub_name, get_seconds());
+
+- spin_lock(&efivars->lock);
++ spin_lock_irqsave(&efivars->lock, flags);
++
++ /*
++ * Check if there is a space enough to log.
++ * size: a size of logging data
++ * DUMP_NAME_LEN * 2: a maximum size of variable name
++ */
++
++ status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES,
++ size + DUMP_NAME_LEN * 2);
++
++ if (status) {
++ spin_unlock_irqrestore(&efivars->lock, flags);
++ *id = part;
++ return -ENOSPC;
++ }
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ efi_name[i] = stub_name[i];
+@@ -748,7 +825,7 @@ static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+ efivars->ops->set_variable(efi_name, &vendor, PSTORE_EFI_ATTRIBUTES,
+ size, psi->buf);
+
+- spin_unlock(&efivars->lock);
++ spin_unlock_irqrestore(&efivars->lock, flags);
+
+ if (found)
+ efivar_unregister(found);
+@@ -831,7 +908,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
+ return -EINVAL;
+ }
+
+- spin_lock(&efivars->lock);
++ spin_lock_irq(&efivars->lock);
+
+ /*
+ * Does this variable already exist?
+@@ -849,10 +926,18 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
+ }
+ }
+ if (found) {
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+ return -EINVAL;
+ }
+
++ status = check_var_size_locked(efivars, new_var->Attributes,
++ new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
++
++ if (status && status != EFI_UNSUPPORTED) {
++ spin_unlock_irq(&efivars->lock);
++ return efi_status_to_err(status);
++ }
++
+ /* now *really* create the variable via EFI */
+ status = efivars->ops->set_variable(new_var->VariableName,
+ &new_var->VendorGuid,
+@@ -863,10 +948,10 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
+ if (status != EFI_SUCCESS) {
+ printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
+ status);
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+ return -EIO;
+ }
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+
+ /* Create the entry in sysfs. Locking is not required here */
+ status = efivar_create_sysfs_entry(efivars,
+@@ -894,7 +979,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+- spin_lock(&efivars->lock);
++ spin_lock_irq(&efivars->lock);
+
+ /*
+ * Does this variable already exist?
+@@ -912,7 +997,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
+ }
+ }
+ if (!found) {
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+ return -EINVAL;
+ }
+ /* force the Attributes/DataSize to 0 to ensure deletion */
+@@ -928,12 +1013,12 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
+ if (status != EFI_SUCCESS) {
+ printk(KERN_WARNING "efivars: set_variable() failed: status=%lx\n",
+ status);
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+ return -EIO;
+ }
+ list_del(&search_efivar->list);
+ /* We need to release this lock before unregistering. */
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+ efivar_unregister(search_efivar);
+
+ /* It's dead Jim.... */
+@@ -1041,9 +1126,9 @@ efivar_create_sysfs_entry(struct efivars *efivars,
+ kfree(short_name);
+ short_name = NULL;
+
+- spin_lock(&efivars->lock);
++ spin_lock_irq(&efivars->lock);
+ list_add(&new_efivar->list, &efivars->list);
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+
+ return 0;
+ }
+@@ -1112,9 +1197,9 @@ void unregister_efivars(struct efivars *efivars)
+ struct efivar_entry *entry, *n;
+
+ list_for_each_entry_safe(entry, n, &efivars->list, list) {
+- spin_lock(&efivars->lock);
++ spin_lock_irq(&efivars->lock);
+ list_del(&entry->list);
+- spin_unlock(&efivars->lock);
++ spin_unlock_irq(&efivars->lock);
+ efivar_unregister(entry);
+ }
+ if (efivars->new_var)
+@@ -1235,6 +1320,7 @@ efivars_init(void)
+ ops.get_variable = efi.get_variable;
+ ops.set_variable = efi.set_variable;
+ ops.get_next_variable = efi.get_next_variable;
++ ops.query_variable_info = efi.query_variable_info;
+ error = register_efivars(&__efivars, &ops, efi_kobj);
+ if (error)
+ goto err_put;
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 2303c2b..4591582 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -7280,8 +7280,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ {
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- struct intel_framebuffer *intel_fb;
+- struct drm_i915_gem_object *obj;
++ struct drm_framebuffer *old_fb = crtc->fb;
++ struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
+ unsigned long flags;
+@@ -7293,8 +7293,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+
+ work->event = event;
+ work->dev = crtc->dev;
+- intel_fb = to_intel_framebuffer(crtc->fb);
+- work->old_fb_obj = intel_fb->obj;
++ work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
+ INIT_WORK(&work->work, intel_unpin_work_fn);
+
+ ret = drm_vblank_get(dev, intel_crtc->pipe);
+@@ -7314,9 +7313,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+ intel_crtc->unpin_work = work;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+- intel_fb = to_intel_framebuffer(fb);
+- obj = intel_fb->obj;
+-
+ mutex_lock(&dev->struct_mutex);
+
+ /* Reference the objects for the scheduled work. */
+@@ -7347,6 +7343,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
+
+ cleanup_pending:
+ atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
++ crtc->fb = old_fb;
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index ec36dd9..c32fd93 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -958,6 +958,15 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
+ found = 1;
+ }
+
++ /* quirks */
++ /* Radeon 9100 (R200) */
++ if ((dev->pdev->device == 0x514D) &&
++ (dev->pdev->subsystem_vendor == 0x174B) &&
++ (dev->pdev->subsystem_device == 0x7149)) {
++ /* vbios value is bad, use the default */
++ found = 0;
++ }
++
+ if (!found) /* fallback to defaults */
+ radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
+
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index a23b63a..611aafc 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1533,6 +1533,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM, USB_DEVICE_ID_MTP_STM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX, USB_DEVICE_ID_MTP_SITRONIX) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 25f3290..e665bdf 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -644,6 +644,7 @@
+
+ #define USB_VENDOR_ID_SONY 0x054c
+ #define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b
++#define USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE 0x0374
+ #define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
+ #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
+
+diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
+index 5cd25bd..4142c21 100644
+--- a/drivers/hid/hid-sony.c
++++ b/drivers/hid/hid-sony.c
+@@ -44,9 +44,19 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ {
+ struct sony_sc *sc = hid_get_drvdata(hdev);
+
+- if ((sc->quirks & VAIO_RDESC_CONSTANT) &&
+- *rsize >= 56 && rdesc[54] == 0x81 && rdesc[55] == 0x07) {
+- hid_info(hdev, "Fixing up Sony Vaio VGX report descriptor\n");
++ /*
++ * Some Sony RF receivers wrongly declare the mouse pointer as a
++ * a constant non-data variable.
++ */
++ if ((sc->quirks & VAIO_RDESC_CONSTANT) && *rsize >= 56 &&
++ /* usage page: generic desktop controls */
++ /* rdesc[0] == 0x05 && rdesc[1] == 0x01 && */
++ /* usage: mouse */
++ rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
++ /* input (usage page for x,y axes): constant, variable, relative */
++ rdesc[54] == 0x81 && rdesc[55] == 0x07) {
++ hid_info(hdev, "Fixing up Sony RF Receiver report descriptor\n");
++ /* input: data, variable, relative */
+ rdesc[55] = 0x06;
+ }
+
+@@ -218,6 +228,8 @@ static const struct hid_device_id sony_devices[] = {
+ .driver_data = SIXAXIS_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE),
+ .driver_data = VAIO_RDESC_CONSTANT },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE),
++ .driver_data = VAIO_RDESC_CONSTANT },
+ { }
+ };
+ MODULE_DEVICE_TABLE(hid, sony_devices);
+diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
+index 89f5244..0e8343f 100644
+--- a/drivers/hv/hv_kvp.c
++++ b/drivers/hv/hv_kvp.c
+@@ -212,11 +212,13 @@ kvp_respond_to_host(char *key, char *value, int error)
+ * The windows host expects the key/value pair to be encoded
+ * in utf16.
+ */
+- keylen = utf8s_to_utf16s(key_name, strlen(key_name),
+- (wchar_t *)kvp_data->data.key);
++ keylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN,
++ (wchar_t *) kvp_data->data.key,
++ HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2);
+ kvp_data->data.key_size = 2*(keylen + 1); /* utf16 encoding */
+- valuelen = utf8s_to_utf16s(value, strlen(value),
+- (wchar_t *)kvp_data->data.value);
++ valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN,
++ (wchar_t *) kvp_data->data.value,
++ HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2);
+ kvp_data->data.value_size = 2*(valuelen + 1); /* utf16 encoding */
+
+ kvp_data->data.value_type = REG_SZ; /* all our values are strings */
+diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
+index 58eded2..c9910f7 100644
+--- a/drivers/hwmon/lineage-pem.c
++++ b/drivers/hwmon/lineage-pem.c
+@@ -421,6 +421,7 @@ static struct attribute *pem_input_attributes[] = {
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_power1_input.dev_attr.attr,
++ NULL
+ };
+
+ static const struct attribute_group pem_input_group = {
+@@ -431,6 +432,7 @@ static struct attribute *pem_fan_attributes[] = {
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan3_input.dev_attr.attr,
++ NULL
+ };
+
+ static const struct attribute_group pem_fan_group = {
+diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
+index 820fff4..43c7414 100644
+--- a/drivers/hwmon/pmbus/ltc2978.c
++++ b/drivers/hwmon/pmbus/ltc2978.c
+@@ -59,10 +59,10 @@ enum chips { ltc2978, ltc3880 };
+ struct ltc2978_data {
+ enum chips id;
+ int vin_min, vin_max;
+- int temp_min, temp_max;
++ int temp_min, temp_max[2];
+ int vout_min[8], vout_max[8];
+ int iout_max[2];
+- int temp2_max[2];
++ int temp2_max;
+ struct pmbus_driver_info info;
+ };
+
+@@ -113,9 +113,10 @@ static int ltc2978_read_word_data_common(struct i2c_client *client, int page,
+ ret = pmbus_read_word_data(client, page,
+ LTC2978_MFR_TEMPERATURE_PEAK);
+ if (ret >= 0) {
+- if (lin11_to_val(ret) > lin11_to_val(data->temp_max))
+- data->temp_max = ret;
+- ret = data->temp_max;
++ if (lin11_to_val(ret)
++ > lin11_to_val(data->temp_max[page]))
++ data->temp_max[page] = ret;
++ ret = data->temp_max[page];
+ }
+ break;
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+@@ -204,10 +205,9 @@ static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg)
+ ret = pmbus_read_word_data(client, page,
+ LTC3880_MFR_TEMPERATURE2_PEAK);
+ if (ret >= 0) {
+- if (lin11_to_val(ret)
+- > lin11_to_val(data->temp2_max[page]))
+- data->temp2_max[page] = ret;
+- ret = data->temp2_max[page];
++ if (lin11_to_val(ret) > lin11_to_val(data->temp2_max))
++ data->temp2_max = ret;
++ ret = data->temp2_max;
+ }
+ break;
+ case PMBUS_VIRT_READ_VIN_MIN:
+@@ -248,11 +248,11 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
+
+ switch (reg) {
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+- data->iout_max[page] = 0x7fff;
++ data->iout_max[page] = 0x7c00;
+ ret = ltc2978_clear_peaks(client, page, data->id);
+ break;
+ case PMBUS_VIRT_RESET_TEMP2_HISTORY:
+- data->temp2_max[page] = 0x7fff;
++ data->temp2_max = 0x7c00;
+ ret = ltc2978_clear_peaks(client, page, data->id);
+ break;
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+@@ -262,12 +262,12 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
+ break;
+ case PMBUS_VIRT_RESET_VIN_HISTORY:
+ data->vin_min = 0x7bff;
+- data->vin_max = 0;
++ data->vin_max = 0x7c00;
+ ret = ltc2978_clear_peaks(client, page, data->id);
+ break;
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ data->temp_min = 0x7bff;
+- data->temp_max = 0x7fff;
++ data->temp_max[page] = 0x7c00;
+ ret = ltc2978_clear_peaks(client, page, data->id);
+ break;
+ default:
+@@ -323,12 +323,14 @@ static int ltc2978_probe(struct i2c_client *client,
+ info = &data->info;
+ info->write_word_data = ltc2978_write_word_data;
+
+- data->vout_min[0] = 0xffff;
+ data->vin_min = 0x7bff;
++ data->vin_max = 0x7c00;
+ data->temp_min = 0x7bff;
+- data->temp_max = 0x7fff;
++ for (i = 0; i < ARRAY_SIZE(data->temp_max); i++)
++ data->temp_max[i] = 0x7c00;
++ data->temp2_max = 0x7c00;
+
+- switch (id->driver_data) {
++ switch (data->id) {
+ case ltc2978:
+ info->read_word_data = ltc2978_read_word_data;
+ info->pages = 8;
+@@ -338,7 +340,6 @@ static int ltc2978_probe(struct i2c_client *client,
+ for (i = 1; i < 8; i++) {
+ info->func[i] = PMBUS_HAVE_VOUT
+ | PMBUS_HAVE_STATUS_VOUT;
+- data->vout_min[i] = 0xffff;
+ }
+ break;
+ case ltc3880:
+@@ -354,12 +355,15 @@ static int ltc2978_probe(struct i2c_client *client,
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+- data->vout_min[1] = 0xffff;
++ data->iout_max[0] = 0x7c00;
++ data->iout_max[1] = 0x7c00;
+ break;
+ default:
+ ret = -ENODEV;
+ goto err_mem;
+ }
++ for (i = 0; i < info->pages; i++)
++ data->vout_min[i] = 0xffff;
+
+ ret = pmbus_do_probe(client, id, info);
+ if (ret)
+diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
+index 5357925..3e3153e 100644
+--- a/drivers/hwmon/sht15.c
++++ b/drivers/hwmon/sht15.c
+@@ -926,7 +926,13 @@ static int __devinit sht15_probe(struct platform_device *pdev)
+ if (voltage)
+ data->supply_uV = voltage;
+
+- regulator_enable(data->reg);
++ ret = regulator_enable(data->reg);
++ if (ret != 0) {
++ dev_err(&pdev->dev,
++ "failed to enable regulator: %d\n", ret);
++ goto err_free_data;
++ }
++
+ /*
+ * Setup a notifier block to update this if another device
+ * causes the voltage to change
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 62a4d5c..b7d1cdd 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -1396,6 +1396,7 @@ static struct syscore_ops amd_iommu_syscore_ops = {
+ */
+ static int __init amd_iommu_init(void)
+ {
++ struct amd_iommu *iommu;
+ int i, ret = 0;
+
+ /*
+@@ -1444,9 +1445,6 @@ static int __init amd_iommu_init(void)
+ if (amd_iommu_pd_alloc_bitmap == NULL)
+ goto free;
+
+- /* init the device table */
+- init_device_table();
+-
+ /*
+ * let all alias entries point to itself
+ */
+@@ -1496,6 +1494,12 @@ static int __init amd_iommu_init(void)
+ if (ret)
+ goto free_disable;
+
++ /* init the device table */
++ init_device_table();
++
++ for_each_iommu(iommu)
++ iommu_flush_all_caches(iommu);
++
+ amd_iommu_init_api();
+
+ amd_iommu_init_notifier();
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 58d8c6d..aa142f9 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1262,20 +1262,6 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
+ return 0;
+ }
+
+-/*
+- * Encode key into its hex representation
+- */
+-static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
+-{
+- unsigned int i;
+-
+- for (i = 0; i < size; i++) {
+- sprintf(hex, "%02x", *key);
+- hex += 2;
+- key++;
+- }
+-}
+-
+ static void crypt_free_tfms(struct crypt_config *cc, int cpu)
+ {
+ struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
+@@ -1739,11 +1725,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
+ return DM_MAPIO_SUBMITTED;
+ }
+
+-static int crypt_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned int maxlen)
++static void crypt_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ struct crypt_config *cc = ti->private;
+- unsigned int sz = 0;
++ unsigned i, sz = 0;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+@@ -1753,17 +1739,11 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
+ case STATUSTYPE_TABLE:
+ DMEMIT("%s ", cc->cipher_string);
+
+- if (cc->key_size > 0) {
+- if ((maxlen - sz) < ((cc->key_size << 1) + 1))
+- return -ENOMEM;
+-
+- crypt_encode_key(result + sz, cc->key, cc->key_size);
+- sz += cc->key_size << 1;
+- } else {
+- if (sz >= maxlen)
+- return -ENOMEM;
+- result[sz++] = '-';
+- }
++ if (cc->key_size > 0)
++ for (i = 0; i < cc->key_size; i++)
++ DMEMIT("%02x", cc->key[i]);
++ else
++ DMEMIT("-");
+
+ DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
+ cc->dev->name, (unsigned long long)cc->start);
+@@ -1773,7 +1753,6 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
+
+ break;
+ }
+- return 0;
+ }
+
+ static void crypt_postsuspend(struct dm_target *ti)
+@@ -1867,7 +1846,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
+
+ static struct target_type crypt_target = {
+ .name = "crypt",
+- .version = {1, 11, 0},
++ .version = {1, 11, 1},
+ .module = THIS_MODULE,
+ .ctr = crypt_ctr,
+ .dtr = crypt_dtr,
+diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
+index f18375d..11431ac 100644
+--- a/drivers/md/dm-delay.c
++++ b/drivers/md/dm-delay.c
+@@ -293,8 +293,8 @@ static int delay_map(struct dm_target *ti, struct bio *bio,
+ return delay_bio(dc, dc->read_delay, bio);
+ }
+
+-static int delay_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned maxlen)
++static void delay_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ struct delay_c *dc = ti->private;
+ int sz = 0;
+@@ -314,8 +314,6 @@ static int delay_status(struct dm_target *ti, status_type_t type,
+ dc->write_delay);
+ break;
+ }
+-
+- return 0;
+ }
+
+ static int delay_iterate_devices(struct dm_target *ti,
+@@ -337,7 +335,7 @@ out:
+
+ static struct target_type delay_target = {
+ .name = "delay",
+- .version = {1, 1, 0},
++ .version = {1, 1, 1},
+ .module = THIS_MODULE,
+ .ctr = delay_ctr,
+ .dtr = delay_dtr,
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index b280c43..746b5e8 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -331,8 +331,8 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
+ return error;
+ }
+
+-static int flakey_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned int maxlen)
++static void flakey_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ unsigned sz = 0;
+ struct flakey_c *fc = ti->private;
+@@ -362,7 +362,6 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
+
+ break;
+ }
+- return 0;
+ }
+
+ static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
+@@ -405,7 +404,7 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
+
+ static struct target_type flakey_target = {
+ .name = "flakey",
+- .version = {1, 2, 0},
++ .version = {1, 2, 1},
+ .module = THIS_MODULE,
+ .ctr = flakey_ctr,
+ .dtr = flakey_dtr,
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 42c873f..e6a300c 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1065,6 +1065,7 @@ static void retrieve_status(struct dm_table *table,
+ num_targets = dm_table_get_num_targets(table);
+ for (i = 0; i < num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(table, i);
++ size_t l;
+
+ remaining = len - (outptr - outbuf);
+ if (remaining <= sizeof(struct dm_target_spec)) {
+@@ -1089,14 +1090,17 @@ static void retrieve_status(struct dm_table *table,
+
+ /* Get the status/table string from the target driver */
+ if (ti->type->status) {
+- if (ti->type->status(ti, type, outptr, remaining)) {
+- param->flags |= DM_BUFFER_FULL_FLAG;
+- break;
+- }
++ ti->type->status(ti, type, outptr, remaining);
+ } else
+ outptr[0] = '\0';
+
+- outptr += strlen(outptr) + 1;
++ l = strlen(outptr) + 1;
++ if (l == remaining) {
++ param->flags |= DM_BUFFER_FULL_FLAG;
++ break;
++ }
++
++ outptr += l;
+ used = param->data_start + (outptr - outbuf);
+
+ outptr = align_ptr(outptr);
+diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
+index 9728839..c55d8e4 100644
+--- a/drivers/md/dm-linear.c
++++ b/drivers/md/dm-linear.c
+@@ -94,8 +94,8 @@ static int linear_map(struct dm_target *ti, struct bio *bio,
+ return DM_MAPIO_REMAPPED;
+ }
+
+-static int linear_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned int maxlen)
++static void linear_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ struct linear_c *lc = (struct linear_c *) ti->private;
+
+@@ -109,7 +109,6 @@ static int linear_status(struct dm_target *ti, status_type_t type,
+ (unsigned long long)lc->start);
+ break;
+ }
+- return 0;
+ }
+
+ static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
+@@ -154,7 +153,7 @@ static int linear_iterate_devices(struct dm_target *ti,
+
+ static struct target_type linear_target = {
+ .name = "linear",
+- .version = {1, 1, 0},
++ .version = {1, 1, 1},
+ .module = THIS_MODULE,
+ .ctr = linear_ctr,
+ .dtr = linear_dtr,
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index a417f94..7e766f9 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -1323,8 +1323,8 @@ static void multipath_resume(struct dm_target *ti)
+ * [priority selector-name num_ps_args [ps_args]*
+ * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
+ */
+-static int multipath_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned int maxlen)
++static void multipath_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ int sz = 0;
+ unsigned long flags;
+@@ -1427,8 +1427,6 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
+ }
+
+ spin_unlock_irqrestore(&m->lock, flags);
+-
+- return 0;
+ }
+
+ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
+@@ -1623,7 +1621,7 @@ out:
+ *---------------------------------------------------------------*/
+ static struct target_type multipath_target = {
+ .name = "multipath",
+- .version = {1, 3, 0},
++ .version = {1, 3, 1},
+ .module = THIS_MODULE,
+ .ctr = multipath_ctr,
+ .dtr = multipath_dtr,
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index d2a3223..86862ea 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -1017,8 +1017,8 @@ static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_c
+ return DM_MAPIO_SUBMITTED;
+ }
+
+-static int raid_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned maxlen)
++static void raid_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ struct raid_set *rs = ti->private;
+ unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
+@@ -1153,8 +1153,6 @@ static int raid_status(struct dm_target *ti, status_type_t type,
+ DMEMIT(" -");
+ }
+ }
+-
+- return 0;
+ }
+
+ static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
+@@ -1208,7 +1206,7 @@ static void raid_resume(struct dm_target *ti)
+
+ static struct target_type raid_target = {
+ .name = "raid",
+- .version = {1, 1, 0},
++ .version = {1, 1, 1},
+ .module = THIS_MODULE,
+ .ctr = raid_ctr,
+ .dtr = raid_dtr,
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index dae2b7a..b7b649d 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -1358,8 +1358,8 @@ static char device_status_char(struct mirror *m)
+ }
+
+
+-static int mirror_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned int maxlen)
++static void mirror_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ unsigned int m, sz = 0;
+ struct mirror_set *ms = (struct mirror_set *) ti->private;
+@@ -1394,8 +1394,6 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
+ if (ms->features & DM_RAID1_HANDLE_ERRORS)
+ DMEMIT(" 1 handle_errors");
+ }
+-
+- return 0;
+ }
+
+ static int mirror_iterate_devices(struct dm_target *ti,
+@@ -1414,7 +1412,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
+
+ static struct target_type mirror_target = {
+ .name = "mirror",
+- .version = {1, 12, 1},
++ .version = {1, 12, 2},
+ .module = THIS_MODULE,
+ .ctr = mirror_ctr,
+ .dtr = mirror_dtr,
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index 6f75887..34ec2b5 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1845,8 +1845,8 @@ static void snapshot_merge_resume(struct dm_target *ti)
+ start_merge(s);
+ }
+
+-static int snapshot_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned int maxlen)
++static void snapshot_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ unsigned sz = 0;
+ struct dm_snapshot *snap = ti->private;
+@@ -1892,8 +1892,6 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
+ maxlen - sz);
+ break;
+ }
+-
+- return 0;
+ }
+
+ static int snapshot_iterate_devices(struct dm_target *ti,
+@@ -2148,8 +2146,8 @@ static void origin_resume(struct dm_target *ti)
+ ti->split_io = get_origin_minimum_chunksize(dev->bdev);
+ }
+
+-static int origin_status(struct dm_target *ti, status_type_t type, char *result,
+- unsigned int maxlen)
++static void origin_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ struct dm_dev *dev = ti->private;
+
+@@ -2162,8 +2160,6 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
+ snprintf(result, maxlen, "%s", dev->name);
+ break;
+ }
+-
+- return 0;
+ }
+
+ static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+@@ -2191,7 +2187,7 @@ static int origin_iterate_devices(struct dm_target *ti,
+
+ static struct target_type origin_target = {
+ .name = "snapshot-origin",
+- .version = {1, 7, 1},
++ .version = {1, 7, 2},
+ .module = THIS_MODULE,
+ .ctr = origin_ctr,
+ .dtr = origin_dtr,
+@@ -2204,7 +2200,7 @@ static struct target_type origin_target = {
+
+ static struct target_type snapshot_target = {
+ .name = "snapshot",
+- .version = {1, 10, 0},
++ .version = {1, 10, 1},
+ .module = THIS_MODULE,
+ .ctr = snapshot_ctr,
+ .dtr = snapshot_dtr,
+@@ -2327,3 +2323,5 @@ module_exit(dm_snapshot_exit);
+ MODULE_DESCRIPTION(DM_NAME " snapshot target");
+ MODULE_AUTHOR("Joe Thornber");
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS("dm-snapshot-origin");
++MODULE_ALIAS("dm-snapshot-merge");
+diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
+index 3d80cf0..cbd41d2 100644
+--- a/drivers/md/dm-stripe.c
++++ b/drivers/md/dm-stripe.c
+@@ -301,8 +301,8 @@ static int stripe_map(struct dm_target *ti, struct bio *bio,
+ *
+ */
+
+-static int stripe_status(struct dm_target *ti,
+- status_type_t type, char *result, unsigned int maxlen)
++static void stripe_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ struct stripe_c *sc = (struct stripe_c *) ti->private;
+ char buffer[sc->stripes + 1];
+@@ -329,7 +329,6 @@ static int stripe_status(struct dm_target *ti,
+ (unsigned long long)sc->stripe[i].physical_start);
+ break;
+ }
+- return 0;
+ }
+
+ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
+@@ -418,7 +417,7 @@ static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
+
+ static struct target_type stripe_target = {
+ .name = "striped",
+- .version = {1, 4, 0},
++ .version = {1, 4, 1},
+ .module = THIS_MODULE,
+ .ctr = stripe_ctr,
+ .dtr = stripe_dtr,
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index d432032..da4d299 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2090,8 +2090,8 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
+ * <transaction id> <used metadata sectors>/<total metadata sectors>
+ * <used data sectors>/<total data sectors> <held metadata root>
+ */
+-static int pool_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned maxlen)
++static void pool_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ int r;
+ unsigned sz = 0;
+@@ -2108,32 +2108,41 @@ static int pool_status(struct dm_target *ti, status_type_t type,
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+- r = dm_pool_get_metadata_transaction_id(pool->pmd,
+- &transaction_id);
+- if (r)
+- return r;
++ r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
++ if (r) {
++ DMERR("dm_pool_get_metadata_transaction_id returned %d", r);
++ goto err;
++ }
+
+- r = dm_pool_get_free_metadata_block_count(pool->pmd,
+- &nr_free_blocks_metadata);
+- if (r)
+- return r;
++ r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
++ if (r) {
++ DMERR("dm_pool_get_free_metadata_block_count returned %d", r);
++ goto err;
++ }
+
+ r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
+- if (r)
+- return r;
++ if (r) {
++ DMERR("dm_pool_get_metadata_dev_size returned %d", r);
++ goto err;
++ }
+
+- r = dm_pool_get_free_block_count(pool->pmd,
+- &nr_free_blocks_data);
+- if (r)
+- return r;
++ r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
++ if (r) {
++ DMERR("dm_pool_get_free_block_count returned %d", r);
++ goto err;
++ }
+
+ r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
+- if (r)
+- return r;
++ if (r) {
++ DMERR("dm_pool_get_data_dev_size returned %d", r);
++ goto err;
++ }
+
+ r = dm_pool_get_held_metadata_root(pool->pmd, &held_root);
+- if (r)
+- return r;
++ if (r) {
++ DMERR("dm_pool_get_held_metadata_root returned %d", r);
++ goto err;
++ }
+
+ DMEMIT("%llu %llu/%llu %llu/%llu ",
+ (unsigned long long)transaction_id,
+@@ -2162,8 +2171,10 @@ static int pool_status(struct dm_target *ti, status_type_t type,
+ DMEMIT("skip_block_zeroing ");
+ break;
+ }
++ return;
+
+- return 0;
++err:
++ DMEMIT("Error");
+ }
+
+ static int pool_iterate_devices(struct dm_target *ti,
+@@ -2201,7 +2212,7 @@ static struct target_type pool_target = {
+ .name = "thin-pool",
+ .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
+ DM_TARGET_IMMUTABLE,
+- .version = {1, 0, 0},
++ .version = {1, 0, 1},
+ .module = THIS_MODULE,
+ .ctr = pool_ctr,
+ .dtr = pool_dtr,
+@@ -2339,8 +2350,8 @@ static void thin_postsuspend(struct dm_target *ti)
+ /*
+ * <nr mapped sectors> <highest mapped sector>
+ */
+-static int thin_status(struct dm_target *ti, status_type_t type,
+- char *result, unsigned maxlen)
++static void thin_status(struct dm_target *ti, status_type_t type,
++ char *result, unsigned maxlen)
+ {
+ int r;
+ ssize_t sz = 0;
+@@ -2354,12 +2365,16 @@ static int thin_status(struct dm_target *ti, status_type_t type,
+ switch (type) {
+ case STATUSTYPE_INFO:
+ r = dm_thin_get_mapped_count(tc->td, &mapped);
+- if (r)
+- return r;
++ if (r) {
++ DMERR("dm_thin_get_mapped_count returned %d", r);
++ goto err;
++ }
+
+ r = dm_thin_get_highest_mapped_block(tc->td, &highest);
+- if (r < 0)
+- return r;
++ if (r < 0) {
++ DMERR("dm_thin_get_highest_mapped_block returned %d", r);
++ goto err;
++ }
+
+ DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
+ if (r)
+@@ -2377,7 +2392,10 @@ static int thin_status(struct dm_target *ti, status_type_t type,
+ }
+ }
+
+- return 0;
++ return;
++
++err:
++ DMEMIT("Error");
+ }
+
+ static int thin_iterate_devices(struct dm_target *ti,
+@@ -2410,7 +2428,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
+
+ static struct target_type thin_target = {
+ .name = "thin",
+- .version = {1, 0, 0},
++ .version = {1, 0, 1},
+ .module = THIS_MODULE,
+ .ctr = thin_ctr,
+ .dtr = thin_dtr,
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 145e378e..1702133 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -345,6 +345,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
+ bio_io_error(bio);
+ return;
+ }
++ if (mddev->ro == 1 && unlikely(rw == WRITE)) {
++ bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
++ return;
++ }
+ smp_rmb(); /* Ensure implications of 'active' are visible */
+ rcu_read_lock();
+ if (mddev->suspended) {
+@@ -2838,6 +2842,9 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
+ } else if (!sectors)
+ sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
+ rdev->data_offset;
++ if (!my_mddev->pers->resize)
++ /* Cannot change size for RAID0 or Linear etc */
++ return -EINVAL;
+ }
+ if (sectors < my_mddev->dev_sectors)
+ return -EINVAL; /* component must fit device */
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 7294bd1..d3e6f35 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -286,7 +286,7 @@ abort:
+ kfree(conf->strip_zone);
+ kfree(conf->devlist);
+ kfree(conf);
+- *private_conf = NULL;
++ *private_conf = ERR_PTR(err);
+ return err;
+ }
+
+@@ -330,7 +330,8 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
+ "%s does not support generic reshape\n", __func__);
+
+ list_for_each_entry(rdev, &mddev->disks, same_set)
+- array_sectors += rdev->sectors;
++ array_sectors += (rdev->sectors &
++ ~(sector_t)(mddev->chunk_sectors-1));
+
+ return array_sectors;
+ }
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 0182649..a783530 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -5402,7 +5402,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ */
+ e1000e_release_hw_control(adapter);
+
+- pci_disable_device(pdev);
++ pci_clear_master(pdev);
+
+ return 0;
+ }
+diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
+index ad14fec..f1c32a5 100644
+--- a/drivers/net/wireless/ath/ath9k/common.h
++++ b/drivers/net/wireless/ath/ath9k/common.h
+@@ -35,7 +35,7 @@
+ #define WME_AC_BK 3
+ #define WME_NUM_AC 4
+
+-#define ATH_RSSI_DUMMY_MARKER 0x127
++#define ATH_RSSI_DUMMY_MARKER 127
+ #define ATH_RSSI_LPF_LEN 10
+ #define RSSI_LPF_THRESHOLD -20
+ #define ATH_RSSI_EP_MULTIPLIER (1<<7)
+diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
+index da55967..09be407 100644
+--- a/drivers/net/wireless/ath/ath9k/htc.h
++++ b/drivers/net/wireless/ath/ath9k/htc.h
+@@ -22,6 +22,7 @@
+ #include <linux/firmware.h>
+ #include <linux/skbuff.h>
+ #include <linux/netdevice.h>
++#include <linux/etherdevice.h>
+ #include <linux/leds.h>
+ #include <linux/slab.h>
+ #include <net/mac80211.h>
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+index 2d81c70..a48bb83 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+@@ -1069,15 +1069,19 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
+
+ last_rssi = priv->rx.last_rssi;
+
+- if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+- rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
+- ATH_RSSI_EP_MULTIPLIER);
++ if (ieee80211_is_beacon(hdr->frame_control) &&
++ !is_zero_ether_addr(common->curbssid) &&
++ compare_ether_addr(hdr->addr3, common->curbssid) == 0) {
++ s8 rssi = rxbuf->rxstatus.rs_rssi;
+
+- if (rxbuf->rxstatus.rs_rssi < 0)
+- rxbuf->rxstatus.rs_rssi = 0;
++ if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
++ rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+
+- if (ieee80211_is_beacon(fc))
+- priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
++ if (rssi < 0)
++ rssi = 0;
++
++ priv->ah->stats.avgbrssi = rssi;
++ }
+
+ rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
+ rx_status->band = hw->conf.channel->band;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+index 8533ba2..f081d53 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+@@ -180,6 +180,15 @@ struct iwl_queue {
+ #define TFD_TX_CMD_SLOTS 256
+ #define TFD_CMD_SLOTS 32
+
++/*
++ * The FH will write back to the first TB only, so we need
++ * to copy some data into the buffer regardless of whether
++ * it should be mapped or not. This indicates how much to
++ * copy, even for HCMDs it must be big enough to fit the
++ * DRAM scratch from the TX cmd, at least 16 bytes.
++ */
++#define IWL_HCMD_MIN_COPY_SIZE 16
++
+ struct iwl_tx_queue {
+ struct iwl_queue q;
+ struct iwl_tfd *tfds;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+index 4a0c953..e6b3853 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
++++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+@@ -688,11 +688,13 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+ dma_addr_t phys_addr;
+ unsigned long flags;
+ u32 idx;
+- u16 copy_size, cmd_size;
++ u16 copy_size, cmd_size, dma_size;
+ bool is_ct_kill = false;
+ bool had_nocopy = false;
+ int i;
+ u8 *cmd_dest;
++ const u8 *cmddata[IWL_MAX_CMD_TFDS];
++ u16 cmdlen[IWL_MAX_CMD_TFDS];
+ #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+ const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
+ int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
+@@ -717,15 +719,30 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+ BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
+
+ for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
++ cmddata[i] = cmd->data[i];
++ cmdlen[i] = cmd->len[i];
++
+ if (!cmd->len[i])
+ continue;
++
++ /* need at least IWL_HCMD_MIN_COPY_SIZE copied */
++ if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
++ int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
++
++ if (copy > cmdlen[i])
++ copy = cmdlen[i];
++ cmdlen[i] -= copy;
++ cmddata[i] += copy;
++ copy_size += copy;
++ }
++
+ if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
+ had_nocopy = true;
+ } else {
+ /* NOCOPY must not be followed by normal! */
+ if (WARN_ON(had_nocopy))
+ return -EINVAL;
+- copy_size += cmd->len[i];
++ copy_size += cmdlen[i];
+ }
+ cmd_size += cmd->len[i];
+ }
+@@ -778,13 +795,30 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+ /* and copy the data that needs to be copied */
+
+ cmd_dest = out_cmd->payload;
++ copy_size = sizeof(out_cmd->hdr);
+ for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+- if (!cmd->len[i])
++ int copy = 0;
++
++ if (!cmd->len)
+ continue;
+- if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
+- break;
+- memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
+- cmd_dest += cmd->len[i];
++
++ /* need at least IWL_HCMD_MIN_COPY_SIZE copied */
++ if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
++ copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
++
++ if (copy > cmd->len[i])
++ copy = cmd->len[i];
++ }
++
++ /* copy everything if not nocopy/dup */
++ if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
++ copy = cmd->len[i];
++
++ if (copy) {
++ memcpy(cmd_dest, cmd->data[i], copy);
++ cmd_dest += copy;
++ copy_size += copy;
++ }
+ }
+
+ IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
+@@ -794,7 +828,14 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+ le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
+ q->write_ptr, idx, trans->shrd->cmd_queue);
+
+- phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
++ /*
++ * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must
++ * still map at least that many bytes for the hardware to write back to.
++ * We have enough space, so that's not a problem.
++ */
++ dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE);
++
++ phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, dma_size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
+ idx = -ENOMEM;
+@@ -802,7 +843,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+ }
+
+ dma_unmap_addr_set(out_meta, mapping, phys_addr);
+- dma_unmap_len_set(out_meta, len, copy_size);
++ dma_unmap_len_set(out_meta, len, dma_size);
+
+ iwlagn_txq_attach_buf_to_tfd(trans, txq,
+ phys_addr, copy_size, 1);
+@@ -812,14 +853,15 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+ trace_idx = 1;
+ #endif
+
++ /* map the remaining (adjusted) nocopy/dup fragments */
+ for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+- if (!cmd->len[i])
++ if (!cmdlen[i])
+ continue;
+ if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
+ continue;
+ phys_addr = dma_map_single(bus(trans)->dev,
+- (void *)cmd->data[i],
+- cmd->len[i], DMA_BIDIRECTIONAL);
++ (void *)cmddata[i],
++ cmdlen[i], DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
+ iwlagn_unmap_tfd(trans, out_meta,
+ &txq->tfds[q->write_ptr],
+@@ -829,10 +871,10 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+ }
+
+ iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
+- cmd->len[i], 0);
++ cmdlen[i], 0);
+ #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+- trace_bufs[trace_idx] = cmd->data[i];
+- trace_lens[trace_idx] = cmd->len[i];
++ trace_bufs[trace_idx] = cmddata[i];
++ trace_lens[trace_idx] = cmdlen[i];
+ trace_idx++;
+ #endif
+ }
+diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
+index de94244..3cf4ecc 100644
+--- a/drivers/net/wireless/mwifiex/pcie.c
++++ b/drivers/net/wireless/mwifiex/pcie.c
+@@ -290,7 +290,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
+ i++;
+ udelay(10);
+ /* 50ms max wait */
+- if (i == 50000)
++ if (i == 5000)
+ break;
+ }
+
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index fc35308..9b9843e 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1707,7 +1707,6 @@ static void netback_changed(struct xenbus_device *dev,
+ case XenbusStateInitialised:
+ case XenbusStateReconfiguring:
+ case XenbusStateReconfigured:
+- case XenbusStateConnected:
+ case XenbusStateUnknown:
+ case XenbusStateClosed:
+ break;
+@@ -1718,6 +1717,9 @@ static void netback_changed(struct xenbus_device *dev,
+ if (xennet_connect(netdev) != 0)
+ break;
+ xenbus_switch_state(dev, XenbusStateConnected);
++ break;
++
++ case XenbusStateConnected:
+ netif_notify_peers(netdev);
+ break;
+
+diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
+index f5b718d..aed7756 100644
+--- a/drivers/scsi/dc395x.c
++++ b/drivers/scsi/dc395x.c
+@@ -3747,13 +3747,13 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
+ dcb->max_command = 1;
+ dcb->target_id = target;
+ dcb->target_lun = lun;
++ dcb->dev_mode = eeprom->target[target].cfg0;
+ #ifndef DC395x_NO_DISCONNECT
+ dcb->identify_msg =
+ IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
+ #else
+ dcb->identify_msg = IDENTIFY(0, lun);
+ #endif
+- dcb->dev_mode = eeprom->target[target].cfg0;
+ dcb->inquiry7 = 0;
+ dcb->sync_mode = 0;
+ dcb->min_nego_period = clock_period[period_index];
+diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
+index abc5ac5..8960f1a 100644
+--- a/drivers/staging/hv/storvsc_drv.c
++++ b/drivers/staging/hv/storvsc_drv.c
+@@ -815,6 +815,7 @@ static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
+ if (!bounce_sgl)
+ return NULL;
+
++ sg_init_table(bounce_sgl, num_pages);
+ for (i = 0; i < num_pages; i++) {
+ page_buf = alloc_page(GFP_ATOMIC);
+ if (!page_buf)
+diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
+index ae62d57..754d54e 100644
+--- a/drivers/staging/vt6656/main_usb.c
++++ b/drivers/staging/vt6656/main_usb.c
+@@ -718,8 +718,6 @@ static int vt6656_suspend(struct usb_interface *intf, pm_message_t message)
+ if (device->flags & DEVICE_FLAGS_OPENED)
+ device_close(device->dev);
+
+- usb_put_dev(interface_to_usbdev(intf));
+-
+ return 0;
+ }
+
+@@ -730,8 +728,6 @@ static int vt6656_resume(struct usb_interface *intf)
+ if (!device || !device->dev)
+ return -ENODEV;
+
+- usb_get_dev(interface_to_usbdev(intf));
+-
+ if (!(device->flags & DEVICE_FLAGS_OPENED))
+ device_open(device->dev);
+
+diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
+index 6748568..cff03e5 100644
+--- a/drivers/tty/serial/8250.c
++++ b/drivers/tty/serial/8250.c
+@@ -322,6 +322,27 @@ static const struct serial8250_config uart_config[] = {
+ .tx_loadsz = 1024,
+ .flags = UART_CAP_HFIFO,
+ },
++ [PORT_ALTR_16550_F32] = {
++ .name = "Altera 16550 FIFO32",
++ .fifo_size = 32,
++ .tx_loadsz = 32,
++ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
++ .flags = UART_CAP_FIFO | UART_CAP_AFE,
++ },
++ [PORT_ALTR_16550_F64] = {
++ .name = "Altera 16550 FIFO64",
++ .fifo_size = 64,
++ .tx_loadsz = 64,
++ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
++ .flags = UART_CAP_FIFO | UART_CAP_AFE,
++ },
++ [PORT_ALTR_16550_F128] = {
++ .name = "Altera 16550 FIFO128",
++ .fifo_size = 128,
++ .tx_loadsz = 128,
++ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
++ .flags = UART_CAP_FIFO | UART_CAP_AFE,
++ },
+ };
+
+ #if defined(CONFIG_MIPS_ALCHEMY)
+diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
+index a753956..6986256 100644
+--- a/drivers/tty/serial/8250_pci.c
++++ b/drivers/tty/serial/8250_pci.c
+@@ -1154,6 +1154,7 @@ pci_xr17c154_setup(struct serial_private *priv,
+
+ /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
++#define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588
+
+ /*
+ * Master list of serial port init/setup/exit quirks.
+@@ -1418,15 +1419,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ },
+ {
+ .vendor = PCI_VENDOR_ID_PLX,
+- .device = PCI_DEVICE_ID_PLX_9050,
+- .subvendor = PCI_VENDOR_ID_PLX,
+- .subdevice = PCI_SUBDEVICE_ID_UNKNOWN_0x1584,
+- .init = pci_plx9050_init,
+- .setup = pci_default_setup,
+- .exit = __devexit_p(pci_plx9050_exit),
+- },
+- {
+- .vendor = PCI_VENDOR_ID_PLX,
+ .device = PCI_DEVICE_ID_PLX_ROMULUS,
+ .subvendor = PCI_VENDOR_ID_PLX,
+ .subdevice = PCI_DEVICE_ID_PLX_ROMULUS,
+@@ -3120,7 +3112,12 @@ static struct pci_device_id serial_pci_tbl[] = {
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_VENDOR_ID_PLX,
+ PCI_SUBDEVICE_ID_UNKNOWN_0x1584, 0, 0,
+- pbn_b0_4_115200 },
++ pbn_b2_4_115200 },
++ /* Unknown card - subdevice 0x1588 */
++ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
++ PCI_VENDOR_ID_PLX,
++ PCI_SUBDEVICE_ID_UNKNOWN_0x1588, 0, 0,
++ pbn_b2_8_115200 },
+ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ PCI_SUBVENDOR_ID_KEYSPAN,
+ PCI_SUBDEVICE_ID_KEYSPAN_SX2, 0, 0,
+@@ -4086,6 +4083,10 @@ static struct pci_device_id serial_pci_tbl[] = {
+ PCI_VENDOR_ID_IBM, 0x0299,
+ 0, 0, pbn_b0_bt_2_115200 },
+
++ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835,
++ 0x1000, 0x0012,
++ 0, 0, pbn_b0_bt_2_115200 },
++
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
+index 925a1e5..3771277 100644
+--- a/drivers/tty/serial/Kconfig
++++ b/drivers/tty/serial/Kconfig
+@@ -464,7 +464,7 @@ config SERIAL_SAMSUNG_UARTS_4
+ config SERIAL_SAMSUNG_UARTS
+ int
+ depends on ARM && PLAT_SAMSUNG
+- default 6 if ARCH_S5P6450
++ default 6 if CPU_S5P6450
+ default 4 if SERIAL_SAMSUNG_UARTS_4
+ default 3
+ help
+diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
+index e8c9cee..6563cad 100644
+--- a/drivers/tty/serial/of_serial.c
++++ b/drivers/tty/serial/of_serial.c
+@@ -182,6 +182,12 @@ static struct of_device_id __devinitdata of_platform_serial_table[] = {
+ { .compatible = "ns16750", .data = (void *)PORT_16750, },
+ { .compatible = "ns16850", .data = (void *)PORT_16850, },
+ { .compatible = "nvidia,tegra20-uart", .data = (void *)PORT_TEGRA, },
++ { .compatible = "altr,16550-FIFO32",
++ .data = (void *)PORT_ALTR_16550_F32, },
++ { .compatible = "altr,16550-FIFO64",
++ .data = (void *)PORT_ALTR_16550_F64, },
++ { .compatible = "altr,16550-FIFO128",
++ .data = (void *)PORT_ALTR_16550_F128, },
+ #ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
+ { .compatible = "ibm,qpace-nwp-serial",
+ .data = (void *)PORT_NWPSERIAL, },
+diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
+index 6c9b7cd..4f02f9c 100644
+--- a/drivers/tty/tty_buffer.c
++++ b/drivers/tty/tty_buffer.c
+@@ -114,11 +114,14 @@ static void __tty_buffer_flush(struct tty_struct *tty)
+ {
+ struct tty_buffer *thead;
+
+- while ((thead = tty->buf.head) != NULL) {
+- tty->buf.head = thead->next;
+- tty_buffer_free(tty, thead);
++ if (tty->buf.head == NULL)
++ return;
++ while ((thead = tty->buf.head->next) != NULL) {
++ tty_buffer_free(tty, tty->buf.head);
++ tty->buf.head = thead;
+ }
+- tty->buf.tail = NULL;
++ WARN_ON(tty->buf.head != tty->buf.tail);
++ tty->buf.head->read = tty->buf.head->commit;
+ }
+
+ /**
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 97b2c55..fe8c04b 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -70,6 +70,7 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
+ #define WDM_POLL_RUNNING 6
+ #define WDM_RESPONDING 7
+ #define WDM_SUSPENDING 8
++#define WDM_OVERFLOW 10
+
+ #define WDM_MAX 16
+
+@@ -134,6 +135,7 @@ static void wdm_in_callback(struct urb *urb)
+ {
+ struct wdm_device *desc = urb->context;
+ int status = urb->status;
++ int length = urb->actual_length;
+
+ spin_lock(&desc->iuspin);
+ clear_bit(WDM_RESPONDING, &desc->flags);
+@@ -164,9 +166,17 @@ static void wdm_in_callback(struct urb *urb)
+ }
+
+ desc->rerr = status;
+- desc->reslength = urb->actual_length;
+- memmove(desc->ubuf + desc->length, desc->inbuf, desc->reslength);
+- desc->length += desc->reslength;
++ if (length + desc->length > desc->wMaxCommand) {
++ /* The buffer would overflow */
++ set_bit(WDM_OVERFLOW, &desc->flags);
++ } else {
++ /* we may already be in overflow */
++ if (!test_bit(WDM_OVERFLOW, &desc->flags)) {
++ memmove(desc->ubuf + desc->length, desc->inbuf, length);
++ desc->length += length;
++ desc->reslength = length;
++ }
++ }
+ skip_error:
+ wake_up(&desc->wait);
+
+@@ -433,6 +443,11 @@ retry:
+ rv = -ENODEV;
+ goto err;
+ }
++ if (test_bit(WDM_OVERFLOW, &desc->flags)) {
++ clear_bit(WDM_OVERFLOW, &desc->flags);
++ rv = -ENOBUFS;
++ goto err;
++ }
+ i++;
+ if (file->f_flags & O_NONBLOCK) {
+ if (!test_bit(WDM_READ, &desc->flags)) {
+@@ -472,6 +487,7 @@ retry:
+ spin_unlock_irq(&desc->iuspin);
+ goto retry;
+ }
++
+ if (!desc->reslength) { /* zero length read */
+ dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
+ clear_bit(WDM_READ, &desc->flags);
+@@ -926,6 +942,7 @@ static int wdm_post_reset(struct usb_interface *intf)
+ struct wdm_device *desc = usb_get_intfdata(intf);
+ int rv;
+
++ clear_bit(WDM_OVERFLOW, &desc->flags);
+ rv = recover_from_urb_loss(desc);
+ mutex_unlock(&desc->wlock);
+ mutex_unlock(&desc->rlock);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 2564d8d..22cbe06 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2148,70 +2148,35 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ if ((portstatus & USB_PORT_STAT_RESET))
+ goto delay;
+
+- /*
+- * Some buggy devices require a warm reset to be issued even
+- * when the port appears not to be connected.
++ if (hub_port_warm_reset_required(hub, portstatus))
++ return -ENOTCONN;
++
++ /* Device went away? */
++ if (!(portstatus & USB_PORT_STAT_CONNECTION))
++ return -ENOTCONN;
++
++ /* bomb out completely if the connection bounced. A USB 3.0
++ * connection may bounce if multiple warm resets were issued,
++ * but the device may have successfully re-connected. Ignore it.
+ */
+- if (!warm) {
+- /*
+- * Some buggy devices can cause an NEC host controller
+- * to transition to the "Error" state after a hot port
+- * reset. This will show up as the port state in
+- * "Inactive", and the port may also report a
+- * disconnect. Forcing a warm port reset seems to make
+- * the device work.
+- *
+- * See https://bugzilla.kernel.org/show_bug.cgi?id=41752
+- */
+- if (hub_port_warm_reset_required(hub, portstatus)) {
+- int ret;
+-
+- if ((portchange & USB_PORT_STAT_C_CONNECTION))
+- clear_port_feature(hub->hdev, port1,
+- USB_PORT_FEAT_C_CONNECTION);
+- if (portchange & USB_PORT_STAT_C_LINK_STATE)
+- clear_port_feature(hub->hdev, port1,
+- USB_PORT_FEAT_C_PORT_LINK_STATE);
+- if (portchange & USB_PORT_STAT_C_RESET)
+- clear_port_feature(hub->hdev, port1,
+- USB_PORT_FEAT_C_RESET);
+- dev_dbg(hub->intfdev, "hot reset failed, warm reset port %d\n",
+- port1);
+- ret = hub_port_reset(hub, port1,
+- udev, HUB_BH_RESET_TIME,
+- true);
+- if ((portchange & USB_PORT_STAT_C_CONNECTION))
+- clear_port_feature(hub->hdev, port1,
+- USB_PORT_FEAT_C_CONNECTION);
+- return ret;
+- }
+- /* Device went away? */
+- if (!(portstatus & USB_PORT_STAT_CONNECTION))
+- return -ENOTCONN;
+-
+- /* bomb out completely if the connection bounced */
+- if ((portchange & USB_PORT_STAT_C_CONNECTION))
+- return -ENOTCONN;
+-
+- if ((portstatus & USB_PORT_STAT_ENABLE)) {
+- if (hub_is_wusb(hub))
+- udev->speed = USB_SPEED_WIRELESS;
+- else if (hub_is_superspeed(hub->hdev))
+- udev->speed = USB_SPEED_SUPER;
+- else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
+- udev->speed = USB_SPEED_HIGH;
+- else if (portstatus & USB_PORT_STAT_LOW_SPEED)
+- udev->speed = USB_SPEED_LOW;
+- else
+- udev->speed = USB_SPEED_FULL;
++ if (!hub_is_superspeed(hub->hdev) &&
++ (portchange & USB_PORT_STAT_C_CONNECTION))
++ return -ENOTCONN;
++
++ if ((portstatus & USB_PORT_STAT_ENABLE)) {
++ if (!udev)
+ return 0;
+- }
+- } else {
+- if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
+- hub_port_warm_reset_required(hub,
+- portstatus))
+- return -ENOTCONN;
+
++ if (hub_is_wusb(hub))
++ udev->speed = USB_SPEED_WIRELESS;
++ else if (hub_is_superspeed(hub->hdev))
++ udev->speed = USB_SPEED_SUPER;
++ else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
++ udev->speed = USB_SPEED_HIGH;
++ else if (portstatus & USB_PORT_STAT_LOW_SPEED)
++ udev->speed = USB_SPEED_LOW;
++ else
++ udev->speed = USB_SPEED_FULL;
+ return 0;
+ }
+
+@@ -2229,16 +2194,16 @@ delay:
+ }
+
+ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
+- struct usb_device *udev, int *status, bool warm)
++ struct usb_device *udev, int *status)
+ {
+ switch (*status) {
+ case 0:
+- if (!warm) {
+- struct usb_hcd *hcd;
+- /* TRSTRCY = 10 ms; plus some extra */
+- msleep(10 + 40);
++ /* TRSTRCY = 10 ms; plus some extra */
++ msleep(10 + 40);
++ if (udev) {
++ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++
+ update_devnum(udev, 0);
+- hcd = bus_to_hcd(udev->bus);
+ /* The xHC may think the device is already reset,
+ * so ignore the status.
+ */
+@@ -2250,14 +2215,15 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
+ case -ENODEV:
+ clear_port_feature(hub->hdev,
+ port1, USB_PORT_FEAT_C_RESET);
+- /* FIXME need disconnect() for NOTATTACHED device */
+ if (hub_is_superspeed(hub->hdev)) {
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_BH_PORT_RESET);
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_PORT_LINK_STATE);
++ clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_CONNECTION);
+ }
+- if (!warm)
++ if (udev)
+ usb_set_device_state(udev, *status
+ ? USB_STATE_NOTATTACHED
+ : USB_STATE_DEFAULT);
+@@ -2270,18 +2236,30 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ struct usb_device *udev, unsigned int delay, bool warm)
+ {
+ int i, status;
++ u16 portchange, portstatus;
+
+- if (!warm) {
+- /* Block EHCI CF initialization during the port reset.
+- * Some companion controllers don't like it when they mix.
+- */
+- down_read(&ehci_cf_port_reset_rwsem);
+- } else {
+- if (!hub_is_superspeed(hub->hdev)) {
++ if (!hub_is_superspeed(hub->hdev)) {
++ if (warm) {
+ dev_err(hub->intfdev, "only USB3 hub support "
+ "warm reset\n");
+ return -EINVAL;
+ }
++ /* Block EHCI CF initialization during the port reset.
++ * Some companion controllers don't like it when they mix.
++ */
++ down_read(&ehci_cf_port_reset_rwsem);
++ } else if (!warm) {
++ /*
++ * If the caller hasn't explicitly requested a warm reset,
++ * double check and see if one is needed.
++ */
++ status = hub_port_status(hub, port1,
++ &portstatus, &portchange);
++ if (status < 0)
++ goto done;
++
++ if (hub_port_warm_reset_required(hub, portstatus))
++ warm = true;
+ }
+
+ /* Reset the port */
+@@ -2302,10 +2280,33 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ status);
+ }
+
+- /* return on disconnect or reset */
++ /* Check for disconnect or reset */
+ if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
+- hub_port_finish_reset(hub, port1, udev, &status, warm);
+- goto done;
++ hub_port_finish_reset(hub, port1, udev, &status);
++
++ if (!hub_is_superspeed(hub->hdev))
++ goto done;
++
++ /*
++ * If a USB 3.0 device migrates from reset to an error
++ * state, re-issue the warm reset.
++ */
++ if (hub_port_status(hub, port1,
++ &portstatus, &portchange) < 0)
++ goto done;
++
++ if (!hub_port_warm_reset_required(hub, portstatus))
++ goto done;
++
++ /*
++ * If the port is in SS.Inactive or Compliance Mode, the
++ * hot or warm reset failed. Try another warm reset.
++ */
++ if (!warm) {
++ dev_dbg(hub->intfdev, "hot reset failed, warm reset port %d\n",
++ port1);
++ warm = true;
++ }
+ }
+
+ dev_dbg (hub->intfdev,
+@@ -2319,7 +2320,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ port1);
+
+ done:
+- if (!warm)
++ if (!hub_is_superspeed(hub->hdev))
+ up_read(&ehci_cf_port_reset_rwsem);
+
+ return status;
+@@ -3735,12 +3736,21 @@ static void hub_events(void)
+ */
+ if (hub_port_warm_reset_required(hub, portstatus)) {
+ int status;
++ struct usb_device *udev =
++ hub->hdev->children[i - 1];
+
+ dev_dbg(hub_dev, "warm reset port %d\n", i);
+- status = hub_port_reset(hub, i, NULL,
+- HUB_BH_RESET_TIME, true);
+- if (status < 0)
+- hub_port_disable(hub, i, 1);
++ if (!udev) {
++ status = hub_port_reset(hub, i,
++ NULL, HUB_BH_RESET_TIME,
++ true);
++ if (status < 0)
++ hub_port_disable(hub, i, 1);
++ } else {
++ usb_lock_device(udev);
++ status = usb_reset_device(udev);
++ usb_unlock_device(udev);
++ }
+ connect_change = 0;
+ }
+
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 381d00d..913a178 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -91,6 +91,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
+ { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
+ { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
++ { USB_DEVICE(0x2405, 0x0003) }, /* West Mountain Radio RIGblaster Advantage */
+ { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
+ { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
+ { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
+@@ -156,6 +157,25 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
++ { USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */
++ { USB_DEVICE(0x1FB9, 0x0200) }, /* Lake Shore Model 218A Temperature Monitor */
++ { USB_DEVICE(0x1FB9, 0x0201) }, /* Lake Shore Model 219 Temperature Monitor */
++ { USB_DEVICE(0x1FB9, 0x0202) }, /* Lake Shore Model 233 Temperature Transmitter */
++ { USB_DEVICE(0x1FB9, 0x0203) }, /* Lake Shore Model 235 Temperature Transmitter */
++ { USB_DEVICE(0x1FB9, 0x0300) }, /* Lake Shore Model 335 Temperature Controller */
++ { USB_DEVICE(0x1FB9, 0x0301) }, /* Lake Shore Model 336 Temperature Controller */
++ { USB_DEVICE(0x1FB9, 0x0302) }, /* Lake Shore Model 350 Temperature Controller */
++ { USB_DEVICE(0x1FB9, 0x0303) }, /* Lake Shore Model 371 AC Bridge */
++ { USB_DEVICE(0x1FB9, 0x0400) }, /* Lake Shore Model 411 Handheld Gaussmeter */
++ { USB_DEVICE(0x1FB9, 0x0401) }, /* Lake Shore Model 425 Gaussmeter */
++ { USB_DEVICE(0x1FB9, 0x0402) }, /* Lake Shore Model 455A Gaussmeter */
++ { USB_DEVICE(0x1FB9, 0x0403) }, /* Lake Shore Model 475A Gaussmeter */
++ { USB_DEVICE(0x1FB9, 0x0404) }, /* Lake Shore Model 465 Three Axis Gaussmeter */
++ { USB_DEVICE(0x1FB9, 0x0600) }, /* Lake Shore Model 625A Superconducting MPS */
++ { USB_DEVICE(0x1FB9, 0x0601) }, /* Lake Shore Model 642A Magnet Power Supply */
++ { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
++ { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
++ { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
+ { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
+ { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 24a3ea6..4418538 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -341,6 +341,8 @@ static void option_instat_callback(struct urb *urb);
+ #define CINTERION_PRODUCT_EU3_E 0x0051
+ #define CINTERION_PRODUCT_EU3_P 0x0052
+ #define CINTERION_PRODUCT_PH8 0x0053
++#define CINTERION_PRODUCT_AH6 0x0055
++#define CINTERION_PRODUCT_PLS8 0x0060
+
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID 0x0b3c
+@@ -579,6 +581,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+@@ -1260,6 +1263,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) },
++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
+diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
+index 87271e3..153d719 100644
+--- a/drivers/usb/serial/qcaux.c
++++ b/drivers/usb/serial/qcaux.c
+@@ -69,6 +69,7 @@ static struct usb_device_id id_table[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfd, 0xff) }, /* NMEA */
+ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xfe, 0xff) }, /* WMC */
+ { USB_VENDOR_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, 0xff, 0xff, 0xff) }, /* DIAG */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x1fac, 0x0151, 0xff, 0xff, 0xff) },
+ { },
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
+index 7ab9046..105d900 100644
+--- a/drivers/usb/storage/initializers.c
++++ b/drivers/usb/storage/initializers.c
+@@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
+ return 0;
+ }
+
+-/* This places the HUAWEI usb dongles in multi-port mode */
+-static int usb_stor_huawei_feature_init(struct us_data *us)
++/* This places the HUAWEI E220 devices in multi-port mode */
++int usb_stor_huawei_e220_init(struct us_data *us)
+ {
+ int result;
+
+@@ -104,75 +104,3 @@ static int usb_stor_huawei_feature_init(struct us_data *us)
+ US_DEBUGP("Huawei mode set result is %d\n", result);
+ return 0;
+ }
+-
+-/*
+- * It will send a scsi switch command called rewind' to huawei dongle.
+- * When the dongle receives this command at the first time,
+- * it will reboot immediately. After rebooted, it will ignore this command.
+- * So it is unnecessary to read its response.
+- */
+-static int usb_stor_huawei_scsi_init(struct us_data *us)
+-{
+- int result = 0;
+- int act_len = 0;
+- struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf;
+- char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00,
+- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+-
+- bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+- bcbw->Tag = 0;
+- bcbw->DataTransferLength = 0;
+- bcbw->Flags = bcbw->Lun = 0;
+- bcbw->Length = sizeof(rewind_cmd);
+- memset(bcbw->CDB, 0, sizeof(bcbw->CDB));
+- memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd));
+-
+- result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw,
+- US_BULK_CB_WRAP_LEN, &act_len);
+- US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result);
+- return result;
+-}
+-
+-/*
+- * It tries to find the supported Huawei USB dongles.
+- * In Huawei, they assign the following product IDs
+- * for all of their mobile broadband dongles,
+- * including the new dongles in the future.
+- * So if the product ID is not included in this list,
+- * it means it is not Huawei's mobile broadband dongles.
+- */
+-static int usb_stor_huawei_dongles_pid(struct us_data *us)
+-{
+- struct usb_interface_descriptor *idesc;
+- int idProduct;
+-
+- idesc = &us->pusb_intf->cur_altsetting->desc;
+- idProduct = le16_to_cpu(us->pusb_dev->descriptor.idProduct);
+- /* The first port is CDROM,
+- * means the dongle in the single port mode,
+- * and a switch command is required to be sent. */
+- if (idesc && idesc->bInterfaceNumber == 0) {
+- if ((idProduct == 0x1001)
+- || (idProduct == 0x1003)
+- || (idProduct == 0x1004)
+- || (idProduct >= 0x1401 && idProduct <= 0x1500)
+- || (idProduct >= 0x1505 && idProduct <= 0x1600)
+- || (idProduct >= 0x1c02 && idProduct <= 0x2202)) {
+- return 1;
+- }
+- }
+- return 0;
+-}
+-
+-int usb_stor_huawei_init(struct us_data *us)
+-{
+- int result = 0;
+-
+- if (usb_stor_huawei_dongles_pid(us)) {
+- if (le16_to_cpu(us->pusb_dev->descriptor.idProduct) >= 0x1446)
+- result = usb_stor_huawei_scsi_init(us);
+- else
+- result = usb_stor_huawei_feature_init(us);
+- }
+- return result;
+-}
+diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h
+index 5376d4f..529327f 100644
+--- a/drivers/usb/storage/initializers.h
++++ b/drivers/usb/storage/initializers.h
+@@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us);
+ * flash reader */
+ int usb_stor_ucr61s2b_init(struct us_data *us);
+
+-/* This places the HUAWEI usb dongles in multi-port mode */
+-int usb_stor_huawei_init(struct us_data *us);
++/* This places the HUAWEI E220 devices in multi-port mode */
++int usb_stor_huawei_e220_init(struct us_data *us);
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 12640ef..fa8a1b2 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1515,10 +1515,335 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100,
+ /* Reported by fangxiaozhi <huananhu@huawei.com>
+ * This brings the HUAWEI data card devices into multi-port mode
+ */
+-UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50,
++UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000,
+ "HUAWEI MOBILE",
+ "Mass Storage",
+- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init,
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
++ 0),
++UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000,
++ "HUAWEI MOBILE",
++ "Mass Storage",
++ USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+ 0),
+
+ /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt) */
+diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
+index c374978..5149d4e 100644
+--- a/drivers/w1/w1.c
++++ b/drivers/w1/w1.c
+@@ -918,7 +918,8 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
+ tmp64 = (triplet_ret >> 2);
+ rn |= (tmp64 << i);
+
+- if (kthread_should_stop()) {
++ /* ensure we're called from kthread and not by netlink callback */
++ if (!dev->priv && kthread_should_stop()) {
+ dev_dbg(&dev->dev, "Abort w1_search\n");
+ return;
+ }
+diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
+index d07c4cd..e947e78 100644
+--- a/drivers/xen/xen-pciback/pciback_ops.c
++++ b/drivers/xen/xen-pciback/pciback_ops.c
+@@ -114,7 +114,8 @@ void xen_pcibk_reset_device(struct pci_dev *dev)
+ if (dev->msi_enabled)
+ pci_disable_msi(dev);
+ #endif
+- pci_disable_device(dev);
++ if (pci_is_enabled(dev))
++ pci_disable_device(dev);
+
+ pci_write_config_word(dev, PCI_COMMAND, 0);
+
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 613edd8..833dddb 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1064,7 +1064,9 @@ void bd_set_size(struct block_device *bdev, loff_t size)
+ {
+ unsigned bsize = bdev_logical_block_size(bdev);
+
+- bdev->bd_inode->i_size = size;
++ mutex_lock(&bdev->bd_inode->i_mutex);
++ i_size_write(bdev->bd_inode, size);
++ mutex_unlock(&bdev->bd_inode->i_mutex);
+ while (bsize < PAGE_CACHE_SIZE) {
+ if (size & bsize)
+ break;
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index f4b839f..9899205 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -543,6 +543,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
+ new_device->writeable = 0;
+ new_device->in_fs_metadata = 0;
+ new_device->can_discard = 0;
++ spin_lock_init(&new_device->io_lock);
+ list_replace_rcu(&device->dev_list, &new_device->dev_list);
+
+ call_rcu(&device->rcu, free_device);
+@@ -576,6 +577,12 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
+ __btrfs_close_devices(fs_devices);
+ free_fs_devices(fs_devices);
+ }
++ /*
++ * Wait for rcu kworkers under __btrfs_close_devices
++ * to finish all blkdev_puts so device is really
++ * free when umount is done.
++ */
++ rcu_barrier();
+ return ret;
+ }
+
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index b1451af..b3a2a40 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -561,6 +561,11 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
+ dentry = ERR_PTR(-ENOENT);
+ break;
+ }
++ if (!S_ISDIR(dir->i_mode)) {
++ dput(dentry);
++ dentry = ERR_PTR(-ENOTDIR);
++ break;
++ }
+
+ /* skip separators */
+ while (*s == sep)
+diff --git a/fs/compat.c b/fs/compat.c
+index e07a3d3..4bf082d 100644
+--- a/fs/compat.c
++++ b/fs/compat.c
+@@ -572,6 +572,10 @@ ssize_t compat_rw_copy_check_uvector(int type,
+ }
+ *ret_pointer = iov;
+
++ ret = -EFAULT;
++ if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector)))
++ goto out;
++
+ /*
+ * Single unix specification:
+ * We should -EINVAL if an element length is not >= 0 and fitting an
+@@ -1103,17 +1107,12 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
+ if (!file->f_op)
+ goto out;
+
+- ret = -EFAULT;
+- if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector)))
+- goto out;
+-
+- tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs,
++ ret = compat_rw_copy_check_uvector(type, uvector, nr_segs,
+ UIO_FASTIOV, iovstack, &iov, 1);
+- if (tot_len == 0) {
+- ret = 0;
++ if (ret <= 0)
+ goto out;
+- }
+
++ tot_len = ret;
+ ret = rw_verify_area(type, file, pos, tot_len);
+ if (ret < 0)
+ goto out;
+diff --git a/fs/ext3/super.c b/fs/ext3/super.c
+index 922d289..b7f314f 100644
+--- a/fs/ext3/super.c
++++ b/fs/ext3/super.c
+@@ -374,7 +374,7 @@ static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb)
+ return bdev;
+
+ fail:
+- ext3_msg(sb, "error: failed to open journal device %s: %ld",
++ ext3_msg(sb, KERN_ERR, "error: failed to open journal device %s: %ld",
+ __bdevname(dev, b), PTR_ERR(bdev));
+
+ return NULL;
+@@ -902,7 +902,7 @@ static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb)
+ /*todo: use simple_strtoll with >32bit ext3 */
+ sb_block = simple_strtoul(options, &options, 0);
+ if (*options && *options != ',') {
+- ext3_msg(sb, "error: invalid sb specification: %s",
++ ext3_msg(sb, KERN_ERR, "error: invalid sb specification: %s",
+ (char *) *data);
+ return 1;
+ }
+diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
+index a87a656..c25cf15 100644
+--- a/fs/fat/namei_vfat.c
++++ b/fs/fat/namei_vfat.c
+@@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
+ int charlen;
+
+ if (utf8) {
+- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
++ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
++ (wchar_t *) outname, FAT_LFN_LEN + 2);
+ if (*outlen < 0)
+ return *outlen;
+ else if (*outlen > FAT_LFN_LEN)
+diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
+index 4f9319a..98f1261 100644
+--- a/fs/nfs/unlink.c
++++ b/fs/nfs/unlink.c
+@@ -366,20 +366,14 @@ static void nfs_async_rename_done(struct rpc_task *task, void *calldata)
+ struct inode *old_dir = data->old_dir;
+ struct inode *new_dir = data->new_dir;
+ struct dentry *old_dentry = data->old_dentry;
+- struct dentry *new_dentry = data->new_dentry;
+
+ if (!NFS_PROTO(old_dir)->rename_done(task, old_dir, new_dir)) {
+ rpc_restart_call_prepare(task);
+ return;
+ }
+
+- if (task->tk_status != 0) {
++ if (task->tk_status != 0)
+ nfs_cancel_async_unlink(old_dentry);
+- return;
+- }
+-
+- d_drop(old_dentry);
+- d_drop(new_dentry);
+ }
+
+ /**
+@@ -589,6 +583,18 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
+ error = rpc_wait_for_completion_task(task);
+ if (error == 0)
+ error = task->tk_status;
++ switch (error) {
++ case 0:
++ /* The rename succeeded */
++ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
++ d_move(dentry, sdentry);
++ break;
++ case -ERESTARTSYS:
++ /* The result of the rename is unknown. Play it safe by
++ * forcing a new lookup */
++ d_drop(dentry);
++ d_drop(sdentry);
++ }
+ rpc_put_task(task);
+ out_dput:
+ dput(sdentry);
+diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
+index 44a88a9..0eb059e 100644
+--- a/fs/nls/nls_base.c
++++ b/fs/nls/nls_base.c
+@@ -114,34 +114,57 @@ int utf32_to_utf8(unicode_t u, u8 *s, int maxlen)
+ }
+ EXPORT_SYMBOL(utf32_to_utf8);
+
+-int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs)
++static inline void put_utf16(wchar_t *s, unsigned c, enum utf16_endian endian)
++{
++ switch (endian) {
++ default:
++ *s = (wchar_t) c;
++ break;
++ case UTF16_LITTLE_ENDIAN:
++ *s = __cpu_to_le16(c);
++ break;
++ case UTF16_BIG_ENDIAN:
++ *s = __cpu_to_be16(c);
++ break;
++ }
++}
++
++int utf8s_to_utf16s(const u8 *s, int len, enum utf16_endian endian,
++ wchar_t *pwcs, int maxlen)
+ {
+ u16 *op;
+ int size;
+ unicode_t u;
+
+ op = pwcs;
+- while (*s && len > 0) {
++ while (len > 0 && maxlen > 0 && *s) {
+ if (*s & 0x80) {
+ size = utf8_to_utf32(s, len, &u);
+ if (size < 0)
+ return -EINVAL;
++ s += size;
++ len -= size;
+
+ if (u >= PLANE_SIZE) {
++ if (maxlen < 2)
++ break;
+ u -= PLANE_SIZE;
+- *op++ = (wchar_t) (SURROGATE_PAIR |
+- ((u >> 10) & SURROGATE_BITS));
+- *op++ = (wchar_t) (SURROGATE_PAIR |
++ put_utf16(op++, SURROGATE_PAIR |
++ ((u >> 10) & SURROGATE_BITS),
++ endian);
++ put_utf16(op++, SURROGATE_PAIR |
+ SURROGATE_LOW |
+- (u & SURROGATE_BITS));
++ (u & SURROGATE_BITS),
++ endian);
++ maxlen -= 2;
+ } else {
+- *op++ = (wchar_t) u;
++ put_utf16(op++, u, endian);
++ maxlen--;
+ }
+- s += size;
+- len -= size;
+ } else {
+- *op++ = *s++;
++ put_utf16(op++, *s++, endian);
+ len--;
++ maxlen--;
+ }
+ }
+ return op - pwcs;
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 05ed5ca..8ca88fc 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -859,6 +859,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
+ {
+ int ret = -ENOENT;
+
++ if (!(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
++ return -EINVAL;
++
+ mutex_lock(&inode->i_mutex);
+
+ if (inode->i_pipe) {
+diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
+index 98f34b8..6712ab6 100644
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -72,8 +72,8 @@ typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
+ typedef int (*dm_preresume_fn) (struct dm_target *ti);
+ typedef void (*dm_resume_fn) (struct dm_target *ti);
+
+-typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
+- char *result, unsigned int maxlen);
++typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
++ char *result, unsigned maxlen);
+
+ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
+
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 1721c41..ce95a4b 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -30,7 +30,12 @@
+ #define EFI_UNSUPPORTED ( 3 | (1UL << (BITS_PER_LONG-1)))
+ #define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1)))
+ #define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << (BITS_PER_LONG-1)))
++#define EFI_NOT_READY ( 6 | (1UL << (BITS_PER_LONG-1)))
++#define EFI_DEVICE_ERROR ( 7 | (1UL << (BITS_PER_LONG-1)))
++#define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1)))
++#define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1)))
+ #define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1)))
++#define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1)))
+
+ typedef unsigned long efi_status_t;
+ typedef u8 efi_bool_t;
+@@ -470,6 +475,7 @@ struct efivar_operations {
+ efi_get_variable_t *get_variable;
+ efi_get_next_variable_t *get_next_variable;
+ efi_set_variable_t *set_variable;
++ efi_query_variable_info_t *query_variable_info;
+ };
+
+ struct efivars {
+diff --git a/include/linux/nls.h b/include/linux/nls.h
+index d47beef..5dc635f 100644
+--- a/include/linux/nls.h
++++ b/include/linux/nls.h
+@@ -43,7 +43,7 @@ enum utf16_endian {
+ UTF16_BIG_ENDIAN
+ };
+
+-/* nls.c */
++/* nls_base.c */
+ extern int register_nls(struct nls_table *);
+ extern int unregister_nls(struct nls_table *);
+ extern struct nls_table *load_nls(char *);
+@@ -52,7 +52,8 @@ extern struct nls_table *load_nls_default(void);
+
+ extern int utf8_to_utf32(const u8 *s, int len, unicode_t *pu);
+ extern int utf32_to_utf8(unicode_t u, u8 *s, int maxlen);
+-extern int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs);
++extern int utf8s_to_utf16s(const u8 *s, int len,
++ enum utf16_endian endian, wchar_t *pwcs, int maxlen);
+ extern int utf16s_to_utf8s(const wchar_t *pwcs, int len,
+ enum utf16_endian endian, u8 *s, int maxlen);
+
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index bae516e..42e35c3 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -48,7 +48,10 @@
+ #define PORT_TEGRA 20 /* NVIDIA Tegra internal UART */
+ #define PORT_XR17D15X 21 /* Exar XR17D15x UART */
+ #define PORT_BRCM_TRUMANAGE 25
+-#define PORT_MAX_8250 25 /* max port ID */
++#define PORT_ALTR_16550_F32 26 /* Altera 16550 UART with 32 FIFOs */
++#define PORT_ALTR_16550_F64 27 /* Altera 16550 UART with 64 FIFOs */
++#define PORT_ALTR_16550_F128 28 /* Altera 16550 UART with 128 FIFOs */
++#define PORT_MAX_8250 28 /* max port ID */
+
+ /*
+ * ARM specific type numbers. These are not currently guaranteed
+diff --git a/kernel/signal.c b/kernel/signal.c
+index d2f55ea..71e1816 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -481,6 +481,9 @@ flush_signal_handlers(struct task_struct *t, int force_default)
+ if (force_default || ka->sa.sa_handler != SIG_IGN)
+ ka->sa.sa_handler = SIG_DFL;
+ ka->sa.sa_flags = 0;
++#ifdef SA_RESTORER
++ ka->sa.sa_restorer = NULL;
++#endif
+ sigemptyset(&ka->sa.sa_mask);
+ ka++;
+ }
+diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
+index cd31345..762264d 100644
+--- a/kernel/trace/Kconfig
++++ b/kernel/trace/Kconfig
+@@ -386,24 +386,28 @@ config KPROBE_EVENT
+ If you want to use perf tools, this option is strongly recommended.
+
+ config DYNAMIC_FTRACE
+- bool "enable/disable ftrace tracepoints dynamically"
++ bool "enable/disable function tracing dynamically"
+ depends on FUNCTION_TRACER
+ depends on HAVE_DYNAMIC_FTRACE
+ default y
+ help
+- This option will modify all the calls to ftrace dynamically
+- (will patch them out of the binary image and replace them
+- with a No-Op instruction) as they are called. A table is
+- created to dynamically enable them again.
++ This option will modify all the calls to function tracing
++ dynamically (will patch them out of the binary image and
++ replace them with a No-Op instruction) on boot up. During
++ compile time, a table is made of all the locations that ftrace
++ can function trace, and this table is linked into the kernel
++ image. When this is enabled, functions can be individually
++ enabled, and the functions not enabled will not affect
++ performance of the system.
++
++ See the files in /sys/kernel/debug/tracing:
++ available_filter_functions
++ set_ftrace_filter
++ set_ftrace_notrace
+
+ This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
+ otherwise has native performance as long as no tracing is active.
+
+- The changes to the code are done by a kernel thread that
+- wakes up once a second and checks to see if any ftrace calls
+- were made. If so, it runs stop_machine (stops all CPUS)
+- and modifies the code to jump over the call to ftrace.
+-
+ config FUNCTION_PROFILER
+ bool "Kernel function profiler"
+ depends on FUNCTION_TRACER
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 9ad7d1e..09d87b7 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -515,19 +515,20 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
+
+ zone->present_pages += onlined_pages;
+ zone->zone_pgdat->node_present_pages += onlined_pages;
+- if (need_zonelists_rebuild)
+- build_all_zonelists(zone);
+- else
+- zone_pcp_update(zone);
++ if (onlined_pages) {
++ node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
++ if (need_zonelists_rebuild)
++ build_all_zonelists(zone);
++ else
++ zone_pcp_update(zone);
++ }
+
+ mutex_unlock(&zonelists_mutex);
+
+ init_per_zone_wmark_min();
+
+- if (onlined_pages) {
++ if (onlined_pages)
+ kswapd_run(zone_to_nid(zone));
+- node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
+- }
+
+ vm_total_pages = nr_free_pagecache_pages();
+
+diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
+index e920aa3..70e814a 100644
+--- a/mm/process_vm_access.c
++++ b/mm/process_vm_access.c
+@@ -434,12 +434,6 @@ compat_process_vm_rw(compat_pid_t pid,
+ if (flags != 0)
+ return -EINVAL;
+
+- if (!access_ok(VERIFY_READ, lvec, liovcnt * sizeof(*lvec)))
+- goto out;
+-
+- if (!access_ok(VERIFY_READ, rvec, riovcnt * sizeof(*rvec)))
+- goto out;
+-
+ if (vm_write)
+ rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
+ UIO_FASTIOV, iovstack_l,
+@@ -464,8 +458,6 @@ free_iovecs:
+ kfree(iov_r);
+ if (iov_l != iovstack_l)
+ kfree(iov_l);
+-
+-out:
+ return rc;
+ }
+
+diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
+index ac3520e..0c82ce3 100644
+--- a/net/batman-adv/icmp_socket.c
++++ b/net/batman-adv/icmp_socket.c
+@@ -136,10 +136,9 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
+
+ spin_unlock_bh(&socket_client->lock);
+
+- error = __copy_to_user(buf, &socket_packet->icmp_packet,
+- socket_packet->icmp_len);
++ packet_len = min(count, socket_packet->icmp_len);
++ error = copy_to_user(buf, &socket_packet->icmp_packet, packet_len);
+
+- packet_len = socket_packet->icmp_len;
+ kfree(socket_packet);
+
+ if (error)
+diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
+index 19acd00..16fbf8c 100644
+--- a/net/decnet/af_decnet.c
++++ b/net/decnet/af_decnet.c
+@@ -2354,6 +2354,8 @@ static const struct proto_ops dn_proto_ops = {
+ .sendpage = sock_no_sendpage,
+ };
+
++void dn_register_sysctl_skeleton(void);
++void dn_unregister_sysctl_skeleton(void);
+ void dn_register_sysctl(void);
+ void dn_unregister_sysctl(void);
+
+@@ -2374,6 +2376,7 @@ static int __init decnet_init(void)
+ if (rc != 0)
+ goto out;
+
++ dn_register_sysctl_skeleton();
+ dn_neigh_init();
+ dn_dev_init();
+ dn_route_init();
+@@ -2413,6 +2416,7 @@ static void __exit decnet_exit(void)
+ dn_fib_cleanup();
+
+ proc_net_remove(&init_net, "decnet");
++ dn_unregister_sysctl_skeleton();
+
+ proto_unregister(&dn_proto);
+
+diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
+index 02e75d1..d50a13c 100644
+--- a/net/decnet/sysctl_net_decnet.c
++++ b/net/decnet/sysctl_net_decnet.c
+@@ -55,6 +55,7 @@ static int max_decnet_no_fc_max_cwnd[] = { NSP_MAX_WINDOW };
+ static char node_name[7] = "???";
+
+ static struct ctl_table_header *dn_table_header = NULL;
++static struct ctl_table_header *dn_skeleton_table_header = NULL;
+
+ /*
+ * ctype.h :-)
+@@ -357,6 +358,27 @@ static struct ctl_path dn_path[] = {
+ { }
+ };
+
++static struct ctl_table empty[1];
++
++static struct ctl_table dn_skeleton[] = {
++ {
++ .procname = "conf",
++ .mode = 0555,
++ .child = empty,
++ },
++ { }
++};
++
++void dn_register_sysctl_skeleton(void)
++{
++ dn_skeleton_table_header = register_sysctl_paths(dn_path, dn_skeleton);
++}
++
++void dn_unregister_sysctl_skeleton(void)
++{
++ unregister_sysctl_table(dn_skeleton_table_header);
++}
++
+ void dn_register_sysctl(void)
+ {
+ dn_table_header = register_sysctl_paths(dn_path, dn_table);
+@@ -368,6 +390,12 @@ void dn_unregister_sysctl(void)
+ }
+
+ #else /* CONFIG_SYSCTL */
++void dn_register_sysctl_skeleton(void)
++{
++}
++void dn_unregister_sysctl_skeleton(void)
++{
++}
+ void dn_unregister_sysctl(void)
+ {
+ }
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 6c91208..3c5d329 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -481,13 +481,17 @@ EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
+ * xprt_wait_for_buffer_space - wait for transport output buffer to clear
+ * @task: task to be put to sleep
+ * @action: function pointer to be executed after wait
++ *
++ * Note that we only set the timer for the case of RPC_IS_SOFT(), since
++ * we don't in general want to force a socket disconnection due to
++ * an incomplete RPC call transmission.
+ */
+ void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
+ {
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+
+- task->tk_timeout = req->rq_timeout;
++ task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
+ rpc_sleep_on(&xprt->pending, task, action);
+ }
+ EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
+diff --git a/security/keys/compat.c b/security/keys/compat.c
+index 4c48e13..1b0b7bf 100644
+--- a/security/keys/compat.c
++++ b/security/keys/compat.c
+@@ -40,12 +40,12 @@ long compat_keyctl_instantiate_key_iov(
+ ARRAY_SIZE(iovstack),
+ iovstack, &iov, 1);
+ if (ret < 0)
+- return ret;
++ goto err;
+ if (ret == 0)
+ goto no_payload_free;
+
+ ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
+-
++err:
+ if (iov != iovstack)
+ kfree(iov);
+ return ret;
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+index 1068cb1..60d0df7 100644
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -54,7 +54,7 @@ int install_user_keyrings(void)
+
+ kenter("%p{%u}", user, user->uid);
+
+- if (user->uid_keyring) {
++ if (user->uid_keyring && user->session_keyring) {
+ kleave(" = 0 [exist]");
+ return 0;
+ }
+diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
+index 160b1bd..24d44b2 100644
+--- a/sound/core/seq/seq_timer.c
++++ b/sound/core/seq/seq_timer.c
+@@ -290,10 +290,10 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
+ tid.device = SNDRV_TIMER_GLOBAL_SYSTEM;
+ err = snd_timer_open(&t, str, &tid, q->queue);
+ }
+- if (err < 0) {
+- snd_printk(KERN_ERR "seq fatal error: cannot create timer (%i)\n", err);
+- return err;
+- }
++ }
++ if (err < 0) {
++ snd_printk(KERN_ERR "seq fatal error: cannot create timer (%i)\n", err);
++ return err;
+ }
+ t->callback = snd_seq_timer_interrupt;
+ t->callback_data = q;
+diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
+index 130cfe6..ac0af03 100644
+--- a/sound/core/vmaster.c
++++ b/sound/core/vmaster.c
+@@ -209,7 +209,10 @@ static int slave_put(struct snd_kcontrol *kcontrol,
+ }
+ if (!changed)
+ return 0;
+- return slave_put_val(slave, ucontrol);
++ err = slave_put_val(slave, ucontrol);
++ if (err < 0)
++ return err;
++ return 1;
+ }
+
+ static int slave_tlv_cmd(struct snd_kcontrol *kcontrol,
diff --git a/1041_linux-3.2.42.patch b/1041_linux-3.2.42.patch
new file mode 100644
index 00000000..77a08ed2
--- /dev/null
+++ b/1041_linux-3.2.42.patch
@@ -0,0 +1,3602 @@
+diff --git a/Makefile b/Makefile
+index 95e6220..d44f009 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 41
++SUBLEVEL = 42
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h
+index 43ba0fb..559ee24 100644
+--- a/arch/arm/include/asm/signal.h
++++ b/arch/arm/include/asm/signal.h
+@@ -127,6 +127,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/avr32/include/asm/signal.h b/arch/avr32/include/asm/signal.h
+index 8790dfc..e6952a0 100644
+--- a/arch/avr32/include/asm/signal.h
++++ b/arch/avr32/include/asm/signal.h
+@@ -128,6 +128,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/cris/include/asm/signal.h b/arch/cris/include/asm/signal.h
+index ea6af9a..057fea2 100644
+--- a/arch/cris/include/asm/signal.h
++++ b/arch/cris/include/asm/signal.h
+@@ -122,6 +122,7 @@ struct sigaction {
+ void (*sa_restorer)(void);
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/h8300/include/asm/signal.h b/arch/h8300/include/asm/signal.h
+index fd8b66e..8695707 100644
+--- a/arch/h8300/include/asm/signal.h
++++ b/arch/h8300/include/asm/signal.h
+@@ -121,6 +121,7 @@ struct sigaction {
+ void (*sa_restorer)(void);
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h
+index b2eeb0d..802d561 100644
+--- a/arch/m32r/include/asm/signal.h
++++ b/arch/m32r/include/asm/signal.h
+@@ -123,6 +123,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h
+index 93fe83e..a20ae63 100644
+--- a/arch/m68k/include/asm/signal.h
++++ b/arch/m68k/include/asm/signal.h
+@@ -119,6 +119,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/mn10300/include/asm/signal.h b/arch/mn10300/include/asm/signal.h
+index 1865d72..eecaa76 100644
+--- a/arch/mn10300/include/asm/signal.h
++++ b/arch/mn10300/include/asm/signal.h
+@@ -131,6 +131,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
+index 3eb13be..ec63a0a 100644
+--- a/arch/powerpc/include/asm/signal.h
++++ b/arch/powerpc/include/asm/signal.h
+@@ -109,6 +109,7 @@ struct sigaction {
+ __sigrestore_t sa_restorer;
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
+index edae5bb..b92b756 100644
+--- a/arch/powerpc/kernel/cputable.c
++++ b/arch/powerpc/kernel/cputable.c
+@@ -268,7 +268,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
+ .cpu_features = CPU_FTRS_PPC970,
+ .cpu_user_features = COMMON_USER_POWER4 |
+ PPC_FEATURE_HAS_ALTIVEC_COMP,
+- .mmu_features = MMU_FTR_HPTE_TABLE,
++ .mmu_features = MMU_FTRS_PPC970,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 8,
+diff --git a/arch/s390/include/asm/signal.h b/arch/s390/include/asm/signal.h
+index cdf5cb2..c872626 100644
+--- a/arch/s390/include/asm/signal.h
++++ b/arch/s390/include/asm/signal.h
+@@ -131,6 +131,7 @@ struct sigaction {
+ void (*sa_restorer)(void);
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
+index 1d8648c..8743029 100644
+--- a/arch/s390/include/asm/tlbflush.h
++++ b/arch/s390/include/asm/tlbflush.h
+@@ -74,8 +74,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
+
+ static inline void __tlb_flush_mm(struct mm_struct * mm)
+ {
+- if (unlikely(cpumask_empty(mm_cpumask(mm))))
+- return;
+ /*
+ * If the machine has IDTE we prefer to do a per mm flush
+ * on all cpus instead of doing a local flush if the mm
+diff --git a/arch/sparc/include/asm/signal.h b/arch/sparc/include/asm/signal.h
+index e49b828..4929431 100644
+--- a/arch/sparc/include/asm/signal.h
++++ b/arch/sparc/include/asm/signal.h
+@@ -191,6 +191,7 @@ struct __old_sigaction {
+ unsigned long sa_flags;
+ void (*sa_restorer)(void); /* not used by Linux/SPARC yet */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ typedef struct sigaltstack {
+ void __user *ss_sp;
+diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
+index 598457c..6cbc795 100644
+--- a/arch/x86/include/asm/signal.h
++++ b/arch/x86/include/asm/signal.h
+@@ -125,6 +125,8 @@ typedef unsigned long sigset_t;
+ extern void do_notify_resume(struct pt_regs *, void *, __u32);
+ # endif /* __KERNEL__ */
+
++#define __ARCH_HAS_SA_RESTORER
++
+ #ifdef __i386__
+ # ifdef __KERNEL__
+ struct old_sigaction {
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
+index 73da6b6..2d4e76b 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
+@@ -736,3 +736,13 @@ void intel_ds_init(void)
+ }
+ }
+ }
++
++void perf_restore_debug_store(void)
++{
++ struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
++
++ if (!x86_pmu.bts && !x86_pmu.pebs)
++ return;
++
++ wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
++}
+diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
+index b7c2849..554b7b5 100644
+--- a/arch/x86/lib/usercopy_64.c
++++ b/arch/x86/lib/usercopy_64.c
+@@ -169,10 +169,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
+ char c;
+ unsigned zero_len;
+
+- for (; len; --len) {
++ for (; len; --len, to++) {
+ if (__get_user_nocheck(c, from++, sizeof(char)))
+ break;
+- if (__put_user_nocheck(c, to++, sizeof(char)))
++ if (__put_user_nocheck(c, to, sizeof(char)))
+ break;
+ }
+
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index f10c0af..43c9f6a 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -11,6 +11,7 @@
+ #include <linux/suspend.h>
+ #include <linux/export.h>
+ #include <linux/smp.h>
++#include <linux/perf_event.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/proto.h>
+@@ -225,6 +226,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
+
+ do_fpu_end();
+ mtrr_bp_restore();
++ perf_restore_debug_store();
+ }
+
+ /* Needed by apm.c */
+diff --git a/arch/xtensa/include/asm/signal.h b/arch/xtensa/include/asm/signal.h
+index 633ba73..75edf8a 100644
+--- a/arch/xtensa/include/asm/signal.h
++++ b/arch/xtensa/include/asm/signal.h
+@@ -133,6 +133,7 @@ struct sigaction {
+ void (*sa_restorer)(void);
+ sigset_t sa_mask; /* mask last for extensibility */
+ };
++#define __ARCH_HAS_SA_RESTORER
+
+ struct k_sigaction {
+ struct sigaction sa;
+diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
+index efba163..bf4d6e2 100644
+--- a/drivers/firmware/Kconfig
++++ b/drivers/firmware/Kconfig
+@@ -53,6 +53,24 @@ config EFI_VARS
+ Subsequent efibootmgr releases may be found at:
+ <http://linux.dell.com/efibootmgr>
+
++config EFI_VARS_PSTORE
++ bool "Register efivars backend for pstore"
++ depends on EFI_VARS && PSTORE
++ default y
++ help
++ Say Y here to enable use efivars as a backend to pstore. This
++ will allow writing console messages, crash dumps, or anything
++ else supported by pstore to EFI variables.
++
++config EFI_VARS_PSTORE_DEFAULT_DISABLE
++ bool "Disable using efivars as a pstore backend by default"
++ depends on EFI_VARS_PSTORE
++ default n
++ help
++ Saying Y here will disable the use of efivars as a storage
++ backend for pstore by default. This setting can be overridden
++ using the efivars module's pstore_disable parameter.
++
+ config EFI_PCDP
+ bool "Console device selection via EFI PCDP or HCDP table"
+ depends on ACPI && EFI && IA64
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index 81346ae..b15c0aa 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -92,6 +92,11 @@ MODULE_VERSION(EFIVARS_VERSION);
+
+ #define DUMP_NAME_LEN 52
+
++static bool efivars_pstore_disable =
++ IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
++
++module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
++
+ /*
+ * The maximum size of VariableName + Data = 1024
+ * Therefore, it's reasonable to save that much
+@@ -122,6 +127,8 @@ struct efivar_attribute {
+ ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
+ };
+
++static struct efivars __efivars;
++
+ #define PSTORE_EFI_ATTRIBUTES \
+ (EFI_VARIABLE_NON_VOLATILE | \
+ EFI_VARIABLE_BOOTSERVICE_ACCESS | \
+@@ -146,6 +153,14 @@ efivar_create_sysfs_entry(struct efivars *efivars,
+ efi_char16_t *variable_name,
+ efi_guid_t *vendor_guid);
+
++/*
++ * Prototype for workqueue functions updating sysfs entry
++ */
++
++static void efivar_update_sysfs_entries(struct work_struct *);
++static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
++static bool efivar_wq_enabled = true;
++
+ /* Return the number of unicode characters in data */
+ static unsigned long
+ utf16_strnlen(efi_char16_t *s, size_t maxlength)
+@@ -659,8 +674,6 @@ static struct kobj_type efivar_ktype = {
+ .default_attrs = def_attrs,
+ };
+
+-static struct pstore_info efi_pstore_info;
+-
+ static inline void
+ efivar_unregister(struct efivar_entry *var)
+ {
+@@ -697,7 +710,7 @@ static int efi_status_to_err(efi_status_t status)
+ return err;
+ }
+
+-#ifdef CONFIG_PSTORE
++#ifdef CONFIG_EFI_VARS_PSTORE
+
+ static int efi_pstore_open(struct pstore_info *psi)
+ {
+@@ -774,19 +787,21 @@ static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+
+ spin_lock_irqsave(&efivars->lock, flags);
+
+- /*
+- * Check if there is a space enough to log.
+- * size: a size of logging data
+- * DUMP_NAME_LEN * 2: a maximum size of variable name
+- */
++ if (size) {
++ /*
++ * Check if there is a space enough to log.
++ * size: a size of logging data
++ * DUMP_NAME_LEN * 2: a maximum size of variable name
++ */
+
+- status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES,
+- size + DUMP_NAME_LEN * 2);
++ status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES,
++ size + DUMP_NAME_LEN * 2);
+
+- if (status) {
+- spin_unlock_irqrestore(&efivars->lock, flags);
+- *id = part;
+- return -ENOSPC;
++ if (status) {
++ spin_unlock_irqrestore(&efivars->lock, flags);
++ *id = part;
++ return -ENOSPC;
++ }
+ }
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+@@ -830,11 +845,8 @@ static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+ if (found)
+ efivar_unregister(found);
+
+- if (size)
+- ret = efivar_create_sysfs_entry(efivars,
+- utf16_strsize(efi_name,
+- DUMP_NAME_LEN * 2),
+- efi_name, &vendor);
++ if (efivar_wq_enabled)
++ schedule_work(&efivar_work);
+
+ *id = part;
+ return ret;
+@@ -847,36 +859,6 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id,
+
+ return 0;
+ }
+-#else
+-static int efi_pstore_open(struct pstore_info *psi)
+-{
+- return 0;
+-}
+-
+-static int efi_pstore_close(struct pstore_info *psi)
+-{
+- return 0;
+-}
+-
+-static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
+- struct timespec *timespec,
+- char **buf, struct pstore_info *psi)
+-{
+- return -1;
+-}
+-
+-static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+- unsigned int part, size_t size, struct pstore_info *psi)
+-{
+- return 0;
+-}
+-
+-static int efi_pstore_erase(enum pstore_type_id type, u64 id,
+- struct pstore_info *psi)
+-{
+- return 0;
+-}
+-#endif
+
+ static struct pstore_info efi_pstore_info = {
+ .owner = THIS_MODULE,
+@@ -888,6 +870,24 @@ static struct pstore_info efi_pstore_info = {
+ .erase = efi_pstore_erase,
+ };
+
++static void efivar_pstore_register(struct efivars *efivars)
++{
++ efivars->efi_pstore_info = efi_pstore_info;
++ efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
++ if (efivars->efi_pstore_info.buf) {
++ efivars->efi_pstore_info.bufsize = 1024;
++ efivars->efi_pstore_info.data = efivars;
++ spin_lock_init(&efivars->efi_pstore_info.buf_lock);
++ pstore_register(&efivars->efi_pstore_info);
++ }
++}
++#else
++static void efivar_pstore_register(struct efivars *efivars)
++{
++ return;
++}
++#endif
++
+ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+@@ -1025,6 +1025,103 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
+ return count;
+ }
+
++static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
++{
++ struct efivar_entry *entry, *n;
++ struct efivars *efivars = &__efivars;
++ unsigned long strsize1, strsize2;
++ bool found = false;
++
++ strsize1 = utf16_strsize(variable_name, 1024);
++ list_for_each_entry_safe(entry, n, &efivars->list, list) {
++ strsize2 = utf16_strsize(entry->var.VariableName, 1024);
++ if (strsize1 == strsize2 &&
++ !memcmp(variable_name, &(entry->var.VariableName),
++ strsize2) &&
++ !efi_guidcmp(entry->var.VendorGuid,
++ *vendor)) {
++ found = true;
++ break;
++ }
++ }
++ return found;
++}
++
++/*
++ * Returns the size of variable_name, in bytes, including the
++ * terminating NULL character, or variable_name_size if no NULL
++ * character is found among the first variable_name_size bytes.
++ */
++static unsigned long var_name_strnsize(efi_char16_t *variable_name,
++ unsigned long variable_name_size)
++{
++ unsigned long len;
++ efi_char16_t c;
++
++ /*
++ * The variable name is, by definition, a NULL-terminated
++ * string, so make absolutely sure that variable_name_size is
++ * the value we expect it to be. If not, return the real size.
++ */
++ for (len = 2; len <= variable_name_size; len += sizeof(c)) {
++ c = variable_name[(len / sizeof(c)) - 1];
++ if (!c)
++ break;
++ }
++
++ return min(len, variable_name_size);
++}
++
++static void efivar_update_sysfs_entries(struct work_struct *work)
++{
++ struct efivars *efivars = &__efivars;
++ efi_guid_t vendor;
++ efi_char16_t *variable_name;
++ unsigned long variable_name_size = 1024;
++ efi_status_t status = EFI_NOT_FOUND;
++ bool found;
++
++ /* Add new sysfs entries */
++ while (1) {
++ variable_name = kzalloc(variable_name_size, GFP_KERNEL);
++ if (!variable_name) {
++ pr_err("efivars: Memory allocation failed.\n");
++ return;
++ }
++
++ spin_lock_irq(&efivars->lock);
++ found = false;
++ while (1) {
++ variable_name_size = 1024;
++ status = efivars->ops->get_next_variable(
++ &variable_name_size,
++ variable_name,
++ &vendor);
++ if (status != EFI_SUCCESS) {
++ break;
++ } else {
++ if (!variable_is_present(variable_name,
++ &vendor)) {
++ found = true;
++ break;
++ }
++ }
++ }
++ spin_unlock_irq(&efivars->lock);
++
++ if (!found) {
++ kfree(variable_name);
++ break;
++ } else {
++ variable_name_size = var_name_strnsize(variable_name,
++ variable_name_size);
++ efivar_create_sysfs_entry(efivars,
++ variable_name_size,
++ variable_name, &vendor);
++ }
++ }
++}
++
+ /*
+ * Let's not leave out systab information that snuck into
+ * the efivars driver
+@@ -1212,6 +1309,35 @@ void unregister_efivars(struct efivars *efivars)
+ }
+ EXPORT_SYMBOL_GPL(unregister_efivars);
+
++/*
++ * Print a warning when duplicate EFI variables are encountered and
++ * disable the sysfs workqueue since the firmware is buggy.
++ */
++static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
++ unsigned long len16)
++{
++ size_t i, len8 = len16 / sizeof(efi_char16_t);
++ char *s8;
++
++ /*
++ * Disable the workqueue since the algorithm it uses for
++ * detecting new variables won't work with this buggy
++ * implementation of GetNextVariableName().
++ */
++ efivar_wq_enabled = false;
++
++ s8 = kzalloc(len8, GFP_KERNEL);
++ if (!s8)
++ return;
++
++ for (i = 0; i < len8; i++)
++ s8[i] = s16[i];
++
++ printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
++ s8, vendor_guid);
++ kfree(s8);
++}
++
+ int register_efivars(struct efivars *efivars,
+ const struct efivar_operations *ops,
+ struct kobject *parent_kobj)
+@@ -1252,6 +1378,24 @@ int register_efivars(struct efivars *efivars,
+ &vendor_guid);
+ switch (status) {
+ case EFI_SUCCESS:
++ variable_name_size = var_name_strnsize(variable_name,
++ variable_name_size);
++
++ /*
++ * Some firmware implementations return the
++ * same variable name on multiple calls to
++ * get_next_variable(). Terminate the loop
++ * immediately as there is no guarantee that
++ * we'll ever see a different variable name,
++ * and may end up looping here forever.
++ */
++ if (variable_is_present(variable_name, &vendor_guid)) {
++ dup_variable_bug(variable_name, &vendor_guid,
++ variable_name_size);
++ status = EFI_NOT_FOUND;
++ break;
++ }
++
+ efivar_create_sysfs_entry(efivars,
+ variable_name_size,
+ variable_name,
+@@ -1271,15 +1415,8 @@ int register_efivars(struct efivars *efivars,
+ if (error)
+ unregister_efivars(efivars);
+
+- efivars->efi_pstore_info = efi_pstore_info;
+-
+- efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
+- if (efivars->efi_pstore_info.buf) {
+- efivars->efi_pstore_info.bufsize = 1024;
+- efivars->efi_pstore_info.data = efivars;
+- spin_lock_init(&efivars->efi_pstore_info.buf_lock);
+- pstore_register(&efivars->efi_pstore_info);
+- }
++ if (!efivars_pstore_disable)
++ efivar_pstore_register(efivars);
+
+ out:
+ kfree(variable_name);
+@@ -1288,7 +1425,6 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(register_efivars);
+
+-static struct efivars __efivars;
+ static struct efivar_operations ops;
+
+ /*
+@@ -1346,6 +1482,8 @@ err_put:
+ static void __exit
+ efivars_exit(void)
+ {
++ cancel_work_sync(&efivar_work);
++
+ if (efi_enabled(EFI_RUNTIME_SERVICES)) {
+ unregister_efivars(&__efivars);
+ kobject_put(efi_kobj);
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 9080eb7..7211f67 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -852,7 +852,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+ unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
+ unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
+ unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
+- unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
++ unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
+ unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
+
+ /* ignore tiny modes */
+@@ -933,6 +933,7 @@ set_size:
+ }
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
++ mode->vrefresh = drm_mode_vrefresh(mode);
+ drm_mode_set_name(mode);
+
+ return mode;
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index 5620192..9b4e5c6 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -122,7 +122,7 @@ static const char *cache_level_str(int type)
+ static void
+ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+ {
+- seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
++ seq_printf(m, "%pK: %s%s %8zd %04x %04x %d %d%s%s%s",
+ &obj->base,
+ get_pin_flag(obj),
+ get_tiling_flag(obj),
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index 878b989..b1bb734 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -907,15 +907,20 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+ int count)
+ {
+ int i;
++ int relocs_total = 0;
++ int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+
+ for (i = 0; i < count; i++) {
+ char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+ int length; /* limited by fault_in_pages_readable() */
+
+- /* First check for malicious input causing overflow */
+- if (exec[i].relocation_count >
+- INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
++ /* First check for malicious input causing overflow in
++ * the worst case where we need to allocate the entire
++ * relocation tree as a single array.
++ */
++ if (exec[i].relocation_count > relocs_max - relocs_total)
+ return -EINVAL;
++ relocs_total += exec[i].relocation_count;
+
+ length = exec[i].relocation_count *
+ sizeof(struct drm_i915_gem_relocation_entry);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 4591582..17961df 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8059,7 +8059,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+- I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
++ I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
+ I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ if (intel_enable_rc6(dev_priv->dev))
+diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
+index 289140b..cffb007 100644
+--- a/drivers/gpu/drm/i915/intel_opregion.c
++++ b/drivers/gpu/drm/i915/intel_opregion.c
+@@ -419,6 +419,25 @@ blind_set:
+ goto end;
+ }
+
++static void intel_setup_cadls(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ struct intel_opregion *opregion = &dev_priv->opregion;
++ int i = 0;
++ u32 disp_id;
++
++ /* Initialize the CADL field by duplicating the DIDL values.
++ * Technically, this is not always correct as display outputs may exist,
++ * but not active. This initialization is necessary for some Clevo
++ * laptops that check this field before processing the brightness and
++ * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
++ * there are less than eight devices. */
++ do {
++ disp_id = ioread32(&opregion->acpi->didl[i]);
++ iowrite32(disp_id, &opregion->acpi->cadl[i]);
++ } while (++i < 8 && disp_id != 0);
++}
++
+ void intel_opregion_init(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -428,8 +447,10 @@ void intel_opregion_init(struct drm_device *dev)
+ return;
+
+ if (opregion->acpi) {
+- if (drm_core_check_feature(dev, DRIVER_MODESET))
++ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ intel_didl_outputs(dev);
++ intel_setup_cadls(dev);
++ }
+
+ /* Notify BIOS we are ready to handle ACPI video ext notifs.
+ * Right now, all the events are handled by the ACPI video module.
+diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
+index 17e1a9b..441de38 100644
+--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
++++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
+@@ -139,13 +139,15 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
+ sdomain, ddomain, "dma");
+ }
+
+- time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
+- RADEON_BENCHMARK_COPY_BLIT, n);
+- if (time < 0)
+- goto out_cleanup;
+- if (time > 0)
+- radeon_benchmark_log_results(n, size, time,
+- sdomain, ddomain, "blit");
++ if (rdev->asic->copy_blit) {
++ time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
++ RADEON_BENCHMARK_COPY_BLIT, n);
++ if (time < 0)
++ goto out_cleanup;
++ if (time > 0)
++ radeon_benchmark_log_results(n, size, time,
++ sdomain, ddomain, "blit");
++ }
+
+ out_cleanup:
+ if (sobj) {
+diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
+index 3d7885a..3056ea4 100644
+--- a/drivers/i2c/busses/i2c-tegra.c
++++ b/drivers/i2c/busses/i2c-tegra.c
+@@ -341,7 +341,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
+ u32 val;
+ int err = 0;
+
+- clk_enable(i2c_dev->clk);
++ err = clk_enable(i2c_dev->clk);
++ if (err < 0) {
++ dev_err(i2c_dev->dev, "Clock enable failed %d\n", err);
++ return err;
++ }
+
+ tegra_periph_reset_assert(i2c_dev->clk);
+ udelay(2);
+@@ -536,7 +540,12 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ if (i2c_dev->is_suspended)
+ return -EBUSY;
+
+- clk_enable(i2c_dev->clk);
++ ret = clk_enable(i2c_dev->clk);
++ if (ret < 0) {
++ dev_err(i2c_dev->dev, "Clock enable failed %d\n", ret);
++ return ret;
++ }
++
+ for (i = 0; i < num; i++) {
+ int stop = (i == (num - 1)) ? 1 : 0;
+ ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], stop);
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index da4d299..2c9dd2c 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2212,7 +2212,7 @@ static struct target_type pool_target = {
+ .name = "thin-pool",
+ .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
+ DM_TARGET_IMMUTABLE,
+- .version = {1, 0, 1},
++ .version = {1, 0, 2},
+ .module = THIS_MODULE,
+ .ctr = pool_ctr,
+ .dtr = pool_dtr,
+@@ -2428,7 +2428,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
+
+ static struct target_type thin_target = {
+ .name = "thin",
+- .version = {1, 0, 1},
++ .version = {1, 0, 2},
+ .module = THIS_MODULE,
+ .ctr = thin_ctr,
+ .dtr = thin_dtr,
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index e6cdfde..1de0f5f 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -139,15 +139,8 @@ struct child {
+ struct btree_node *n;
+ };
+
+-static struct dm_btree_value_type le64_type = {
+- .context = NULL,
+- .size = sizeof(__le64),
+- .inc = NULL,
+- .dec = NULL,
+- .equal = NULL
+-};
+-
+-static int init_child(struct dm_btree_info *info, struct btree_node *parent,
++static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
++ struct btree_node *parent,
+ unsigned index, struct child *result)
+ {
+ int r, inc;
+@@ -164,7 +157,7 @@ static int init_child(struct dm_btree_info *info, struct btree_node *parent,
+ result->n = dm_block_data(result->block);
+
+ if (inc)
+- inc_children(info->tm, result->n, &le64_type);
++ inc_children(info->tm, result->n, vt);
+
+ *((__le64 *) value_ptr(parent, index, sizeof(__le64))) =
+ cpu_to_le64(dm_block_location(result->block));
+@@ -236,7 +229,7 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
+ }
+
+ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
+- unsigned left_index)
++ struct dm_btree_value_type *vt, unsigned left_index)
+ {
+ int r;
+ struct btree_node *parent;
+@@ -244,11 +237,11 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
+
+ parent = dm_block_data(shadow_current(s));
+
+- r = init_child(info, parent, left_index, &left);
++ r = init_child(info, vt, parent, left_index, &left);
+ if (r)
+ return r;
+
+- r = init_child(info, parent, left_index + 1, &right);
++ r = init_child(info, vt, parent, left_index + 1, &right);
+ if (r) {
+ exit_child(info, &left);
+ return r;
+@@ -368,7 +361,7 @@ static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
+ }
+
+ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
+- unsigned left_index)
++ struct dm_btree_value_type *vt, unsigned left_index)
+ {
+ int r;
+ struct btree_node *parent = dm_block_data(shadow_current(s));
+@@ -377,17 +370,17 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
+ /*
+ * FIXME: fill out an array?
+ */
+- r = init_child(info, parent, left_index, &left);
++ r = init_child(info, vt, parent, left_index, &left);
+ if (r)
+ return r;
+
+- r = init_child(info, parent, left_index + 1, &center);
++ r = init_child(info, vt, parent, left_index + 1, &center);
+ if (r) {
+ exit_child(info, &left);
+ return r;
+ }
+
+- r = init_child(info, parent, left_index + 2, &right);
++ r = init_child(info, vt, parent, left_index + 2, &right);
+ if (r) {
+ exit_child(info, &left);
+ exit_child(info, &center);
+@@ -434,7 +427,8 @@ static int get_nr_entries(struct dm_transaction_manager *tm,
+ }
+
+ static int rebalance_children(struct shadow_spine *s,
+- struct dm_btree_info *info, uint64_t key)
++ struct dm_btree_info *info,
++ struct dm_btree_value_type *vt, uint64_t key)
+ {
+ int i, r, has_left_sibling, has_right_sibling;
+ uint32_t child_entries;
+@@ -472,13 +466,13 @@ static int rebalance_children(struct shadow_spine *s,
+ has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);
+
+ if (!has_left_sibling)
+- r = rebalance2(s, info, i);
++ r = rebalance2(s, info, vt, i);
+
+ else if (!has_right_sibling)
+- r = rebalance2(s, info, i - 1);
++ r = rebalance2(s, info, vt, i - 1);
+
+ else
+- r = rebalance3(s, info, i - 1);
++ r = rebalance3(s, info, vt, i - 1);
+
+ return r;
+ }
+@@ -529,7 +523,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ if (le32_to_cpu(n->header.flags) & LEAF_NODE)
+ return do_leaf(n, key, index);
+
+- r = rebalance_children(s, info, key);
++ r = rebalance_children(s, info, vt, key);
+ if (r)
+ break;
+
+@@ -550,6 +544,14 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ return r;
+ }
+
++static struct dm_btree_value_type le64_type = {
++ .context = NULL,
++ .size = sizeof(__le64),
++ .inc = NULL,
++ .dec = NULL,
++ .equal = NULL
++};
++
+ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ uint64_t *keys, dm_block_t *new_root)
+ {
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 202ae34..63e3c47 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1715,6 +1715,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+
+ bond_compute_features(bond);
+
++ bond_update_speed_duplex(new_slave);
++
+ read_lock(&bond->lock);
+
+ new_slave->last_arp_rx = jiffies;
+@@ -1758,8 +1760,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ new_slave->link = BOND_LINK_DOWN;
+ }
+
+- bond_update_speed_duplex(new_slave);
+-
+ if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
+ /* if there is a primary slave, remember it */
+ if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
+@@ -2437,8 +2437,6 @@ static void bond_miimon_commit(struct bonding *bond)
+ bond_set_backup_slave(slave);
+ }
+
+- bond_update_speed_duplex(slave);
+-
+ pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
+ bond->dev->name, slave->dev->name,
+ slave->speed, slave->duplex ? "full" : "half");
+diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
+index a6611f1..5d5a05f 100644
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -650,25 +650,30 @@ static void efx_fini_channels(struct efx_nic *efx)
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
++ struct pci_dev *dev = efx->pci_dev;
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->port_enabled);
+
+- rc = efx_nic_flush_queues(efx);
+- if (rc && EFX_WORKAROUND_7803(efx)) {
+- /* Schedule a reset to recover from the flush failure. The
+- * descriptor caches reference memory we're about to free,
+- * but falcon_reconfigure_mac_wrapper() won't reconnect
+- * the MACs because of the pending reset. */
+- netif_err(efx, drv, efx->net_dev,
+- "Resetting to recover from flush failure\n");
+- efx_schedule_reset(efx, RESET_TYPE_ALL);
+- } else if (rc) {
+- netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+- } else {
+- netif_dbg(efx, drv, efx->net_dev,
+- "successfully flushed all queues\n");
++ /* Only perform flush if dma is enabled */
++ if (dev->is_busmaster) {
++ rc = efx_nic_flush_queues(efx);
++
++ if (rc && EFX_WORKAROUND_7803(efx)) {
++ /* Schedule a reset to recover from the flush failure. The
++ * descriptor caches reference memory we're about to free,
++ * but falcon_reconfigure_mac_wrapper() won't reconnect
++ * the MACs because of the pending reset. */
++ netif_err(efx, drv, efx->net_dev,
++ "Resetting to recover from flush failure\n");
++ efx_schedule_reset(efx, RESET_TYPE_ALL);
++ } else if (rc) {
++ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
++ } else {
++ netif_dbg(efx, drv, efx->net_dev,
++ "successfully flushed all queues\n");
++ }
+ }
+
+ efx_for_each_channel(channel, efx) {
+@@ -714,6 +719,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+ unsigned i;
+ int rc;
+
++ efx_device_detach_sync(efx);
+ efx_stop_all(efx);
+ efx_fini_channels(efx);
+
+@@ -757,6 +763,7 @@ out:
+
+ efx_init_channels(efx);
+ efx_start_all(efx);
++ netif_device_attach(efx->net_dev);
+ return rc;
+
+ rollback:
+@@ -1525,8 +1532,12 @@ static void efx_stop_all(struct efx_nic *efx)
+ /* Flush efx_mac_work(), refill_workqueue, monitor_work */
+ efx_flush_all(efx);
+
+- /* Stop the kernel transmit interface late, so the watchdog
+- * timer isn't ticking over the flush */
++ /* Stop the kernel transmit interface. This is only valid if
++ * the device is stopped or detached; otherwise the watchdog
++ * may fire immediately.
++ */
++ WARN_ON(netif_running(efx->net_dev) &&
++ netif_device_present(efx->net_dev));
+ if (efx_dev_registered(efx)) {
+ netif_tx_stop_all_queues(efx->net_dev);
+ netif_tx_lock_bh(efx->net_dev);
+@@ -1827,10 +1838,11 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
+ if (new_mtu > EFX_MAX_MTU)
+ return -EINVAL;
+
+- efx_stop_all(efx);
+-
+ netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
+
++ efx_device_detach_sync(efx);
++ efx_stop_all(efx);
++
+ efx_fini_channels(efx);
+
+ mutex_lock(&efx->mac_lock);
+@@ -1843,6 +1855,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
+ efx_init_channels(efx);
+
+ efx_start_all(efx);
++ netif_device_attach(efx->net_dev);
+ return rc;
+ }
+
+@@ -2132,7 +2145,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
+ netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
+ RESET_TYPE(method));
+
+- netif_device_detach(efx->net_dev);
++ efx_device_detach_sync(efx);
+ efx_reset_down(efx, method);
+
+ rc = efx->type->reset(efx, method);
+@@ -2580,7 +2593,7 @@ static int efx_pm_freeze(struct device *dev)
+
+ efx->state = STATE_FINI;
+
+- netif_device_detach(efx->net_dev);
++ efx_device_detach_sync(efx);
+
+ efx_stop_all(efx);
+ efx_fini_channels(efx);
+diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
+index 1355245..9668d29 100644
+--- a/drivers/net/ethernet/sfc/efx.h
++++ b/drivers/net/ethernet/sfc/efx.h
+@@ -149,4 +149,17 @@ extern void efx_link_status_changed(struct efx_nic *efx);
+ extern void efx_link_set_advertising(struct efx_nic *efx, u32);
+ extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
+
++static inline void efx_device_detach_sync(struct efx_nic *efx)
++{
++ struct net_device *dev = efx->net_dev;
++
++ /* Lock/freeze all TX queues so that we can be sure the
++ * TX scheduler is stopped when we're done and before
++ * netif_device_present() becomes false.
++ */
++ netif_tx_lock_bh(dev);
++ netif_device_detach(dev);
++ netif_tx_unlock_bh(dev);
++}
++
+ #endif /* EFX_EFX_H */
+diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
+index 97b606b..26cd6c0 100644
+--- a/drivers/net/ethernet/sfc/falcon.c
++++ b/drivers/net/ethernet/sfc/falcon.c
+@@ -1762,6 +1762,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
+ .remove_port = falcon_remove_port,
+ .handle_global_event = falcon_handle_global_event,
+ .prepare_flush = falcon_prepare_flush,
++ .finish_flush = efx_port_dummy_op_void,
+ .update_stats = falcon_update_nic_stats,
+ .start_stats = falcon_start_nic_stats,
+ .stop_stats = falcon_stop_nic_stats,
+@@ -1804,6 +1805,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
+ .remove_port = falcon_remove_port,
+ .handle_global_event = falcon_handle_global_event,
+ .prepare_flush = falcon_prepare_flush,
++ .finish_flush = efx_port_dummy_op_void,
+ .update_stats = falcon_update_nic_stats,
+ .start_stats = falcon_start_nic_stats,
+ .stop_stats = falcon_stop_nic_stats,
+diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
+index 81a4253..c1000ce 100644
+--- a/drivers/net/ethernet/sfc/mcdi.c
++++ b/drivers/net/ethernet/sfc/mcdi.c
+@@ -30,7 +30,7 @@
+ #define REBOOT_FLAG_PORT0 0x3f8
+ #define REBOOT_FLAG_PORT1 0x3fc
+
+-#define MCDI_RPC_TIMEOUT 10 /*seconds */
++#define MCDI_RPC_TIMEOUT (10 * HZ)
+
+ #define MCDI_PDU(efx) \
+ (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0)
+@@ -120,7 +120,7 @@ static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
+ static int efx_mcdi_poll(struct efx_nic *efx)
+ {
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+- unsigned int time, finish;
++ unsigned long time, finish;
+ unsigned int respseq, respcmd, error;
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned int rc, spins;
+@@ -136,7 +136,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
+ * and poll once a jiffy (approximately)
+ */
+ spins = TICK_USEC;
+- finish = get_seconds() + MCDI_RPC_TIMEOUT;
++ finish = jiffies + MCDI_RPC_TIMEOUT;
+
+ while (1) {
+ if (spins != 0) {
+@@ -146,7 +146,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
+ schedule_timeout_uninterruptible(1);
+ }
+
+- time = get_seconds();
++ time = jiffies;
+
+ rmb();
+ efx_readd(efx, &reg, pdu);
+@@ -158,7 +158,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
+ EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
+ break;
+
+- if (time >= finish)
++ if (time_after(time, finish))
+ return -ETIMEDOUT;
+ }
+
+@@ -250,7 +250,7 @@ static int efx_mcdi_await_completion(struct efx_nic *efx)
+ if (wait_event_timeout(
+ mcdi->wq,
+ atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
+- msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0)
++ MCDI_RPC_TIMEOUT) == 0)
+ return -ETIMEDOUT;
+
+ /* Check if efx_mcdi_set_mode() switched us back to polled completions.
+@@ -666,9 +666,8 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+ u16 *fw_subtype_list)
+ {
+ uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN];
+- size_t outlen;
++ size_t outlen, offset, i;
+ int port_num = efx_port_num(efx);
+- int offset;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
+@@ -688,10 +687,16 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+ : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
+ if (mac_address)
+ memcpy(mac_address, outbuf + offset, ETH_ALEN);
+- if (fw_subtype_list)
+- memcpy(fw_subtype_list,
+- outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
+- MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN);
++ if (fw_subtype_list) {
++ offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST;
++ for (i = 0;
++ i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN / 2;
++ i++) {
++ fw_subtype_list[i] =
++ le16_to_cpup((__le16 *)(outbuf + offset));
++ offset += 2;
++ }
++ }
+
+ return 0;
+
+diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
+index aced2a7..b61eea0 100644
+--- a/drivers/net/ethernet/sfc/mcdi.h
++++ b/drivers/net/ethernet/sfc/mcdi.h
+@@ -126,5 +126,6 @@ extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
+ extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
+ extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
+ extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
++extern int efx_mcdi_set_mac(struct efx_nic *efx);
+
+ #endif /* EFX_MCDI_H */
+diff --git a/drivers/net/ethernet/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c
+index 50c2077..da269d7 100644
+--- a/drivers/net/ethernet/sfc/mcdi_mac.c
++++ b/drivers/net/ethernet/sfc/mcdi_mac.c
+@@ -13,7 +13,7 @@
+ #include "mcdi.h"
+ #include "mcdi_pcol.h"
+
+-static int efx_mcdi_set_mac(struct efx_nic *efx)
++int efx_mcdi_set_mac(struct efx_nic *efx)
+ {
+ u32 reject, fcntl;
+ u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];
+@@ -45,6 +45,8 @@ static int efx_mcdi_set_mac(struct efx_nic *efx)
+ }
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ fcntl = MC_CMD_FCNTL_AUTO;
++ if (efx->fc_disable)
++ fcntl = MC_CMD_FCNTL_OFF;
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
+
+diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
+index b8e251a..8bcb8fd 100644
+--- a/drivers/net/ethernet/sfc/net_driver.h
++++ b/drivers/net/ethernet/sfc/net_driver.h
+@@ -213,6 +213,7 @@ struct efx_tx_queue {
+ * If both this and page are %NULL, the buffer slot is currently free.
+ * @page: The associated page buffer, if any.
+ * If both this and skb are %NULL, the buffer slot is currently free.
++ * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
+ * @len: Buffer length, in bytes.
+ * @is_page: Indicates if @page is valid. If false, @skb is valid.
+ */
+@@ -222,7 +223,8 @@ struct efx_rx_buffer {
+ struct sk_buff *skb;
+ struct page *page;
+ } u;
+- unsigned int len;
++ u16 page_offset;
++ u16 len;
+ bool is_page;
+ };
+
+@@ -689,6 +691,9 @@ struct efx_filter_state;
+ * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
+ * @multicast_hash: Multicast hash table
+ * @wanted_fc: Wanted flow control flags
++ * @fc_disable: When non-zero flow control is disabled. Typically used to
++ * ensure that network back pressure doesn't delay dma queue flushes.
++ * Serialised by the rtnl lock.
+ * @mac_work: Work item for changing MAC promiscuity and multicast hash
+ * @loopback_mode: Loopback status
+ * @loopback_modes: Supported loopback mode bitmask
+@@ -782,6 +787,7 @@ struct efx_nic {
+ bool promiscuous;
+ union efx_multicast_hash multicast_hash;
+ u8 wanted_fc;
++ unsigned fc_disable;
+
+ atomic_t rx_reset;
+ enum efx_loopback_mode loopback_mode;
+@@ -835,6 +841,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
+ * @remove_port: Free resources allocated by probe_port()
+ * @handle_global_event: Handle a "global" event (may be %NULL)
+ * @prepare_flush: Prepare the hardware for flushing the DMA queues
++ * @finish_flush: Clean up after flushing the DMA queues
+ * @update_stats: Update statistics not provided by event handling
+ * @start_stats: Start the regular fetching of statistics
+ * @stop_stats: Stop the regular fetching of statistics
+@@ -880,6 +887,7 @@ struct efx_nic_type {
+ void (*remove_port)(struct efx_nic *efx);
+ bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
+ void (*prepare_flush)(struct efx_nic *efx);
++ void (*finish_flush)(struct efx_nic *efx);
+ void (*update_stats)(struct efx_nic *efx);
+ void (*start_stats)(struct efx_nic *efx);
+ void (*stop_stats)(struct efx_nic *efx);
+diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
+index 3edfbaf..2e9ca10 100644
+--- a/drivers/net/ethernet/sfc/nic.c
++++ b/drivers/net/ethernet/sfc/nic.c
+@@ -371,7 +371,8 @@ efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
+ return false;
+
+ tx_queue->empty_read_count = 0;
+- return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
++ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
++ && tx_queue->write_count - write_count == 1;
+ }
+
+ /* For each entry inserted into the software descriptor ring, create a
+@@ -1261,13 +1262,27 @@ int efx_nic_flush_queues(struct efx_nic *efx)
+ }
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->initialised &&
+- tx_queue->flushed != FLUSH_DONE)
+- ++tx_pending;
++ tx_queue->flushed != FLUSH_DONE) {
++ efx_oword_t txd_ptr_tbl;
++
++ efx_reado_table(efx, &txd_ptr_tbl,
++ FR_BZ_TX_DESC_PTR_TBL,
++ tx_queue->queue);
++ if (EFX_OWORD_FIELD(txd_ptr_tbl,
++ FRF_AZ_TX_DESCQ_FLUSH) ||
++ EFX_OWORD_FIELD(txd_ptr_tbl,
++ FRF_AZ_TX_DESCQ_EN))
++ ++tx_pending;
++ else
++ tx_queue->flushed = FLUSH_DONE;
++ }
+ }
+ }
+
+- if (rx_pending == 0 && tx_pending == 0)
++ if (rx_pending == 0 && tx_pending == 0) {
++ efx->type->finish_flush(efx);
+ return 0;
++ }
+
+ msleep(EFX_FLUSH_INTERVAL);
+ efx_poll_flush_events(efx);
+@@ -1293,6 +1308,7 @@ int efx_nic_flush_queues(struct efx_nic *efx)
+ }
+ }
+
++ efx->type->finish_flush(efx);
+ return -ETIMEDOUT;
+ }
+
+diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
+index 66ece48..58302a2 100644
+--- a/drivers/net/ethernet/sfc/nic.h
++++ b/drivers/net/ethernet/sfc/nic.h
+@@ -210,6 +210,8 @@ extern void falcon_irq_ack_a1(struct efx_nic *efx);
+
+ /* Global Resources */
+ extern int efx_nic_flush_queues(struct efx_nic *efx);
++extern void siena_prepare_flush(struct efx_nic *efx);
++extern void siena_finish_flush(struct efx_nic *efx);
+ extern void falcon_start_nic_stats(struct efx_nic *efx);
+ extern void falcon_stop_nic_stats(struct efx_nic *efx);
+ extern void falcon_setup_xaui(struct efx_nic *efx);
+diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
+index 5ef4cc0..9ce8665 100644
+--- a/drivers/net/ethernet/sfc/rx.c
++++ b/drivers/net/ethernet/sfc/rx.c
+@@ -95,11 +95,7 @@ static unsigned int rx_refill_limit = 95;
+ static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
+ struct efx_rx_buffer *buf)
+ {
+- /* Offset is always within one page, so we don't need to consider
+- * the page order.
+- */
+- return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
+- efx->type->rx_buffer_hash_size);
++ return buf->page_offset + efx->type->rx_buffer_hash_size;
+ }
+ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
+ {
+@@ -194,6 +190,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
+ struct efx_rx_buffer *rx_buf;
+ struct page *page;
+ void *page_addr;
++ unsigned int page_offset;
+ struct efx_rx_page_state *state;
+ dma_addr_t dma_addr;
+ unsigned index, count;
+@@ -220,12 +217,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
+
+ page_addr += sizeof(struct efx_rx_page_state);
+ dma_addr += sizeof(struct efx_rx_page_state);
++ page_offset = sizeof(struct efx_rx_page_state);
+
+ split:
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->u.page = page;
++ rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
+ rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+ rx_buf->is_page = true;
+ ++rx_queue->added_count;
+@@ -237,6 +236,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
+ get_page(page);
+ dma_addr += (PAGE_SIZE >> 1);
+ page_addr += (PAGE_SIZE >> 1);
++ page_offset += (PAGE_SIZE >> 1);
+ ++count;
+ goto split;
+ }
+@@ -246,7 +246,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
+ }
+
+ static void efx_unmap_rx_buffer(struct efx_nic *efx,
+- struct efx_rx_buffer *rx_buf)
++ struct efx_rx_buffer *rx_buf,
++ unsigned int used_len)
+ {
+ if (rx_buf->is_page && rx_buf->u.page) {
+ struct efx_rx_page_state *state;
+@@ -257,6 +258,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
+ state->dma_addr,
+ efx_rx_buf_size(efx),
+ PCI_DMA_FROMDEVICE);
++ } else if (used_len) {
++ dma_sync_single_for_cpu(&efx->pci_dev->dev,
++ rx_buf->dma_addr, used_len,
++ DMA_FROM_DEVICE);
+ }
+ } else if (!rx_buf->is_page && rx_buf->u.skb) {
+ pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
+@@ -279,7 +284,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
+ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
+ {
+- efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
++ efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
+ efx_free_rx_buffer(rx_queue->efx, rx_buf);
+ }
+
+@@ -550,10 +555,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+ goto out;
+ }
+
+- /* Release card resources - assumes all RX buffers consumed in-order
+- * per RX queue
++ /* Release and/or sync DMA mapping - assumes all RX buffers
++ * consumed in-order per RX queue
+ */
+- efx_unmap_rx_buffer(efx, rx_buf);
++ efx_unmap_rx_buffer(efx, rx_buf, len);
+
+ /* Prefetch nice and early so data will (hopefully) be in cache by
+ * the time we look at it.
+diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
+index 822f6c2..4907885 100644
+--- a/drivers/net/ethernet/sfc/selftest.c
++++ b/drivers/net/ethernet/sfc/selftest.c
+@@ -698,7 +698,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
+ /* Detach the device so the kernel doesn't transmit during the
+ * loopback test and the watchdog timeout doesn't fire.
+ */
+- netif_device_detach(efx->net_dev);
++ efx_device_detach_sync(efx);
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->loopback_modes) {
+diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
+index cc2549c..c58b973 100644
+--- a/drivers/net/ethernet/sfc/siena.c
++++ b/drivers/net/ethernet/sfc/siena.c
+@@ -137,6 +137,18 @@ static void siena_remove_port(struct efx_nic *efx)
+ efx_nic_free_buffer(efx, &efx->stats_buffer);
+ }
+
++void siena_prepare_flush(struct efx_nic *efx)
++{
++ if (efx->fc_disable++ == 0)
++ efx_mcdi_set_mac(efx);
++}
++
++void siena_finish_flush(struct efx_nic *efx)
++{
++ if (--efx->fc_disable == 0)
++ efx_mcdi_set_mac(efx);
++}
++
+ static const struct efx_nic_register_test siena_register_tests[] = {
+ { FR_AZ_ADR_REGION,
+ EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
+@@ -624,7 +636,8 @@ const struct efx_nic_type siena_a0_nic_type = {
+ .reset = siena_reset_hw,
+ .probe_port = siena_probe_port,
+ .remove_port = siena_remove_port,
+- .prepare_flush = efx_port_dummy_op_void,
++ .prepare_flush = siena_prepare_flush,
++ .finish_flush = siena_finish_flush,
+ .update_stats = siena_update_nic_stats,
+ .start_stats = siena_start_nic_stats,
+ .stop_stats = siena_stop_nic_stats,
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 97f342e..544ac06 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -585,6 +585,7 @@ void macvlan_common_setup(struct net_device *dev)
+ ether_setup(dev);
+
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
++ dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->netdev_ops = &macvlan_netdev_ops;
+ dev->destructor = free_netdev;
+ dev->header_ops = &macvlan_hard_header_ops,
+diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
+index 01b104e..bff6908 100644
+--- a/drivers/net/netconsole.c
++++ b/drivers/net/netconsole.c
+@@ -630,6 +630,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
+ goto done;
+
+ spin_lock_irqsave(&target_list_lock, flags);
++restart:
+ list_for_each_entry(nt, &target_list, list) {
+ netconsole_target_get(nt);
+ if (nt->np.dev == dev) {
+@@ -642,20 +643,17 @@ static int netconsole_netdev_event(struct notifier_block *this,
+ case NETDEV_UNREGISTER:
+ /*
+ * rtnl_lock already held
++ * we might sleep in __netpoll_cleanup()
+ */
+- if (nt->np.dev) {
+- spin_unlock_irqrestore(
+- &target_list_lock,
+- flags);
+- __netpoll_cleanup(&nt->np);
+- spin_lock_irqsave(&target_list_lock,
+- flags);
+- dev_put(nt->np.dev);
+- nt->np.dev = NULL;
+- }
++ spin_unlock_irqrestore(&target_list_lock, flags);
++ __netpoll_cleanup(&nt->np);
++ spin_lock_irqsave(&target_list_lock, flags);
++ dev_put(nt->np.dev);
++ nt->np.dev = NULL;
+ nt->enabled = 0;
+ stopped = true;
+- break;
++ netconsole_target_put(nt);
++ goto restart;
+ }
+ }
+ netconsole_target_put(nt);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index a12c9bf..f4c5de6 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -417,6 +417,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
+ * for indefinite time. */
+ skb_orphan(skb);
+
++ nf_reset(skb);
++
+ /* Enqueue packet */
+ skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
+
+diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
+index 62b4c29..d48dfb7 100644
+--- a/drivers/net/wireless/mwifiex/join.c
++++ b/drivers/net/wireless/mwifiex/join.c
+@@ -1062,10 +1062,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
+ adhoc_join->bss_descriptor.bssid,
+ adhoc_join->bss_descriptor.ssid);
+
+- for (i = 0; bss_desc->supported_rates[i] &&
+- i < MWIFIEX_SUPPORTED_RATES;
+- i++)
+- ;
++ for (i = 0; i < MWIFIEX_SUPPORTED_RATES &&
++ bss_desc->supported_rates[i]; i++)
++ ;
+ rates_size = i;
+
+ /* Copy Data Rates from the Rates recorded in scan response */
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+index 814c05d..d3920da 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+@@ -1557,74 +1557,57 @@ void rtl92cu_card_disable(struct ieee80211_hw *hw)
+
+ void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
+ {
+- /* dummy routine needed for callback from rtl_op_configure_filter() */
+-}
+-
+-/*========================================================================== */
+-
+-static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw,
+- enum nl80211_iftype type)
+-{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+- u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+- struct rtl_phy *rtlphy = &(rtlpriv->phy);
+- u8 filterout_non_associated_bssid = false;
++ u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+
+- switch (type) {
+- case NL80211_IFTYPE_ADHOC:
+- case NL80211_IFTYPE_STATION:
+- filterout_non_associated_bssid = true;
+- break;
+- case NL80211_IFTYPE_UNSPECIFIED:
+- case NL80211_IFTYPE_AP:
+- default:
+- break;
+- }
+- if (filterout_non_associated_bssid) {
++ if (rtlpriv->psc.rfpwr_state != ERFON)
++ return;
++
++ if (check_bssid) {
++ u8 tmp;
+ if (IS_NORMAL_CHIP(rtlhal->version)) {
+- switch (rtlphy->current_io_type) {
+- case IO_CMD_RESUME_DM_BY_SCAN:
+- reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+- rtlpriv->cfg->ops->set_hw_reg(hw,
+- HW_VAR_RCR, (u8 *)(&reg_rcr));
+- /* enable update TSF */
+- _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
+- break;
+- case IO_CMD_PAUSE_DM_BY_SCAN:
+- reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+- rtlpriv->cfg->ops->set_hw_reg(hw,
+- HW_VAR_RCR, (u8 *)(&reg_rcr));
+- /* disable update TSF */
+- _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+- break;
+- }
++ reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
++ tmp = BIT(4);
+ } else {
+- reg_rcr |= (RCR_CBSSID);
+- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+- (u8 *)(&reg_rcr));
+- _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5)));
++ reg_rcr |= RCR_CBSSID;
++ tmp = BIT(4) | BIT(5);
+ }
+- } else if (filterout_non_associated_bssid == false) {
++ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
++ (u8 *) (&reg_rcr));
++ _rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp);
++ } else {
++ u8 tmp;
+ if (IS_NORMAL_CHIP(rtlhal->version)) {
+- reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
+- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+- (u8 *)(&reg_rcr));
+- _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
++ reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
++ tmp = BIT(4);
+ } else {
+- reg_rcr &= (~RCR_CBSSID);
+- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+- (u8 *)(&reg_rcr));
+- _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0);
++ reg_rcr &= ~RCR_CBSSID;
++ tmp = BIT(4) | BIT(5);
+ }
++ reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
++ rtlpriv->cfg->ops->set_hw_reg(hw,
++ HW_VAR_RCR, (u8 *) (&reg_rcr));
++ _rtl92cu_set_bcn_ctrl_reg(hw, tmp, 0);
+ }
+ }
+
++/*========================================================================== */
++
+ int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+ {
++ struct rtl_priv *rtlpriv = rtl_priv(hw);
++
+ if (_rtl92cu_set_media_status(hw, type))
+ return -EOPNOTSUPP;
+- _rtl92cu_set_check_bssid(hw, type);
++
++ if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
++ if (type != NL80211_IFTYPE_AP)
++ rtl92cu_set_check_bssid(hw, true);
++ } else {
++ rtl92cu_set_check_bssid(hw, false);
++ }
++
+ return 0;
+ }
+
+@@ -2238,8 +2221,6 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
+ (shortgi_rate << 4) | (shortgi_rate);
+ }
+ rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("%x\n", rtl_read_dword(rtlpriv,
+- REG_ARFR0)));
+ }
+
+ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index e18604b..d19b879 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -49,7 +49,6 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
+ tty->packet = 0;
+ if (!tty->link)
+ return;
+- tty->link->packet = 0;
+ set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
+ wake_up_interruptible(&tty->link->read_wait);
+ wake_up_interruptible(&tty->link->write_wait);
+diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
+index ad0f8f5..2dc4d9b 100644
+--- a/drivers/tty/serial/sunsu.c
++++ b/drivers/tty/serial/sunsu.c
+@@ -968,6 +968,7 @@ static struct uart_ops sunsu_pops = {
+ #define UART_NR 4
+
+ static struct uart_sunsu_port sunsu_ports[UART_NR];
++static int nr_inst; /* Number of already registered ports */
+
+ #ifdef CONFIG_SERIO
+
+@@ -1337,13 +1338,8 @@ static int __init sunsu_console_setup(struct console *co, char *options)
+ printk("Console: ttyS%d (SU)\n",
+ (sunsu_reg.minor - 64) + co->index);
+
+- /*
+- * Check whether an invalid uart number has been specified, and
+- * if so, search for the first available port that does have
+- * console support.
+- */
+- if (co->index >= UART_NR)
+- co->index = 0;
++ if (co->index > nr_inst)
++ return -ENODEV;
+ port = &sunsu_ports[co->index].port;
+
+ /*
+@@ -1408,7 +1404,6 @@ static enum su_type __devinit su_get_type(struct device_node *dp)
+
+ static int __devinit su_probe(struct platform_device *op)
+ {
+- static int inst;
+ struct device_node *dp = op->dev.of_node;
+ struct uart_sunsu_port *up;
+ struct resource *rp;
+@@ -1418,16 +1413,16 @@ static int __devinit su_probe(struct platform_device *op)
+
+ type = su_get_type(dp);
+ if (type == SU_PORT_PORT) {
+- if (inst >= UART_NR)
++ if (nr_inst >= UART_NR)
+ return -EINVAL;
+- up = &sunsu_ports[inst];
++ up = &sunsu_ports[nr_inst];
+ } else {
+ up = kzalloc(sizeof(*up), GFP_KERNEL);
+ if (!up)
+ return -ENOMEM;
+ }
+
+- up->port.line = inst;
++ up->port.line = nr_inst;
+
+ spin_lock_init(&up->port.lock);
+
+@@ -1461,6 +1456,8 @@ static int __devinit su_probe(struct platform_device *op)
+ }
+ dev_set_drvdata(&op->dev, up);
+
++ nr_inst++;
++
+ return 0;
+ }
+
+@@ -1488,7 +1485,7 @@ static int __devinit su_probe(struct platform_device *op)
+
+ dev_set_drvdata(&op->dev, up);
+
+- inst++;
++ nr_inst++;
+
+ return 0;
+
+diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
+index 61d08dd..76be3ba 100644
+--- a/drivers/usb/core/hcd-pci.c
++++ b/drivers/usb/core/hcd-pci.c
+@@ -173,6 +173,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ struct hc_driver *driver;
+ struct usb_hcd *hcd;
+ int retval;
++ int hcd_irq = 0;
+
+ if (usb_disabled())
+ return -ENODEV;
+@@ -187,15 +188,19 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ return -ENODEV;
+ dev->current_state = PCI_D0;
+
+- /* The xHCI driver supports MSI and MSI-X,
+- * so don't fail if the BIOS doesn't provide a legacy IRQ.
++ /*
++ * The xHCI driver has its own irq management
++ * make sure irq setup is not touched for xhci in generic hcd code
+ */
+- if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) {
+- dev_err(&dev->dev,
+- "Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
+- pci_name(dev));
+- retval = -ENODEV;
+- goto disable_pci;
++ if ((driver->flags & HCD_MASK) != HCD_USB3) {
++ if (!dev->irq) {
++ dev_err(&dev->dev,
++ "Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
++ pci_name(dev));
++ retval = -ENODEV;
++ goto disable_pci;
++ }
++ hcd_irq = dev->irq;
+ }
+
+ hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev));
+@@ -245,7 +250,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+
+ pci_set_master(dev);
+
+- retval = usb_add_hcd(hcd, dev->irq, IRQF_SHARED);
++ retval = usb_add_hcd(hcd, hcd_irq, IRQF_SHARED);
+ if (retval != 0)
+ goto unmap_registers;
+ set_hs_companion(dev, hcd);
+diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
+index 901924a..d433fdf 100644
+--- a/drivers/usb/gadget/udc-core.c
++++ b/drivers/usb/gadget/udc-core.c
+@@ -213,7 +213,7 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
+ udc->driver->disconnect(udc->gadget);
+ usb_gadget_disconnect(udc->gadget);
+ udc->driver->unbind(udc->gadget);
+- usb_gadget_udc_stop(udc->gadget, udc->driver);
++ usb_gadget_udc_stop(udc->gadget, NULL);
+ } else {
+ usb_gadget_stop(udc->gadget, udc->driver);
+ }
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 53c8be1..2c0350f 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -342,7 +342,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
+ * generate interrupts. Don't even try to enable MSI.
+ */
+ if (xhci->quirks & XHCI_BROKEN_MSI)
+- return 0;
++ goto legacy_irq;
+
+ /* unregister the legacy interrupt */
+ if (hcd->irq)
+@@ -363,6 +363,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
+ return -EINVAL;
+ }
+
++ legacy_irq:
+ /* fall back to legacy interrupt*/
+ ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
+ hcd->irq_descr, hcd);
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index cc368c2..c519a31 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -206,8 +206,8 @@ struct xhci_op_regs {
+ /* bits 12:31 are reserved (and should be preserved on writes). */
+
+ /* IMAN - Interrupt Management Register */
+-#define IMAN_IP (1 << 1)
+-#define IMAN_IE (1 << 0)
++#define IMAN_IE (1 << 1)
++#define IMAN_IP (1 << 0)
+
+ /* USBSTS - USB status - status bitmasks */
+ /* HC not running - set to 1 when run/stop bit is cleared. */
+diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
+index 1a49ca9..e664bac 100644
+--- a/drivers/usb/serial/garmin_gps.c
++++ b/drivers/usb/serial/garmin_gps.c
+@@ -973,10 +973,7 @@ static void garmin_close(struct usb_serial_port *port)
+ if (!serial)
+ return;
+
+- mutex_lock(&port->serial->disc_mutex);
+-
+- if (!port->serial->disconnected)
+- garmin_clear(garmin_data_p);
++ garmin_clear(garmin_data_p);
+
+ /* shutdown our urbs */
+ usb_kill_urb(port->read_urb);
+@@ -985,8 +982,6 @@ static void garmin_close(struct usb_serial_port *port)
+ /* keep reset state so we know that we must start a new session */
+ if (garmin_data_p->state != STATE_RESET)
+ garmin_data_p->state = STATE_DISCONNECTED;
+-
+- mutex_unlock(&port->serial->disc_mutex);
+ }
+
+
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index 3de751d..1f145bf 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -2796,6 +2796,7 @@ static struct usb_serial_driver edgeport_2port_device = {
+ .set_termios = edge_set_termios,
+ .tiocmget = edge_tiocmget,
+ .tiocmset = edge_tiocmset,
++ .get_icount = edge_get_icount,
+ .write = edge_write,
+ .write_room = edge_write_room,
+ .chars_in_buffer = edge_chars_in_buffer,
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index dc1ce62..2482d5e 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -168,6 +168,7 @@ static void destroy_serial(struct kref *kref)
+ }
+ }
+
++ usb_put_intf(serial->interface);
+ usb_put_dev(serial->dev);
+ kfree(serial);
+ }
+@@ -624,7 +625,7 @@ static struct usb_serial *create_serial(struct usb_device *dev,
+ }
+ serial->dev = usb_get_dev(dev);
+ serial->type = driver;
+- serial->interface = interface;
++ serial->interface = usb_get_intf(interface);
+ kref_init(&serial->kref);
+ mutex_init(&serial->disc_mutex);
+ serial->minor = SERIAL_TTY_NO_MINOR;
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index fa8a1b2..7b8d564 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -488,6 +488,13 @@ UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x9999,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG),
+
++/* Added by Dmitry Artamonow <mad_soft@inbox.ru> */
++UNUSUAL_DEV( 0x04e8, 0x5136, 0x0000, 0x9999,
++ "Samsung",
++ "YP-Z3",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_MAX_SECTORS_64),
++
+ /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
+ * Device uses standards-violating 32-byte Bulk Command Block Wrappers and
+ * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index b76071e..5c58128 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -234,7 +234,8 @@ static void handle_tx(struct vhost_net *net)
+ msg.msg_controllen = 0;
+ ubufs = NULL;
+ } else {
+- struct ubuf_info *ubuf = &vq->ubuf_info[head];
++ struct ubuf_info *ubuf;
++ ubuf = vq->ubuf_info + vq->upend_idx;
+
+ vq->heads[vq->upend_idx].len = len;
+ ubuf->callback = vhost_zerocopy_callback;
+diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
+index cfd1ce3..1d36db1 100644
+--- a/fs/cifs/asn1.c
++++ b/fs/cifs/asn1.c
+@@ -614,53 +614,10 @@ decode_negTokenInit(unsigned char *security_blob, int length,
+ }
+ }
+
+- /* mechlistMIC */
+- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+- /* Check if we have reached the end of the blob, but with
+- no mechListMic (e.g. NTLMSSP instead of KRB5) */
+- if (ctx.error == ASN1_ERR_DEC_EMPTY)
+- goto decode_negtoken_exit;
+- cFYI(1, "Error decoding last part negTokenInit exit3");
+- return 0;
+- } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
+- /* tag = 3 indicating mechListMIC */
+- cFYI(1, "Exit 4 cls = %d con = %d tag = %d end = %p (%d)",
+- cls, con, tag, end, *end);
+- return 0;
+- }
+-
+- /* sequence */
+- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+- cFYI(1, "Error decoding last part negTokenInit exit5");
+- return 0;
+- } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
+- || (tag != ASN1_SEQ)) {
+- cFYI(1, "cls = %d con = %d tag = %d end = %p (%d)",
+- cls, con, tag, end, *end);
+- }
+-
+- /* sequence of */
+- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+- cFYI(1, "Error decoding last part negTokenInit exit 7");
+- return 0;
+- } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
+- cFYI(1, "Exit 8 cls = %d con = %d tag = %d end = %p (%d)",
+- cls, con, tag, end, *end);
+- return 0;
+- }
+-
+- /* general string */
+- if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
+- cFYI(1, "Error decoding last part negTokenInit exit9");
+- return 0;
+- } else if ((cls != ASN1_UNI) || (con != ASN1_PRI)
+- || (tag != ASN1_GENSTR)) {
+- cFYI(1, "Exit10 cls = %d con = %d tag = %d end = %p (%d)",
+- cls, con, tag, end, *end);
+- return 0;
+- }
+- cFYI(1, "Need to call asn1_octets_decode() function for %s",
+- ctx.pointer); /* is this UTF-8 or ASCII? */
+-decode_negtoken_exit:
++ /*
++ * We currently ignore anything at the end of the SPNEGO blob after
++ * the mechTypes have been parsed, since none of that info is
++ * used at the moment.
++ */
+ return 1;
+ }
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index b3a2a40..25bb97f 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -90,6 +90,30 @@ extern mempool_t *cifs_sm_req_poolp;
+ extern mempool_t *cifs_req_poolp;
+ extern mempool_t *cifs_mid_poolp;
+
++/*
++ * Bumps refcount for cifs super block.
++ * Note that it should be only called if a referece to VFS super block is
++ * already held, e.g. in open-type syscalls context. Otherwise it can race with
++ * atomic_dec_and_test in deactivate_locked_super.
++ */
++void
++cifs_sb_active(struct super_block *sb)
++{
++ struct cifs_sb_info *server = CIFS_SB(sb);
++
++ if (atomic_inc_return(&server->active) == 1)
++ atomic_inc(&sb->s_active);
++}
++
++void
++cifs_sb_deactive(struct super_block *sb)
++{
++ struct cifs_sb_info *server = CIFS_SB(sb);
++
++ if (atomic_dec_and_test(&server->active))
++ deactivate_super(sb);
++}
++
+ static int
+ cifs_read_super(struct super_block *sb)
+ {
+diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
+index 30ff560..c91ea81 100644
+--- a/fs/cifs/cifsfs.h
++++ b/fs/cifs/cifsfs.h
+@@ -41,6 +41,10 @@ extern struct file_system_type cifs_fs_type;
+ extern const struct address_space_operations cifs_addr_ops;
+ extern const struct address_space_operations cifs_addr_ops_smallbuf;
+
++/* Functions related to super block operations */
++extern void cifs_sb_active(struct super_block *sb);
++extern void cifs_sb_deactive(struct super_block *sb);
++
+ /* Functions related to inodes */
+ extern const struct inode_operations cifs_dir_inode_ops;
+ extern struct inode *cifs_root_iget(struct super_block *);
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 51574d4..c55808e 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -265,6 +265,8 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
+ mutex_init(&pCifsFile->fh_mutex);
+ INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
+
++ cifs_sb_active(inode->i_sb);
++
+ spin_lock(&cifs_file_list_lock);
+ list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
+ /* if readable file instance put first in list*/
+@@ -293,7 +295,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+ struct inode *inode = cifs_file->dentry->d_inode;
+ struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
++ struct super_block *sb = inode->i_sb;
++ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct cifsLockInfo *li, *tmp;
+
+ spin_lock(&cifs_file_list_lock);
+@@ -345,6 +348,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+
+ cifs_put_tlink(cifs_file->tlink);
+ dput(cifs_file->dentry);
++ cifs_sb_deactive(sb);
+ kfree(cifs_file);
+ }
+
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 484ffee..2845a1f 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -571,7 +571,7 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
+ brelse(bitmap_bh);
+ printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
+ ", computed = %llu, %llu\n",
+- EXT4_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
++ EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
+ desc_count, bitmap_count);
+ return bitmap_count;
+ #else
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 8cb184c..60b6ca5 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -309,9 +309,9 @@ struct ext4_group_desc
+ */
+
+ struct flex_groups {
+- atomic_t free_inodes;
+- atomic_t free_clusters;
+- atomic_t used_dirs;
++ atomic64_t free_clusters;
++ atomic_t free_inodes;
++ atomic_t used_dirs;
+ };
+
+ #define EXT4_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index b48e0dc..ce0bc25 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2960,6 +2960,7 @@ static int ext4_split_extent(handle_t *handle,
+ int err = 0;
+ int uninitialized;
+ int split_flag1, flags1;
++ int allocated = map->m_len;
+
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+@@ -2979,6 +2980,8 @@ static int ext4_split_extent(handle_t *handle,
+ map->m_lblk + map->m_len, split_flag1, flags1);
+ if (err)
+ goto out;
++ } else {
++ allocated = ee_len - (map->m_lblk - ee_block);
+ }
+
+ ext4_ext_drop_refs(path);
+@@ -3001,7 +3004,7 @@ static int ext4_split_extent(handle_t *handle,
+
+ ext4_ext_show_leaf(inode, path);
+ out:
+- return err ? err : map->m_len;
++ return err ? err : allocated;
+ }
+
+ #define EXT4_EXT_ZERO_LEN 7
+@@ -3663,6 +3666,7 @@ out:
+ allocated - map->m_len);
+ allocated = map->m_len;
+ }
++ map->m_len = allocated;
+
+ /*
+ * If we have done fallocate with the offset that is already
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 6266799..6d1f577 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -294,8 +294,8 @@ error_return:
+ }
+
+ struct orlov_stats {
++ __u64 free_clusters;
+ __u32 free_inodes;
+- __u32 free_clusters;
+ __u32 used_dirs;
+ };
+
+@@ -312,7 +312,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
+
+ if (flex_size > 1) {
+ stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
+- stats->free_clusters = atomic_read(&flex_group[g].free_clusters);
++ stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
+ stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
+ return;
+ }
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 4b2bb75..3270ffd 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -142,7 +142,8 @@ void ext4_evict_inode(struct inode *inode)
+ * don't use page cache.
+ */
+ if (ext4_should_journal_data(inode) &&
+- (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
++ (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
++ inode->i_ino != EXT4_JOURNAL_INO) {
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+ tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 553ff71..7b18563 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2866,8 +2866,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
+ if (sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group = ext4_flex_group(sbi,
+ ac->ac_b_ex.fe_group);
+- atomic_sub(ac->ac_b_ex.fe_len,
+- &sbi->s_flex_groups[flex_group].free_clusters);
++ atomic64_sub(ac->ac_b_ex.fe_len,
++ &sbi->s_flex_groups[flex_group].free_clusters);
+ }
+
+ err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+@@ -3485,7 +3485,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+ win = offs;
+
+ ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
+- EXT4_B2C(sbi, win);
++ EXT4_NUM_B2C(sbi, win);
+ BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
+ BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
+ }
+@@ -4634,7 +4634,7 @@ do_more:
+ EXT4_BLOCKS_PER_GROUP(sb);
+ count -= overflow;
+ }
+- count_clusters = EXT4_B2C(sbi, count);
++ count_clusters = EXT4_NUM_B2C(sbi, count);
+ bitmap_bh = ext4_read_block_bitmap(sb, block_group);
+ if (!bitmap_bh) {
+ err = -EIO;
+@@ -4724,8 +4724,8 @@ do_more:
+
+ if (sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+- atomic_add(count_clusters,
+- &sbi->s_flex_groups[flex_group].free_clusters);
++ atomic64_add(count_clusters,
++ &sbi->s_flex_groups[flex_group].free_clusters);
+ }
+
+ ext4_mb_unload_buddy(&e4b);
+@@ -4865,12 +4865,12 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
+ desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
+ ext4_unlock_group(sb, block_group);
+ percpu_counter_add(&sbi->s_freeclusters_counter,
+- EXT4_B2C(sbi, blocks_freed));
++ EXT4_NUM_B2C(sbi, blocks_freed));
+
+ if (sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+- atomic_add(EXT4_B2C(sbi, blocks_freed),
+- &sbi->s_flex_groups[flex_group].free_clusters);
++ atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
++ &sbi->s_flex_groups[flex_group].free_clusters);
+ }
+
+ ext4_mb_unload_buddy(&e4b);
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 33129c0..6e67b97 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -938,7 +938,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
+
+ /* Update the free space counts */
+ percpu_counter_add(&sbi->s_freeclusters_counter,
+- EXT4_B2C(sbi, input->free_blocks_count));
++ EXT4_NUM_B2C(sbi, input->free_blocks_count));
+ percpu_counter_add(&sbi->s_freeinodes_counter,
+ EXT4_INODES_PER_GROUP(sb));
+
+@@ -946,8 +946,8 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
+ sbi->s_log_groups_per_flex) {
+ ext4_group_t flex_group;
+ flex_group = ext4_flex_group(sbi, input->group);
+- atomic_add(EXT4_B2C(sbi, input->free_blocks_count),
+- &sbi->s_flex_groups[flex_group].free_clusters);
++ atomic64_add(EXT4_NUM_B2C(sbi, input->free_blocks_count),
++ &sbi->s_flex_groups[flex_group].free_clusters);
+ atomic_add(EXT4_INODES_PER_GROUP(sb),
+ &sbi->s_flex_groups[flex_group].free_inodes);
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 24ac7a2..cc386b2 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2047,8 +2047,8 @@ static int ext4_fill_flex_info(struct super_block *sb)
+ flex_group = ext4_flex_group(sbi, i);
+ atomic_add(ext4_free_inodes_count(sb, gdp),
+ &sbi->s_flex_groups[flex_group].free_inodes);
+- atomic_add(ext4_free_group_clusters(sb, gdp),
+- &sbi->s_flex_groups[flex_group].free_clusters);
++ atomic64_add(ext4_free_group_clusters(sb, gdp),
++ &sbi->s_flex_groups[flex_group].free_clusters);
+ atomic_add(ext4_used_dirs_count(sb, gdp),
+ &sbi->s_flex_groups[flex_group].used_dirs);
+ }
+diff --git a/fs/isofs/export.c b/fs/isofs/export.c
+index 516eb21..fd88add 100644
+--- a/fs/isofs/export.c
++++ b/fs/isofs/export.c
+@@ -135,6 +135,7 @@ isofs_export_encode_fh(struct dentry *dentry,
+ len = 3;
+ fh32[0] = ei->i_iget5_block;
+ fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */
++ fh16[3] = 0; /* avoid leaking uninitialized data */
+ fh32[2] = inode->i_generation;
+ if (connectable && !S_ISDIR(inode->i_mode)) {
+ struct inode *parent;
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index d7dd774..6ac5bb1 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1016,9 +1016,12 @@ out:
+ void jbd2_journal_set_triggers(struct buffer_head *bh,
+ struct jbd2_buffer_trigger_type *type)
+ {
+- struct journal_head *jh = bh2jh(bh);
++ struct journal_head *jh = jbd2_journal_grab_journal_head(bh);
+
++ if (WARN_ON(!jh))
++ return;
+ jh->b_triggers = type;
++ jbd2_journal_put_journal_head(jh);
+ }
+
+ void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
+@@ -1070,17 +1073,18 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+ {
+ transaction_t *transaction = handle->h_transaction;
+ journal_t *journal = transaction->t_journal;
+- struct journal_head *jh = bh2jh(bh);
++ struct journal_head *jh;
+ int ret = 0;
+
+- jbd_debug(5, "journal_head %p\n", jh);
+- JBUFFER_TRACE(jh, "entry");
+ if (is_handle_aborted(handle))
+ goto out;
+- if (!buffer_jbd(bh)) {
++ jh = jbd2_journal_grab_journal_head(bh);
++ if (!jh) {
+ ret = -EUCLEAN;
+ goto out;
+ }
++ jbd_debug(5, "journal_head %p\n", jh);
++ JBUFFER_TRACE(jh, "entry");
+
+ jbd_lock_bh_state(bh);
+
+@@ -1171,6 +1175,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+ spin_unlock(&journal->j_list_lock);
+ out_unlock_bh:
+ jbd_unlock_bh_state(bh);
++ jbd2_journal_put_journal_head(jh);
+ out:
+ JBUFFER_TRACE(jh, "exit");
+ WARN_ON(ret); /* All errors are bugs, so dump the stack */
+diff --git a/fs/proc/inode.c b/fs/proc/inode.c
+index 7737c54..00f08b3 100644
+--- a/fs/proc/inode.c
++++ b/fs/proc/inode.c
+@@ -427,12 +427,10 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
+
+ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
+ {
+- struct inode * inode;
++ struct inode *inode = new_inode_pseudo(sb);
+
+- inode = iget_locked(sb, de->low_ino);
+- if (!inode)
+- return NULL;
+- if (inode->i_state & I_NEW) {
++ if (inode) {
++ inode->i_ino = de->low_ino;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ PROC_I(inode)->fd = 0;
+ PROC_I(inode)->pde = de;
+@@ -461,9 +459,7 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
+ inode->i_fop = de->proc_fops;
+ }
+ }
+- unlock_new_inode(inode);
+- } else
+- pde_put(de);
++ }
+ return inode;
+ }
+
+diff --git a/fs/udf/namei.c b/fs/udf/namei.c
+index 4639e13..71c97fb 100644
+--- a/fs/udf/namei.c
++++ b/fs/udf/namei.c
+@@ -1293,6 +1293,7 @@ static int udf_encode_fh(struct dentry *de, __u32 *fh, int *lenp,
+ *lenp = 3;
+ fid->udf.block = location.logicalBlockNum;
+ fid->udf.partref = location.partitionReferenceNum;
++ fid->udf.parent_partref = 0;
+ fid->udf.generation = inode->i_generation;
+
+ if (connectable && !S_ISDIR(inode->i_mode)) {
+diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h
+index 555c0ae..743f7a5 100644
+--- a/include/asm-generic/signal.h
++++ b/include/asm-generic/signal.h
+@@ -99,6 +99,10 @@ typedef unsigned long old_sigset_t;
+
+ #include <asm-generic/signal-defs.h>
+
++#ifdef SA_RESTORER
++#define __ARCH_HAS_SA_RESTORER
++#endif
++
+ struct sigaction {
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index ce95a4b..8469f3f 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -484,7 +484,8 @@ struct efivars {
+ * 1) ->list - adds, removals, reads, writes
+ * 2) ops.[gs]et_variable() calls.
+ * It must not be held when creating sysfs entries or calling kmalloc.
+- * ops.get_next_variable() is only called from register_efivars(),
++ * ops.get_next_variable() is only called from register_efivars()
++ * or efivar_update_sysfs_entries(),
+ * which is protected by the BKL, so that path is safe.
+ */
+ spinlock_t lock;
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index b669be6..9b9b2aa 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -1186,6 +1186,12 @@ static inline void perf_event_disable(struct perf_event *event) { }
+ static inline void perf_event_task_tick(void) { }
+ #endif
+
++#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
++extern void perf_restore_debug_store(void);
++#else
++static inline void perf_restore_debug_store(void) { }
++#endif
++
+ #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
+
+ /*
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 53dc7e7..da65890 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -455,7 +455,7 @@ struct sk_buff {
+ union {
+ __u32 mark;
+ __u32 dropcount;
+- __u32 avail_size;
++ __u32 reserved_tailroom;
+ };
+
+ __u16 vlan_tci;
+@@ -1332,7 +1332,10 @@ static inline int skb_tailroom(const struct sk_buff *skb)
+ */
+ static inline int skb_availroom(const struct sk_buff *skb)
+ {
+- return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
++ if (skb_is_nonlinear(skb))
++ return 0;
++
++ return skb->end - skb->tail - skb->reserved_tailroom;
+ }
+
+ /**
+diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
+index 16ff29a..b289bd2 100644
+--- a/include/net/inet_frag.h
++++ b/include/net/inet_frag.h
+@@ -33,6 +33,13 @@ struct inet_frag_queue {
+
+ #define INETFRAGS_HASHSZ 64
+
++/* averaged:
++ * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
++ * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
++ * struct frag_queue))
++ */
++#define INETFRAGS_MAXDEPTH 128
++
+ struct inet_frags {
+ struct hlist_head hash[INETFRAGS_HASHSZ];
+ rwlock_t lock;
+@@ -64,6 +71,8 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f);
+ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
+ struct inet_frags *f, void *key, unsigned int hash)
+ __releases(&f->lock);
++void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
++ const char *prefix);
+
+ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
+ {
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index 10422ef..2124004 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -129,18 +129,16 @@ struct fib_result_nl {
+ };
+
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+-
+ #define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
+-
+-#define FIB_TABLE_HASHSZ 2
+-
+ #else /* CONFIG_IP_ROUTE_MULTIPATH */
+-
+ #define FIB_RES_NH(res) ((res).fi->fib_nh[0])
++#endif /* CONFIG_IP_ROUTE_MULTIPATH */
+
++#ifdef CONFIG_IP_MULTIPLE_TABLES
+ #define FIB_TABLE_HASHSZ 256
+-
+-#endif /* CONFIG_IP_ROUTE_MULTIPATH */
++#else
++#define FIB_TABLE_HASHSZ 2
++#endif
+
+ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 71e1816..ea76d30 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -481,7 +481,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
+ if (force_default || ka->sa.sa_handler != SIG_IGN)
+ ka->sa.sa_handler = SIG_DFL;
+ ka->sa.sa_flags = 0;
+-#ifdef SA_RESTORER
++#ifdef __ARCH_HAS_SA_RESTORER
+ ka->sa.sa_restorer = NULL;
+ #endif
+ sigemptyset(&ka->sa.sa_mask);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 6c880e8..0943d2a 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -2725,8 +2725,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ continue;
+ }
+
+- hlist_del(&entry->node);
+- call_rcu(&entry->rcu, ftrace_free_entry_rcu);
++ hlist_del_rcu(&entry->node);
++ call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
+ }
+ }
+ __disable_ftrace_function_probe();
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 5638104..17edb14 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -652,7 +652,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+ void
+ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+ {
+- struct ring_buffer *buf = tr->buffer;
++ struct ring_buffer *buf;
+
+ if (trace_stop_count)
+ return;
+@@ -664,6 +664,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+ }
+ arch_spin_lock(&ftrace_max_lock);
+
++ buf = tr->buffer;
+ tr->buffer = max_tr.buffer;
+ max_tr.buffer = buf;
+
+@@ -2635,11 +2636,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
+ return -EINVAL;
+ }
+
+-static void set_tracer_flags(unsigned int mask, int enabled)
++/* Some tracers require overwrite to stay enabled */
++int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
++{
++ if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
++ return -1;
++
++ return 0;
++}
++
++int set_tracer_flag(unsigned int mask, int enabled)
+ {
+ /* do nothing if flag is already set */
+ if (!!(trace_flags & mask) == !!enabled)
+- return;
++ return 0;
++
++ /* Give the tracer a chance to approve the change */
++ if (current_trace->flag_changed)
++ if (current_trace->flag_changed(current_trace, mask, !!enabled))
++ return -EINVAL;
+
+ if (enabled)
+ trace_flags |= mask;
+@@ -2649,8 +2664,14 @@ static void set_tracer_flags(unsigned int mask, int enabled)
+ if (mask == TRACE_ITER_RECORD_CMD)
+ trace_event_enable_cmd_record(enabled);
+
+- if (mask == TRACE_ITER_OVERWRITE)
++ if (mask == TRACE_ITER_OVERWRITE) {
+ ring_buffer_change_overwrite(global_trace.buffer, enabled);
++#ifdef CONFIG_TRACER_MAX_TRACE
++ ring_buffer_change_overwrite(max_tr.buffer, enabled);
++#endif
++ }
++
++ return 0;
+ }
+
+ static ssize_t
+@@ -2660,7 +2681,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
+ char buf[64];
+ char *cmp;
+ int neg = 0;
+- int ret;
++ int ret = 0;
+ int i;
+
+ if (cnt >= sizeof(buf))
+@@ -2677,21 +2698,23 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
+ cmp += 2;
+ }
+
++ mutex_lock(&trace_types_lock);
++
+ for (i = 0; trace_options[i]; i++) {
+ if (strcmp(cmp, trace_options[i]) == 0) {
+- set_tracer_flags(1 << i, !neg);
++ ret = set_tracer_flag(1 << i, !neg);
+ break;
+ }
+ }
+
+ /* If no option could be set, test the specific tracer options */
+- if (!trace_options[i]) {
+- mutex_lock(&trace_types_lock);
++ if (!trace_options[i])
+ ret = set_tracer_option(current_trace, cmp, neg);
+- mutex_unlock(&trace_types_lock);
+- if (ret)
+- return ret;
+- }
++
++ mutex_unlock(&trace_types_lock);
++
++ if (ret)
++ return ret;
+
+ *ppos += cnt;
+
+@@ -3015,6 +3038,9 @@ static int tracing_set_tracer(const char *buf)
+ goto out;
+
+ trace_branch_disable();
++
++ current_trace->enabled = false;
++
+ if (current_trace && current_trace->reset)
+ current_trace->reset(tr);
+ if (current_trace && current_trace->use_max_tr) {
+@@ -3044,6 +3070,7 @@ static int tracing_set_tracer(const char *buf)
+ goto out;
+ }
+
++ current_trace->enabled = true;
+ trace_branch_enable(tr);
+ out:
+ mutex_unlock(&trace_types_lock);
+@@ -4378,7 +4405,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+- set_tracer_flags(1 << index, val);
++
++ mutex_lock(&trace_types_lock);
++ ret = set_tracer_flag(1 << index, val);
++ mutex_unlock(&trace_types_lock);
++
++ if (ret < 0)
++ return ret;
+
+ *ppos += cnt;
+
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 092e1f8..c3c3f6b 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -271,10 +271,14 @@ struct tracer {
+ enum print_line_t (*print_line)(struct trace_iterator *iter);
+ /* If you handled the flag setting, return 0 */
+ int (*set_flag)(u32 old_flags, u32 bit, int set);
++ /* Return 0 if OK with change, else return non-zero */
++ int (*flag_changed)(struct tracer *tracer,
++ u32 mask, int set);
+ struct tracer *next;
+ struct tracer_flags *flags;
+ int print_max;
+ int use_max_tr;
++ bool enabled;
+ };
+
+
+@@ -815,6 +819,9 @@ extern struct list_head ftrace_events;
+ extern const char *__start___trace_bprintk_fmt[];
+ extern const char *__stop___trace_bprintk_fmt[];
+
++int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
++int set_tracer_flag(unsigned int mask, int enabled);
++
+ #undef FTRACE_ENTRY
+ #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \
+ extern struct ftrace_event_call \
+diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
+index 20dad0d..1626e1a 100644
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -32,7 +32,7 @@ enum {
+
+ static int trace_type __read_mostly;
+
+-static int save_lat_flag;
++static int save_flags;
+
+ static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
+ static int start_irqsoff_tracer(struct trace_array *tr, int graph);
+@@ -546,8 +546,11 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
+
+ static void __irqsoff_tracer_init(struct trace_array *tr)
+ {
+- save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+- trace_flags |= TRACE_ITER_LATENCY_FMT;
++ save_flags = trace_flags;
++
++ /* non overwrite screws up the latency tracers */
++ set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
++ set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
+
+ tracing_max_latency = 0;
+ irqsoff_trace = tr;
+@@ -561,10 +564,13 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
+
+ static void irqsoff_tracer_reset(struct trace_array *tr)
+ {
++ int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
++ int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
++
+ stop_irqsoff_tracer(tr, is_graph());
+
+- if (!save_lat_flag)
+- trace_flags &= ~TRACE_ITER_LATENCY_FMT;
++ set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
++ set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
+ }
+
+ static void irqsoff_tracer_start(struct trace_array *tr)
+@@ -597,6 +603,7 @@ static struct tracer irqsoff_tracer __read_mostly =
+ .print_line = irqsoff_print_line,
+ .flags = &tracer_flags,
+ .set_flag = irqsoff_set_flag,
++ .flag_changed = trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_irqsoff,
+ #endif
+@@ -630,6 +637,7 @@ static struct tracer preemptoff_tracer __read_mostly =
+ .print_line = irqsoff_print_line,
+ .flags = &tracer_flags,
+ .set_flag = irqsoff_set_flag,
++ .flag_changed = trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_preemptoff,
+ #endif
+@@ -665,6 +673,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
+ .print_line = irqsoff_print_line,
+ .flags = &tracer_flags,
+ .set_flag = irqsoff_set_flag,
++ .flag_changed = trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_preemptirqsoff,
+ #endif
+diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
+index e4a70c0..6857e0c 100644
+--- a/kernel/trace/trace_sched_wakeup.c
++++ b/kernel/trace/trace_sched_wakeup.c
+@@ -36,7 +36,7 @@ static void __wakeup_reset(struct trace_array *tr);
+ static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
+ static void wakeup_graph_return(struct ftrace_graph_ret *trace);
+
+-static int save_lat_flag;
++static int save_flags;
+
+ #define TRACE_DISPLAY_GRAPH 1
+
+@@ -528,8 +528,11 @@ static void stop_wakeup_tracer(struct trace_array *tr)
+
+ static int __wakeup_tracer_init(struct trace_array *tr)
+ {
+- save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
+- trace_flags |= TRACE_ITER_LATENCY_FMT;
++ save_flags = trace_flags;
++
++ /* non overwrite screws up the latency tracers */
++ set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
++ set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
+
+ tracing_max_latency = 0;
+ wakeup_trace = tr;
+@@ -551,12 +554,15 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
+
+ static void wakeup_tracer_reset(struct trace_array *tr)
+ {
++ int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
++ int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
++
+ stop_wakeup_tracer(tr);
+ /* make sure we put back any tasks we are tracing */
+ wakeup_reset(tr);
+
+- if (!save_lat_flag)
+- trace_flags &= ~TRACE_ITER_LATENCY_FMT;
++ set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
++ set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
+ }
+
+ static void wakeup_tracer_start(struct trace_array *tr)
+@@ -582,6 +588,7 @@ static struct tracer wakeup_tracer __read_mostly =
+ .print_line = wakeup_print_line,
+ .flags = &tracer_flags,
+ .set_flag = wakeup_set_flag,
++ .flag_changed = trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_wakeup,
+ #endif
+@@ -603,6 +610,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
+ .print_line = wakeup_print_line,
+ .flags = &tracer_flags,
+ .set_flag = wakeup_set_flag,
++ .flag_changed = trace_keep_overwrite,
+ #ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_wakeup,
+ #endif
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index d6c0fdf..4c7d42a 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2092,8 +2092,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
+ /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
+ unsigned long hugetlb_total_pages(void)
+ {
+- struct hstate *h = &default_hstate;
+- return h->nr_huge_pages * pages_per_huge_page(h);
++ struct hstate *h;
++ unsigned long nr_total_pages = 0;
++
++ for_each_hstate(h)
++ nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
++ return nr_total_pages;
+ }
+
+ static int hugetlb_acct_memory(struct hstate *h, long delta)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 2aac4ec..b23bbbf 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3299,6 +3299,7 @@ ncls:
+ }
+ switch (rx_handler(&skb)) {
+ case RX_HANDLER_CONSUMED:
++ ret = NET_RX_SUCCESS;
+ goto out;
+ case RX_HANDLER_ANOTHER:
+ goto another_round;
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 5229c7f..3b5e680 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -973,6 +973,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
+ * report anything.
+ */
+ ivi.spoofchk = -1;
++ memset(ivi.mac, 0, sizeof(ivi.mac));
+ if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
+ break;
+ vf_mac.vf =
+@@ -2041,7 +2042,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
+
+ while (RTA_OK(attr, attrlen)) {
+- unsigned flavor = attr->rta_type;
++ unsigned int flavor = attr->rta_type & NLA_TYPE_MASK;
+ if (flavor) {
+ if (flavor > rta_max[sz_idx])
+ return -EINVAL;
+diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
+index d860530..2f9517d 100644
+--- a/net/dcb/dcbnl.c
++++ b/net/dcb/dcbnl.c
+@@ -336,6 +336,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
+ dcb->dcb_family = AF_UNSPEC;
+ dcb->cmd = DCB_CMD_GPERM_HWADDR;
+
++ memset(perm_addr, 0, sizeof(perm_addr));
+ netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
+
+ ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
+@@ -1238,6 +1239,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
+
+ if (ops->ieee_getets) {
+ struct ieee_ets ets;
++ memset(&ets, 0, sizeof(ets));
+ err = ops->ieee_getets(netdev, &ets);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets);
+@@ -1245,6 +1247,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
+
+ if (ops->ieee_getpfc) {
+ struct ieee_pfc pfc;
++ memset(&pfc, 0, sizeof(pfc));
+ err = ops->ieee_getpfc(netdev, &pfc);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc);
+@@ -1277,6 +1280,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
+ /* get peer info if available */
+ if (ops->ieee_peer_getets) {
+ struct ieee_ets ets;
++ memset(&ets, 0, sizeof(ets));
+ err = ops->ieee_peer_getets(netdev, &ets);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets);
+@@ -1284,6 +1288,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
+
+ if (ops->ieee_peer_getpfc) {
+ struct ieee_pfc pfc;
++ memset(&pfc, 0, sizeof(pfc));
+ err = ops->ieee_peer_getpfc(netdev, &pfc);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc);
+@@ -1463,6 +1468,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
+ /* peer info if available */
+ if (ops->cee_peer_getpg) {
+ struct cee_pg pg;
++ memset(&pg, 0, sizeof(pg));
+ err = ops->cee_peer_getpg(netdev, &pg);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
+@@ -1470,6 +1476,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
+
+ if (ops->cee_peer_getpfc) {
+ struct cee_pfc pfc;
++ memset(&pfc, 0, sizeof(pfc));
+ err = ops->cee_peer_getpfc(netdev, &pfc);
+ if (!err)
+ NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
+diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
+index 5d8cf80..8b0866f 100644
+--- a/net/ieee802154/6lowpan.h
++++ b/net/ieee802154/6lowpan.h
+@@ -87,7 +87,7 @@
+ (memcmp(addr1, addr2, length >> 3) == 0)
+
+ /* local link, i.e. FE80::/10 */
+-#define is_addr_link_local(a) (((a)->s6_addr16[0]) == 0x80FE)
++#define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80))
+
+ /*
+ * check whether we can compress the IID to 16 bits,
+diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
+index 5ff2a51..210b710 100644
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -21,6 +21,7 @@
+ #include <linux/rtnetlink.h>
+ #include <linux/slab.h>
+
++#include <net/sock.h>
+ #include <net/inet_frag.h>
+
+ static void inet_frag_secret_rebuild(unsigned long dummy)
+@@ -271,6 +272,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
+ {
+ struct inet_frag_queue *q;
+ struct hlist_node *n;
++ int depth = 0;
+
+ hlist_for_each_entry(q, n, &f->hash[hash], list) {
+ if (q->net == nf && f->match(q, key)) {
+@@ -278,9 +280,25 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
+ read_unlock(&f->lock);
+ return q;
+ }
++ depth++;
+ }
+ read_unlock(&f->lock);
+
+- return inet_frag_create(nf, f, key);
++ if (depth <= INETFRAGS_MAXDEPTH)
++ return inet_frag_create(nf, f, key);
++ else
++ return ERR_PTR(-ENOBUFS);
+ }
+ EXPORT_SYMBOL(inet_frag_find);
++
++void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
++ const char *prefix)
++{
++ static const char msg[] = "inet_frag_find: Fragment hash bucket"
++ " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
++ ". Dropping fragment.\n";
++
++ if (PTR_ERR(q) == -ENOBUFS)
++ LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
++}
++EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index a4e7131..b2cfe83 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -20,6 +20,8 @@
+ * Patrick McHardy : LRU queue of frag heads for evictor.
+ */
+
++#define pr_fmt(fmt) "IPv4: " fmt
++
+ #include <linux/compiler.h>
+ #include <linux/module.h>
+ #include <linux/types.h>
+@@ -293,14 +295,12 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
+ hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
+
+ q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
+- if (q == NULL)
+- goto out_nomem;
++ if (IS_ERR_OR_NULL(q)) {
++ inet_frag_maybe_warn_overflow(q, pr_fmt());
++ return NULL;
++ }
+
+ return container_of(q, struct ipq, q);
+-
+-out_nomem:
+- LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left !\n");
+- return NULL;
+ }
+
+ /* Is the fragment too far ahead to be part of ipq? */
+diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
+index 42dd1a9..40eb4fc 100644
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -358,7 +358,6 @@ int ip_options_compile(struct net *net,
+ }
+ switch (optptr[3]&0xF) {
+ case IPOPT_TS_TSONLY:
+- opt->ts = optptr - iph;
+ if (skb)
+ timeptr = &optptr[optptr[2]-1];
+ opt->ts_needtime = 1;
+@@ -369,7 +368,6 @@ int ip_options_compile(struct net *net,
+ pp_ptr = optptr + 2;
+ goto error;
+ }
+- opt->ts = optptr - iph;
+ if (rt) {
+ memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
+ timeptr = &optptr[optptr[2]+3];
+@@ -383,7 +381,6 @@ int ip_options_compile(struct net *net,
+ pp_ptr = optptr + 2;
+ goto error;
+ }
+- opt->ts = optptr - iph;
+ {
+ __be32 addr;
+ memcpy(&addr, &optptr[optptr[2]-1], 4);
+@@ -416,12 +413,12 @@ int ip_options_compile(struct net *net,
+ pp_ptr = optptr + 3;
+ goto error;
+ }
+- opt->ts = optptr - iph;
+ if (skb) {
+ optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4);
+ opt->is_changed = 1;
+ }
+ }
++ opt->ts = optptr - iph;
+ break;
+ case IPOPT_RA:
+ if (optlen < 4) {
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 52edbb8..fe381c2 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -704,7 +704,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+ * Make sure that we have exactly size bytes
+ * available to the caller, no more, no less.
+ */
+- skb->avail_size = size;
++ skb->reserved_tailroom = skb->end - skb->tail - size;
+ return skb;
+ }
+ __kfree_skb(skb);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index e865ed1..1b1f7af 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5494,6 +5494,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ if (tcp_checksum_complete_user(sk, skb))
+ goto csum_error;
+
++ if ((int)skb->truesize > sk->sk_forward_alloc)
++ goto step5;
++
+ /* Predicted packet is in window by definition.
+ * seq == rcv_nxt and rcv_wup <= rcv_nxt.
+ * Hence, check seq<=rcv_wup reduces to:
+@@ -5505,9 +5508,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+
+ tcp_rcv_rtt_measure_ts(sk, skb);
+
+- if ((int)skb->truesize > sk->sk_forward_alloc)
+- goto step5;
+-
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
+
+ /* Bulk data transfer: receiver */
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 921cbac..9bb7400 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1096,7 +1096,6 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
+ eat = min_t(int, len, skb_headlen(skb));
+ if (eat) {
+ __skb_pull(skb, eat);
+- skb->avail_size -= eat;
+ len -= eat;
+ if (!len)
+ return;
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index a46c64e..f8d24dd 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -265,7 +265,8 @@ int ip6_mc_input(struct sk_buff *skb)
+ * IPv6 multicast router mode is now supported ;)
+ */
+ if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
+- !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) &&
++ !(ipv6_addr_type(&hdr->daddr) &
++ (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) &&
+ likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
+ /*
+ * Okay, we try to forward - split and duplicate
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index 38f00b0..52e2f65 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -14,6 +14,8 @@
+ * 2 of the License, or (at your option) any later version.
+ */
+
++#define pr_fmt(fmt) "IPv6-nf: " fmt
++
+ #include <linux/errno.h>
+ #include <linux/types.h>
+ #include <linux/string.h>
+@@ -176,13 +178,12 @@ fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
+
+ q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash);
+ local_bh_enable();
+- if (q == NULL)
+- goto oom;
++ if (IS_ERR_OR_NULL(q)) {
++ inet_frag_maybe_warn_overflow(q, pr_fmt());
++ return NULL;
++ }
+
+ return container_of(q, struct nf_ct_frag6_queue, q);
+-
+-oom:
+- return NULL;
+ }
+
+
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index dfb164e..2b0a4ca 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -26,6 +26,9 @@
+ * YOSHIFUJI,H. @USAGI Always remove fragment header to
+ * calculate ICV correctly.
+ */
++
++#define pr_fmt(fmt) "IPv6: " fmt
++
+ #include <linux/errno.h>
+ #include <linux/types.h>
+ #include <linux/string.h>
+@@ -240,9 +243,10 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6
+ hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
+
+ q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
+- if (q == NULL)
++ if (IS_ERR_OR_NULL(q)) {
++ inet_frag_maybe_warn_overflow(q, pr_fmt());
+ return NULL;
+-
++ }
+ return container_of(q, struct frag_queue, q);
+ }
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 791c1fa..18ea73c 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1920,7 +1920,8 @@ void rt6_purge_dflt_routers(struct net *net)
+ restart:
+ read_lock_bh(&table->tb6_lock);
+ for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
+- if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
++ if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
++ (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
+ dst_hold(&rt->dst);
+ read_unlock_bh(&table->tb6_lock);
+ ip6_del_rt(rt);
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index b1bd16f..6f60175 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -360,6 +360,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
+ l2tp_xmit_skb(session, skb, session->hdr_len);
+
+ sock_put(ps->tunnel_sock);
++ sock_put(sk);
+
+ return error;
+
+diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
+index d463f5a..23267b3 100644
+--- a/net/netlabel/netlabel_unlabeled.c
++++ b/net/netlabel/netlabel_unlabeled.c
+@@ -1189,8 +1189,6 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
+ struct netlbl_unlhsh_walk_arg cb_arg;
+ u32 skip_bkt = cb->args[0];
+ u32 skip_chain = cb->args[1];
+- u32 skip_addr4 = cb->args[2];
+- u32 skip_addr6 = cb->args[3];
+ u32 iter_bkt;
+ u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0;
+ struct netlbl_unlhsh_iface *iface;
+@@ -1215,7 +1213,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
+ continue;
+ netlbl_af4list_foreach_rcu(addr4,
+ &iface->addr4_list) {
+- if (iter_addr4++ < skip_addr4)
++ if (iter_addr4++ < cb->args[2])
+ continue;
+ if (netlbl_unlabel_staticlist_gen(
+ NLBL_UNLABEL_C_STATICLIST,
+@@ -1231,7 +1229,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ netlbl_af6list_foreach_rcu(addr6,
+ &iface->addr6_list) {
+- if (iter_addr6++ < skip_addr6)
++ if (iter_addr6++ < cb->args[3])
+ continue;
+ if (netlbl_unlabel_staticlist_gen(
+ NLBL_UNLABEL_C_STATICLIST,
+@@ -1250,10 +1248,10 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,
+
+ unlabel_staticlist_return:
+ rcu_read_unlock();
+- cb->args[0] = skip_bkt;
+- cb->args[1] = skip_chain;
+- cb->args[2] = skip_addr4;
+- cb->args[3] = skip_addr6;
++ cb->args[0] = iter_bkt;
++ cb->args[1] = iter_chain;
++ cb->args[2] = iter_addr4;
++ cb->args[3] = iter_addr6;
+ return skb->len;
+ }
+
+@@ -1273,12 +1271,9 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
+ {
+ struct netlbl_unlhsh_walk_arg cb_arg;
+ struct netlbl_unlhsh_iface *iface;
+- u32 skip_addr4 = cb->args[0];
+- u32 skip_addr6 = cb->args[1];
+- u32 iter_addr4 = 0;
++ u32 iter_addr4 = 0, iter_addr6 = 0;
+ struct netlbl_af4list *addr4;
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+- u32 iter_addr6 = 0;
+ struct netlbl_af6list *addr6;
+ #endif
+
+@@ -1292,7 +1287,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
+ goto unlabel_staticlistdef_return;
+
+ netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) {
+- if (iter_addr4++ < skip_addr4)
++ if (iter_addr4++ < cb->args[0])
+ continue;
+ if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
+ iface,
+@@ -1305,7 +1300,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
+ }
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) {
+- if (iter_addr6++ < skip_addr6)
++ if (iter_addr6++ < cb->args[1])
+ continue;
+ if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
+ iface,
+@@ -1320,8 +1315,8 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
+
+ unlabel_staticlistdef_return:
+ rcu_read_unlock();
+- cb->args[0] = skip_addr4;
+- cb->args[1] = skip_addr6;
++ cb->args[0] = iter_addr4;
++ cb->args[1] = iter_addr6;
+ return skb->len;
+ }
+
+diff --git a/net/rds/message.c b/net/rds/message.c
+index f0a4658..aff589c 100644
+--- a/net/rds/message.c
++++ b/net/rds/message.c
+@@ -197,6 +197,9 @@ struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
+ {
+ struct rds_message *rm;
+
++ if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
++ return NULL;
++
+ rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
+ if (!rm)
+ goto out;
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index acd2edb..3c04692 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1050,7 +1050,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
+ transports) {
+
+ if (transport == active)
+- break;
++ continue;
+ list_for_each_entry(chunk, &transport->transmitted,
+ transmitted_list) {
+ if (key == chunk->subh.data_hdr->tsn) {
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 891f5db..cb1c430 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -2044,7 +2044,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
+ }
+
+ /* Delete the tempory new association. */
+- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
++ sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
+ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+
+ /* Restore association pointer to provide SCTP command interpeter
+diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
+index 48665ec..8ab2951 100644
+--- a/security/selinux/xfrm.c
++++ b/security/selinux/xfrm.c
+@@ -310,7 +310,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
+
+ if (old_ctx) {
+ new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len,
+- GFP_KERNEL);
++ GFP_ATOMIC);
+ if (!new_ctx)
+ return -ENOMEM;
+
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index b0187e7..7747d26 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2771,7 +2771,7 @@ static unsigned int convert_to_spdif_status(unsigned short val)
+ if (val & AC_DIG1_PROFESSIONAL)
+ sbits |= IEC958_AES0_PROFESSIONAL;
+ if (sbits & IEC958_AES0_PROFESSIONAL) {
+- if (sbits & AC_DIG1_EMPHASIS)
++ if (val & AC_DIG1_EMPHASIS)
+ sbits |= IEC958_AES0_PRO_EMPHASIS_5015;
+ } else {
+ if (val & AC_DIG1_EMPHASIS)
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index c9269ce..984b5b1 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1236,7 +1236,7 @@ static int patch_cxt5045(struct hda_codec *codec)
+ }
+
+ if (spec->beep_amp)
+- snd_hda_attach_beep_device(codec, spec->beep_amp);
++ snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
+
+ return 0;
+ }
+@@ -2027,7 +2027,7 @@ static int patch_cxt5051(struct hda_codec *codec)
+ }
+
+ if (spec->beep_amp)
+- snd_hda_attach_beep_device(codec, spec->beep_amp);
++ snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
+
+ conexant_init_jacks(codec);
+ if (spec->auto_mic & AUTO_MIC_PORTB)
+@@ -3225,7 +3225,7 @@ static int patch_cxt5066(struct hda_codec *codec)
+ }
+
+ if (spec->beep_amp)
+- snd_hda_attach_beep_device(codec, spec->beep_amp);
++ snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
+
+ return 0;
+ }
+@@ -4556,7 +4556,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
+ spec->capture_stream = &cx_auto_pcm_analog_capture;
+ codec->patch_ops = cx_auto_patch_ops;
+ if (spec->beep_amp)
+- snd_hda_attach_beep_device(codec, spec->beep_amp);
++ snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
+ return 0;
+ }
+
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 9121dee..f4540bf 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -711,8 +711,9 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_
+ case UAC2_CLOCK_SELECTOR: {
+ struct uac_selector_unit_descriptor *d = p1;
+ /* call recursively to retrieve the channel info */
+- if (check_input_term(state, d->baSourceID[0], term) < 0)
+- return -ENODEV;
++ err = check_input_term(state, d->baSourceID[0], term);
++ if (err < 0)
++ return err;
+ term->type = d->bDescriptorSubtype << 16; /* virtual type */
+ term->id = id;
+ term->name = uac_selector_unit_iSelector(d);
+@@ -1263,8 +1264,9 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
+ return err;
+
+ /* determine the input source type and name */
+- if (check_input_term(state, hdr->bSourceID, &iterm) < 0)
+- return -EINVAL;
++ err = check_input_term(state, hdr->bSourceID, &iterm);
++ if (err < 0)
++ return err;
+
+ master_bits = snd_usb_combine_bytes(bmaControls, csize);
+ /* master configuration quirks */
+@@ -2018,7 +2020,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
+ state.oterm.type = le16_to_cpu(desc->wTerminalType);
+ state.oterm.name = desc->iTerminal;
+ err = parse_audio_unit(&state, desc->bSourceID);
+- if (err < 0)
++ if (err < 0 && err != -EINVAL)
+ return err;
+ } else { /* UAC_VERSION_2 */
+ struct uac2_output_terminal_descriptor *desc = p;
+@@ -2030,12 +2032,12 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
+ state.oterm.type = le16_to_cpu(desc->wTerminalType);
+ state.oterm.name = desc->iTerminal;
+ err = parse_audio_unit(&state, desc->bSourceID);
+- if (err < 0)
++ if (err < 0 && err != -EINVAL)
+ return err;
+
+ /* for UAC2, use the same approach to also add the clock selectors */
+ err = parse_audio_unit(&state, desc->bCSourceID);
+- if (err < 0)
++ if (err < 0 && err != -EINVAL)
+ return err;
+ }
+ }
+diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
+index 0961d88..5e19410 100644
+--- a/tools/hv/hv_kvp_daemon.c
++++ b/tools/hv/hv_kvp_daemon.c
+@@ -393,13 +393,19 @@ int main(void)
+ len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0,
+ addr_p, &addr_l);
+
+- if (len < 0 || addr.nl_pid) {
++ if (len < 0) {
+ syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
+ addr.nl_pid, errno, strerror(errno));
+ close(fd);
+ return -1;
+ }
+
++ if (addr.nl_pid) {
++ syslog(LOG_WARNING, "Received packet from untrusted pid:%u",
++ addr.nl_pid);
++ continue;
++ }
++
+ incoming_msg = (struct nlmsghdr *)kvp_recv_buffer;
+ incoming_cn_msg = (struct cn_msg *)NLMSG_DATA(incoming_msg);
+
+diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
+index bf54c48..6c164dc 100644
+--- a/tools/perf/util/trace-event-parse.c
++++ b/tools/perf/util/trace-event-parse.c
+@@ -1582,8 +1582,6 @@ process_symbols(struct event *event, struct print_arg *arg, char **tok)
+ field = malloc_or_die(sizeof(*field));
+
+ type = process_arg(event, field, &token);
+- while (type == EVENT_OP)
+- type = process_op(event, field, &token);
+ if (test_type_token(type, token, EVENT_DELIM, ","))
+ goto out_free;
+
diff --git a/1042_linux-3.2.43.patch b/1042_linux-3.2.43.patch
new file mode 100644
index 00000000..a3f878bc
--- /dev/null
+++ b/1042_linux-3.2.43.patch
@@ -0,0 +1,2442 @@
+diff --git a/Makefile b/Makefile
+index d44f009..59130db 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 42
++SUBLEVEL = 43
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
+index 5f85d8b..a09b6c3 100644
+--- a/arch/tile/kernel/setup.c
++++ b/arch/tile/kernel/setup.c
+@@ -914,7 +914,7 @@ void __cpuinit setup_cpu(int boot)
+ #ifdef CONFIG_BLK_DEV_INITRD
+
+ static int __initdata set_initramfs_file;
+-static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
++static char __initdata initramfs_file[128] = "initramfs";
+
+ static int __init setup_initramfs_file(char *str)
+ {
+@@ -928,9 +928,9 @@ static int __init setup_initramfs_file(char *str)
+ early_param("initramfs_file", setup_initramfs_file);
+
+ /*
+- * We look for an additional "initramfs.cpio.gz" file in the hvfs.
+- * If there is one, we allocate some memory for it and it will be
+- * unpacked to the initramfs after any built-in initramfs_data.
++ * We look for a file called "initramfs" in the hvfs. If there is one, we
++ * allocate some memory for it and it will be unpacked to the initramfs.
++ * If it's compressed, the initd code will uncompress it first.
+ */
+ static void __init load_hv_initrd(void)
+ {
+@@ -940,10 +940,16 @@ static void __init load_hv_initrd(void)
+
+ fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
+ if (fd == HV_ENOENT) {
+- if (set_initramfs_file)
++ if (set_initramfs_file) {
+ pr_warning("No such hvfs initramfs file '%s'\n",
+ initramfs_file);
+- return;
++ return;
++ } else {
++ /* Try old backwards-compatible name. */
++ fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
++ if (fd == HV_ENOENT)
++ return;
++ }
+ }
+ BUG_ON(fd < 0);
+ stat = hv_fs_fstat(fd);
+diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
+index 887f68f..db30542 100644
+--- a/drivers/block/aoe/aoecmd.c
++++ b/drivers/block/aoe/aoecmd.c
+@@ -30,8 +30,9 @@ new_skb(ulong len)
+ {
+ struct sk_buff *skb;
+
+- skb = alloc_skb(len, GFP_ATOMIC);
++ skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
+ if (skb) {
++ skb_reserve(skb, MAX_HEADER);
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb->protocol = __constant_htons(ETH_P_AOE);
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 8c6787a..a365562 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -907,6 +907,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+ lo->lo_flags |= LO_FLAGS_PARTSCAN;
+ if (lo->lo_flags & LO_FLAGS_PARTSCAN)
+ ioctl_by_bdev(bdev, BLKRRPART, 0);
++
++ /* Grab the block_device to prevent its destruction after we
++ * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
++ */
++ bdgrab(bdev);
+ return 0;
+
+ out_clr:
+@@ -1003,8 +1008,10 @@ static int loop_clr_fd(struct loop_device *lo)
+ memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
+ memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
+ memset(lo->lo_file_name, 0, LO_NAME_SIZE);
+- if (bdev)
++ if (bdev) {
++ bdput(bdev);
+ invalidate_bdev(bdev);
++ }
+ set_capacity(lo->lo_disk, 0);
+ loop_sysfs_exit(lo);
+ if (bdev) {
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 15ec4db..85fdd4b 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -758,13 +758,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
+ }
+ }
+
+- /*
+- * We set it one so that the last submit_bio does not have to call
+- * atomic_inc.
+- */
+ atomic_set(&pending_req->pendcnt, nbio);
+-
+- /* Get a reference count for the disk queue and start sending I/O */
+ blk_start_plug(&plug);
+
+ for (i = 0; i < nbio; i++)
+@@ -792,6 +786,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
+ fail_put_bio:
+ for (i = 0; i < nbio; i++)
+ bio_put(biolist[i]);
++ atomic_set(&pending_req->pendcnt, 1);
+ __end_block_io_op(pending_req, -EINVAL);
+ msleep(1); /* back off a bit */
+ return -EIO;
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 574ce73..853fdf8 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -72,14 +72,23 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x03F0, 0x311D) },
+
+ /* Atheros AR3012 with sflash firmware*/
++ { USB_DEVICE(0x0CF3, 0x0036) },
+ { USB_DEVICE(0x0CF3, 0x3004) },
++ { USB_DEVICE(0x0CF3, 0x3008) },
+ { USB_DEVICE(0x0CF3, 0x311D) },
++ { USB_DEVICE(0x0CF3, 0x817a) },
+ { USB_DEVICE(0x13d3, 0x3375) },
++ { USB_DEVICE(0x04CA, 0x3004) },
+ { USB_DEVICE(0x04CA, 0x3005) },
++ { USB_DEVICE(0x04CA, 0x3006) },
++ { USB_DEVICE(0x04CA, 0x3008) },
+ { USB_DEVICE(0x13d3, 0x3362) },
+ { USB_DEVICE(0x0CF3, 0xE004) },
+ { USB_DEVICE(0x0930, 0x0219) },
+ { USB_DEVICE(0x0489, 0xe057) },
++ { USB_DEVICE(0x13d3, 0x3393) },
++ { USB_DEVICE(0x0489, 0xe04e) },
++ { USB_DEVICE(0x0489, 0xe056) },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE02C) },
+@@ -99,14 +108,23 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
+ static struct usb_device_id ath3k_blist_tbl[] = {
+
+ /* Atheros AR3012 with sflash firmware*/
++ { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU22 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index c5e44a3..6b784b7 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -139,14 +139,23 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+
+ /* Atheros 3012 with sflash firmware */
++ { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index 7795d1e..d5ae736 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -131,7 +131,8 @@ struct ports_device {
+ spinlock_t ports_lock;
+
+ /* To protect the vq operations for the control channel */
+- spinlock_t cvq_lock;
++ spinlock_t c_ivq_lock;
++ spinlock_t c_ovq_lock;
+
+ /* The current config space is stored here */
+ struct virtio_console_config config;
+@@ -457,11 +458,14 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
+ vq = portdev->c_ovq;
+
+ sg_init_one(sg, &cpkt, sizeof(cpkt));
++
++ spin_lock(&portdev->c_ovq_lock);
+ if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
+ virtqueue_kick(vq);
+ while (!virtqueue_get_buf(vq, &len))
+ cpu_relax();
+ }
++ spin_unlock(&portdev->c_ovq_lock);
+ return 0;
+ }
+
+@@ -1466,23 +1470,23 @@ static void control_work_handler(struct work_struct *work)
+ portdev = container_of(work, struct ports_device, control_work);
+ vq = portdev->c_ivq;
+
+- spin_lock(&portdev->cvq_lock);
++ spin_lock(&portdev->c_ivq_lock);
+ while ((buf = virtqueue_get_buf(vq, &len))) {
+- spin_unlock(&portdev->cvq_lock);
++ spin_unlock(&portdev->c_ivq_lock);
+
+ buf->len = len;
+ buf->offset = 0;
+
+ handle_control_message(portdev, buf);
+
+- spin_lock(&portdev->cvq_lock);
++ spin_lock(&portdev->c_ivq_lock);
+ if (add_inbuf(portdev->c_ivq, buf) < 0) {
+ dev_warn(&portdev->vdev->dev,
+ "Error adding buffer to queue\n");
+ free_buf(buf);
+ }
+ }
+- spin_unlock(&portdev->cvq_lock);
++ spin_unlock(&portdev->c_ivq_lock);
+ }
+
+ static void out_intr(struct virtqueue *vq)
+@@ -1721,10 +1725,12 @@ static int __devinit virtcons_probe(struct virtio_device *vdev)
+ if (multiport) {
+ unsigned int nr_added_bufs;
+
+- spin_lock_init(&portdev->cvq_lock);
++ spin_lock_init(&portdev->c_ivq_lock);
++ spin_lock_init(&portdev->c_ovq_lock);
+ INIT_WORK(&portdev->control_work, &control_work_handler);
+
+- nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
++ nr_added_bufs = fill_queue(portdev->c_ivq,
++ &portdev->c_ivq_lock);
+ if (!nr_added_bufs) {
+ dev_err(&vdev->dev,
+ "Error allocating buffers for control queue\n");
+diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c
+index cdae207..d4cd56a 100644
+--- a/drivers/eisa/pci_eisa.c
++++ b/drivers/eisa/pci_eisa.c
+@@ -19,8 +19,7 @@
+ /* There is only *one* pci_eisa device per machine, right ? */
+ static struct eisa_root_device pci_eisa_root;
+
+-static int __init pci_eisa_init(struct pci_dev *pdev,
+- const struct pci_device_id *ent)
++static int __init pci_eisa_init(struct pci_dev *pdev)
+ {
+ int rc;
+
+@@ -45,22 +44,26 @@ static int __init pci_eisa_init(struct pci_dev *pdev,
+ return 0;
+ }
+
+-static struct pci_device_id pci_eisa_pci_tbl[] = {
+- { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+- PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 },
+- { 0, }
+-};
++/*
++ * We have to call pci_eisa_init_early() before pnpacpi_init()/isapnp_init().
++ * Otherwise pnp resource will get enabled early and could prevent eisa
++ * to be initialized.
++ * Also need to make sure pci_eisa_init_early() is called after
++ * x86/pci_subsys_init().
++ * So need to use subsys_initcall_sync with it.
++ */
++static int __init pci_eisa_init_early(void)
++{
++ struct pci_dev *dev = NULL;
++ int ret;
+
+-static struct pci_driver __refdata pci_eisa_driver = {
+- .name = "pci_eisa",
+- .id_table = pci_eisa_pci_tbl,
+- .probe = pci_eisa_init,
+-};
++ for_each_pci_dev(dev)
++ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_EISA) {
++ ret = pci_eisa_init(dev);
++ if (ret)
++ return ret;
++ }
+
+-static int __init pci_eisa_init_module (void)
+-{
+- return pci_register_driver (&pci_eisa_driver);
++ return 0;
+ }
+-
+-device_initcall(pci_eisa_init_module);
+-MODULE_DEVICE_TABLE(pci, pci_eisa_pci_tbl);
++subsys_initcall_sync(pci_eisa_init_early);
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index e665bdf..08075f2 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -539,6 +539,9 @@
+ #define USB_VENDOR_ID_MONTEREY 0x0566
+ #define USB_DEVICE_ID_GENIUS_KB29E 0x3004
+
++#define USB_VENDOR_ID_MSI 0x1770
++#define USB_DEVICE_ID_MSI_GX680R_LED_PANEL 0xff00
++
+ #define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400
+ #define USB_DEVICE_ID_N_S_HARMONY 0xc359
+
+@@ -621,6 +624,9 @@
+ #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000
+ #define USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN 0x3001
+
++#define USB_VENDOR_ID_REALTEK 0x0bda
++#define USB_DEVICE_ID_REALTEK_READER 0x0152
++
+ #define USB_VENDOR_ID_ROCCAT 0x1e7d
+ #define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
+ #define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
+diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
+index 3899989..259b9f4 100644
+--- a/drivers/hid/hid-microsoft.c
++++ b/drivers/hid/hid-microsoft.c
+@@ -47,9 +47,9 @@ static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ rdesc[559] = 0x45;
+ }
+ /* the same as above (s/usage/physical/) */
+- if ((quirks & MS_RDESC_3K) && *rsize == 106 &&
+- !memcmp((char []){ 0x19, 0x00, 0x29, 0xff },
+- &rdesc[94], 4)) {
++ if ((quirks & MS_RDESC_3K) && *rsize == 106 && rdesc[94] == 0x19 &&
++ rdesc[95] == 0x00 && rdesc[96] == 0x29 &&
++ rdesc[97] == 0xff) {
+ rdesc[94] = 0x35;
+ rdesc[96] = 0x45;
+ }
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index e26eddf..96a1e0f 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -71,11 +71,13 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
++ { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+index 014504d..3767853 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -755,9 +755,13 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
+ if (++priv->tx_outstanding == ipoib_sendq_size) {
+ ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
+ tx->qp->qp_num);
+- if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
+- ipoib_warn(priv, "request notify on send CQ failed\n");
+ netif_stop_queue(dev);
++ rc = ib_req_notify_cq(priv->send_cq,
++ IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
++ if (rc < 0)
++ ipoib_warn(priv, "request notify on send CQ failed\n");
++ else if (rc)
++ ipoib_send_comp_handler(priv->send_cq, dev);
+ }
+ }
+ }
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 0f074e0..07cb1a6 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1874,16 +1874,16 @@ static int device_change_notifier(struct notifier_block *nb,
+
+ /* allocate a protection domain if a device is added */
+ dma_domain = find_protection_domain(devid);
+- if (dma_domain)
+- goto out;
+- dma_domain = dma_ops_domain_alloc();
+- if (!dma_domain)
+- goto out;
+- dma_domain->target_dev = devid;
+-
+- spin_lock_irqsave(&iommu_pd_list_lock, flags);
+- list_add_tail(&dma_domain->list, &iommu_pd_list);
+- spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
++ if (!dma_domain) {
++ dma_domain = dma_ops_domain_alloc();
++ if (!dma_domain)
++ goto out;
++ dma_domain->target_dev = devid;
++
++ spin_lock_irqsave(&iommu_pd_list_lock, flags);
++ list_add_tail(&dma_domain->list, &iommu_pd_list);
++ spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
++ }
+
+ dev->archdata.dma_ops = &amd_iommu_dma_ops;
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 63e3c47..fc07f90 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1934,12 +1934,11 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
+ return -EINVAL;
+ }
+
++ write_unlock_bh(&bond->lock);
+ /* unregister rx_handler early so bond_handle_frame wouldn't be called
+ * for this slave anymore.
+ */
+ netdev_rx_handler_unregister(slave_dev);
+- write_unlock_bh(&bond->lock);
+- synchronize_net();
+ write_lock_bh(&bond->lock);
+
+ if (!bond->params.fail_over_mac) {
+@@ -3422,6 +3421,28 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
+
+ /*-------------------------- Device entry points ----------------------------*/
+
++static void bond_work_init_all(struct bonding *bond)
++{
++ INIT_DELAYED_WORK(&bond->mcast_work,
++ bond_resend_igmp_join_requests_delayed);
++ INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
++ INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
++ if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
++ INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
++ else
++ INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
++ INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
++}
++
++static void bond_work_cancel_all(struct bonding *bond)
++{
++ cancel_delayed_work_sync(&bond->mii_work);
++ cancel_delayed_work_sync(&bond->arp_work);
++ cancel_delayed_work_sync(&bond->alb_work);
++ cancel_delayed_work_sync(&bond->ad_work);
++ cancel_delayed_work_sync(&bond->mcast_work);
++}
++
+ static int bond_open(struct net_device *bond_dev)
+ {
+ struct bonding *bond = netdev_priv(bond_dev);
+@@ -3444,41 +3465,27 @@ static int bond_open(struct net_device *bond_dev)
+ }
+ read_unlock(&bond->lock);
+
+- INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
++ bond_work_init_all(bond);
+
+ if (bond_is_lb(bond)) {
+ /* bond_alb_initialize must be called before the timer
+ * is started.
+ */
+- if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) {
+- /* something went wrong - fail the open operation */
++ if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB)))
+ return -ENOMEM;
+- }
+-
+- INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
+ queue_delayed_work(bond->wq, &bond->alb_work, 0);
+ }
+
+- if (bond->params.miimon) { /* link check interval, in milliseconds. */
+- INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
++ if (bond->params.miimon) /* link check interval, in milliseconds. */
+ queue_delayed_work(bond->wq, &bond->mii_work, 0);
+- }
+
+ if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
+- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+- INIT_DELAYED_WORK(&bond->arp_work,
+- bond_activebackup_arp_mon);
+- else
+- INIT_DELAYED_WORK(&bond->arp_work,
+- bond_loadbalance_arp_mon);
+-
+ queue_delayed_work(bond->wq, &bond->arp_work, 0);
+ if (bond->params.arp_validate)
+ bond->recv_probe = bond_arp_rcv;
+ }
+
+ if (bond->params.mode == BOND_MODE_8023AD) {
+- INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
+ queue_delayed_work(bond->wq, &bond->ad_work, 0);
+ /* register to receive LACPDUs */
+ bond->recv_probe = bond_3ad_lacpdu_recv;
+@@ -3493,34 +3500,10 @@ static int bond_close(struct net_device *bond_dev)
+ struct bonding *bond = netdev_priv(bond_dev);
+
+ write_lock_bh(&bond->lock);
+-
+ bond->send_peer_notif = 0;
+-
+ write_unlock_bh(&bond->lock);
+
+- if (bond->params.miimon) { /* link check interval, in milliseconds. */
+- cancel_delayed_work_sync(&bond->mii_work);
+- }
+-
+- if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
+- cancel_delayed_work_sync(&bond->arp_work);
+- }
+-
+- switch (bond->params.mode) {
+- case BOND_MODE_8023AD:
+- cancel_delayed_work_sync(&bond->ad_work);
+- break;
+- case BOND_MODE_TLB:
+- case BOND_MODE_ALB:
+- cancel_delayed_work_sync(&bond->alb_work);
+- break;
+- default:
+- break;
+- }
+-
+- if (delayed_work_pending(&bond->mcast_work))
+- cancel_delayed_work_sync(&bond->mcast_work);
+-
++ bond_work_cancel_all(bond);
+ if (bond_is_lb(bond)) {
+ /* Must be called only after all
+ * slaves have been released
+@@ -4364,26 +4347,6 @@ static void bond_setup(struct net_device *bond_dev)
+ bond_dev->features |= bond_dev->hw_features;
+ }
+
+-static void bond_work_cancel_all(struct bonding *bond)
+-{
+- if (bond->params.miimon && delayed_work_pending(&bond->mii_work))
+- cancel_delayed_work_sync(&bond->mii_work);
+-
+- if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work))
+- cancel_delayed_work_sync(&bond->arp_work);
+-
+- if (bond->params.mode == BOND_MODE_ALB &&
+- delayed_work_pending(&bond->alb_work))
+- cancel_delayed_work_sync(&bond->alb_work);
+-
+- if (bond->params.mode == BOND_MODE_8023AD &&
+- delayed_work_pending(&bond->ad_work))
+- cancel_delayed_work_sync(&bond->ad_work);
+-
+- if (delayed_work_pending(&bond->mcast_work))
+- cancel_delayed_work_sync(&bond->mcast_work);
+-}
+-
+ /*
+ * Destroy a bonding device.
+ * Must be under rtnl_lock when this function is called.
+diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
+index a03fde9..8ed48c2 100644
+--- a/drivers/net/bonding/bond_sysfs.c
++++ b/drivers/net/bonding/bond_sysfs.c
+@@ -184,6 +184,11 @@ int bond_create_slave_symlinks(struct net_device *master,
+ sprintf(linkname, "slave_%s", slave->name);
+ ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
+ linkname);
++
++ /* free the master link created earlier in case of error */
++ if (ret)
++ sysfs_remove_link(&(slave->dev.kobj), "master");
++
+ return ret;
+
+ }
+@@ -514,6 +519,8 @@ static ssize_t bonding_store_arp_interval(struct device *d,
+ int new_value, ret = count;
+ struct bonding *bond = to_bond(d);
+
++ if (!rtnl_trylock())
++ return restart_syscall();
+ if (sscanf(buf, "%d", &new_value) != 1) {
+ pr_err("%s: no arp_interval value specified.\n",
+ bond->dev->name);
+@@ -521,7 +528,7 @@ static ssize_t bonding_store_arp_interval(struct device *d,
+ goto out;
+ }
+ if (new_value < 0) {
+- pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
++ pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
+ bond->dev->name, new_value, INT_MAX);
+ ret = -EINVAL;
+ goto out;
+@@ -536,18 +543,15 @@ static ssize_t bonding_store_arp_interval(struct device *d,
+ pr_info("%s: Setting ARP monitoring interval to %d.\n",
+ bond->dev->name, new_value);
+ bond->params.arp_interval = new_value;
+- if (bond->params.miimon) {
+- pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+- bond->dev->name, bond->dev->name);
+- bond->params.miimon = 0;
+- if (delayed_work_pending(&bond->mii_work)) {
+- cancel_delayed_work(&bond->mii_work);
+- flush_workqueue(bond->wq);
++ if (new_value) {
++ if (bond->params.miimon) {
++ pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
++ bond->dev->name, bond->dev->name);
++ bond->params.miimon = 0;
+ }
+- }
+- if (!bond->params.arp_targets[0]) {
+- pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+- bond->dev->name);
++ if (!bond->params.arp_targets[0])
++ pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
++ bond->dev->name);
+ }
+ if (bond->dev->flags & IFF_UP) {
+ /* If the interface is up, we may need to fire off
+@@ -555,19 +559,15 @@ static ssize_t bonding_store_arp_interval(struct device *d,
+ * timer will get fired off when the open function
+ * is called.
+ */
+- if (!delayed_work_pending(&bond->arp_work)) {
+- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+- INIT_DELAYED_WORK(&bond->arp_work,
+- bond_activebackup_arp_mon);
+- else
+- INIT_DELAYED_WORK(&bond->arp_work,
+- bond_loadbalance_arp_mon);
+-
++ if (!new_value) {
++ cancel_delayed_work_sync(&bond->arp_work);
++ } else {
++ cancel_delayed_work_sync(&bond->mii_work);
+ queue_delayed_work(bond->wq, &bond->arp_work, 0);
+ }
+ }
+-
+ out:
++ rtnl_unlock();
+ return ret;
+ }
+ static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR,
+@@ -707,7 +707,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
+ }
+ if (new_value < 0) {
+ pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
+- bond->dev->name, new_value, 1, INT_MAX);
++ bond->dev->name, new_value, 0, INT_MAX);
+ ret = -EINVAL;
+ goto out;
+ } else {
+@@ -762,8 +762,8 @@ static ssize_t bonding_store_updelay(struct device *d,
+ goto out;
+ }
+ if (new_value < 0) {
+- pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
+- bond->dev->name, new_value, 1, INT_MAX);
++ pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
++ bond->dev->name, new_value, 0, INT_MAX);
+ ret = -EINVAL;
+ goto out;
+ } else {
+@@ -963,6 +963,8 @@ static ssize_t bonding_store_miimon(struct device *d,
+ int new_value, ret = count;
+ struct bonding *bond = to_bond(d);
+
++ if (!rtnl_trylock())
++ return restart_syscall();
+ if (sscanf(buf, "%d", &new_value) != 1) {
+ pr_err("%s: no miimon value specified.\n",
+ bond->dev->name);
+@@ -971,50 +973,43 @@ static ssize_t bonding_store_miimon(struct device *d,
+ }
+ if (new_value < 0) {
+ pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
+- bond->dev->name, new_value, 1, INT_MAX);
++ bond->dev->name, new_value, 0, INT_MAX);
+ ret = -EINVAL;
+ goto out;
+- } else {
+- pr_info("%s: Setting MII monitoring interval to %d.\n",
+- bond->dev->name, new_value);
+- bond->params.miimon = new_value;
+- if (bond->params.updelay)
+- pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+- bond->dev->name,
+- bond->params.updelay * bond->params.miimon);
+- if (bond->params.downdelay)
+- pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+- bond->dev->name,
+- bond->params.downdelay * bond->params.miimon);
+- if (bond->params.arp_interval) {
+- pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+- bond->dev->name);
+- bond->params.arp_interval = 0;
+- if (bond->params.arp_validate) {
+- bond->params.arp_validate =
+- BOND_ARP_VALIDATE_NONE;
+- }
+- if (delayed_work_pending(&bond->arp_work)) {
+- cancel_delayed_work(&bond->arp_work);
+- flush_workqueue(bond->wq);
+- }
+- }
+-
+- if (bond->dev->flags & IFF_UP) {
+- /* If the interface is up, we may need to fire off
+- * the MII timer. If the interface is down, the
+- * timer will get fired off when the open function
+- * is called.
+- */
+- if (!delayed_work_pending(&bond->mii_work)) {
+- INIT_DELAYED_WORK(&bond->mii_work,
+- bond_mii_monitor);
+- queue_delayed_work(bond->wq,
+- &bond->mii_work, 0);
+- }
++ }
++ pr_info("%s: Setting MII monitoring interval to %d.\n",
++ bond->dev->name, new_value);
++ bond->params.miimon = new_value;
++ if (bond->params.updelay)
++ pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
++ bond->dev->name,
++ bond->params.updelay * bond->params.miimon);
++ if (bond->params.downdelay)
++ pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
++ bond->dev->name,
++ bond->params.downdelay * bond->params.miimon);
++ if (new_value && bond->params.arp_interval) {
++ pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
++ bond->dev->name);
++ bond->params.arp_interval = 0;
++ if (bond->params.arp_validate)
++ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
++ }
++ if (bond->dev->flags & IFF_UP) {
++ /* If the interface is up, we may need to fire off
++ * the MII timer. If the interface is down, the
++ * timer will get fired off when the open function
++ * is called.
++ */
++ if (!new_value) {
++ cancel_delayed_work_sync(&bond->mii_work);
++ } else {
++ cancel_delayed_work_sync(&bond->arp_work);
++ queue_delayed_work(bond->wq, &bond->mii_work, 0);
+ }
+ }
+ out:
++ rtnl_unlock();
+ return ret;
+ }
+ static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR,
+diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
+index c7f3d4e..c31c2d6 100644
+--- a/drivers/net/can/sja1000/plx_pci.c
++++ b/drivers/net/can/sja1000/plx_pci.c
+@@ -309,7 +309,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
+ */
+ if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
+ REG_CR_BASICCAN_INITIAL &&
+- (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
++ (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) &&
+ (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
+ flag = 1;
+
+@@ -321,7 +321,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
+ * See states on p. 23 of the Datasheet.
+ */
+ if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
+- priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
++ priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL &&
+ priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
+ return flag;
+
+diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
+index 192b0d1..6a1acfe 100644
+--- a/drivers/net/can/sja1000/sja1000.c
++++ b/drivers/net/can/sja1000/sja1000.c
+@@ -91,7 +91,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
+ */
+ spin_lock_irqsave(&priv->cmdreg_lock, flags);
+ priv->write_reg(priv, REG_CMR, val);
+- priv->read_reg(priv, REG_SR);
++ priv->read_reg(priv, SJA1000_REG_SR);
+ spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
+ }
+
+@@ -497,7 +497,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+
+ while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
+ n++;
+- status = priv->read_reg(priv, REG_SR);
++ status = priv->read_reg(priv, SJA1000_REG_SR);
+ /* check for absent controller due to hw unplug */
+ if (status == 0xFF && sja1000_is_absent(priv))
+ return IRQ_NONE;
+@@ -516,7 +516,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+ /* receive interrupt */
+ while (status & SR_RBS) {
+ sja1000_rx(dev);
+- status = priv->read_reg(priv, REG_SR);
++ status = priv->read_reg(priv, SJA1000_REG_SR);
+ /* check for absent controller */
+ if (status == 0xFF && sja1000_is_absent(priv))
+ return IRQ_NONE;
+diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
+index 23fff06..2a79543 100644
+--- a/drivers/net/can/sja1000/sja1000.h
++++ b/drivers/net/can/sja1000/sja1000.h
+@@ -56,7 +56,7 @@
+ /* SJA1000 registers - manual section 6.4 (Pelican Mode) */
+ #define REG_MOD 0x00
+ #define REG_CMR 0x01
+-#define REG_SR 0x02
++#define SJA1000_REG_SR 0x02
+ #define REG_IR 0x03
+ #define REG_IER 0x04
+ #define REG_ALC 0x0B
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
+index 829b5ad..edfdf6b 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
+@@ -438,7 +438,6 @@ struct atl1e_adapter {
+ struct atl1e_hw hw;
+ struct atl1e_hw_stats hw_stats;
+
+- bool have_msi;
+ u32 wol;
+ u16 link_speed;
+ u16 link_duplex;
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+index 95483bc..c69dc29 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -1867,37 +1867,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter)
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->pdev->irq, netdev);
+-
+- if (adapter->have_msi)
+- pci_disable_msi(adapter->pdev);
+ }
+
+ static int atl1e_request_irq(struct atl1e_adapter *adapter)
+ {
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+- int flags = 0;
+ int err = 0;
+
+- adapter->have_msi = true;
+- err = pci_enable_msi(adapter->pdev);
+- if (err) {
+- netdev_dbg(adapter->netdev,
+- "Unable to allocate MSI interrupt Error: %d\n", err);
+- adapter->have_msi = false;
+- } else
+- netdev->irq = pdev->irq;
+-
+-
+- if (!adapter->have_msi)
+- flags |= IRQF_SHARED;
+- err = request_irq(adapter->pdev->irq, atl1e_intr, flags,
+- netdev->name, netdev);
++ err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED,
++ netdev->name, netdev);
+ if (err) {
+ netdev_dbg(adapter->netdev,
+ "Unable to allocate interrupt Error: %d\n", err);
+- if (adapter->have_msi)
+- pci_disable_msi(adapter->pdev);
+ return err;
+ }
+ netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n");
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index c86fa50..c6b9903 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -13433,8 +13433,11 @@ static void __devinit tg3_read_vpd(struct tg3 *tp)
+ if (j + len > block_end)
+ goto partno;
+
+- memcpy(tp->fw_ver, &vpd_data[j], len);
+- strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
++ if (len >= sizeof(tp->fw_ver))
++ len = sizeof(tp->fw_ver) - 1;
++ memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
++ snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
++ &vpd_data[j]);
+ }
+
+ partno:
+diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
+index 2a22f52..2f2e98b 100644
+--- a/drivers/net/ethernet/davicom/dm9000.c
++++ b/drivers/net/ethernet/davicom/dm9000.c
+@@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
+ tmp = readl(reg);
+ }
+
++/*
++ * Sleep, either by using msleep() or if we are suspending, then
++ * use mdelay() to sleep.
++ */
++static void dm9000_msleep(board_info_t *db, unsigned int ms)
++{
++ if (db->in_suspend)
++ mdelay(ms);
++ else
++ msleep(ms);
++}
++
++/* Read a word from phyxcer */
++static int
++dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
++{
++ board_info_t *db = netdev_priv(dev);
++ unsigned long flags;
++ unsigned int reg_save;
++ int ret;
++
++ mutex_lock(&db->addr_lock);
++
++ spin_lock_irqsave(&db->lock, flags);
++
++ /* Save previous register address */
++ reg_save = readb(db->io_addr);
++
++ /* Fill the phyxcer register into REG_0C */
++ iow(db, DM9000_EPAR, DM9000_PHY | reg);
++
++ /* Issue phyxcer read command */
++ iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
++
++ writeb(reg_save, db->io_addr);
++ spin_unlock_irqrestore(&db->lock, flags);
++
++ dm9000_msleep(db, 1); /* Wait read complete */
++
++ spin_lock_irqsave(&db->lock, flags);
++ reg_save = readb(db->io_addr);
++
++ iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
++
++ /* The read data keeps on REG_0D & REG_0E */
++ ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
++
++ /* restore the previous address */
++ writeb(reg_save, db->io_addr);
++ spin_unlock_irqrestore(&db->lock, flags);
++
++ mutex_unlock(&db->addr_lock);
++
++ dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
++ return ret;
++}
++
++/* Write a word to phyxcer */
++static void
++dm9000_phy_write(struct net_device *dev,
++ int phyaddr_unused, int reg, int value)
++{
++ board_info_t *db = netdev_priv(dev);
++ unsigned long flags;
++ unsigned long reg_save;
++
++ dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
++ mutex_lock(&db->addr_lock);
++
++ spin_lock_irqsave(&db->lock, flags);
++
++ /* Save previous register address */
++ reg_save = readb(db->io_addr);
++
++ /* Fill the phyxcer register into REG_0C */
++ iow(db, DM9000_EPAR, DM9000_PHY | reg);
++
++ /* Fill the written data into REG_0D & REG_0E */
++ iow(db, DM9000_EPDRL, value);
++ iow(db, DM9000_EPDRH, value >> 8);
++
++ /* Issue phyxcer write command */
++ iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
++
++ writeb(reg_save, db->io_addr);
++ spin_unlock_irqrestore(&db->lock, flags);
++
++ dm9000_msleep(db, 1); /* Wait write complete */
++
++ spin_lock_irqsave(&db->lock, flags);
++ reg_save = readb(db->io_addr);
++
++ iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
++
++ /* restore the previous address */
++ writeb(reg_save, db->io_addr);
++
++ spin_unlock_irqrestore(&db->lock, flags);
++ mutex_unlock(&db->addr_lock);
++}
++
+ /* dm9000_set_io
+ *
+ * select the specified set of io routines to use with the
+@@ -793,6 +894,9 @@ dm9000_init_dm9000(struct net_device *dev)
+
+ iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
+
++ dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
++ dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
++
+ ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
+
+ /* if wol is needed, then always set NCR_WAKEEN otherwise we end
+@@ -1199,109 +1303,6 @@ dm9000_open(struct net_device *dev)
+ return 0;
+ }
+
+-/*
+- * Sleep, either by using msleep() or if we are suspending, then
+- * use mdelay() to sleep.
+- */
+-static void dm9000_msleep(board_info_t *db, unsigned int ms)
+-{
+- if (db->in_suspend)
+- mdelay(ms);
+- else
+- msleep(ms);
+-}
+-
+-/*
+- * Read a word from phyxcer
+- */
+-static int
+-dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
+-{
+- board_info_t *db = netdev_priv(dev);
+- unsigned long flags;
+- unsigned int reg_save;
+- int ret;
+-
+- mutex_lock(&db->addr_lock);
+-
+- spin_lock_irqsave(&db->lock,flags);
+-
+- /* Save previous register address */
+- reg_save = readb(db->io_addr);
+-
+- /* Fill the phyxcer register into REG_0C */
+- iow(db, DM9000_EPAR, DM9000_PHY | reg);
+-
+- iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */
+-
+- writeb(reg_save, db->io_addr);
+- spin_unlock_irqrestore(&db->lock,flags);
+-
+- dm9000_msleep(db, 1); /* Wait read complete */
+-
+- spin_lock_irqsave(&db->lock,flags);
+- reg_save = readb(db->io_addr);
+-
+- iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
+-
+- /* The read data keeps on REG_0D & REG_0E */
+- ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
+-
+- /* restore the previous address */
+- writeb(reg_save, db->io_addr);
+- spin_unlock_irqrestore(&db->lock,flags);
+-
+- mutex_unlock(&db->addr_lock);
+-
+- dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
+- return ret;
+-}
+-
+-/*
+- * Write a word to phyxcer
+- */
+-static void
+-dm9000_phy_write(struct net_device *dev,
+- int phyaddr_unused, int reg, int value)
+-{
+- board_info_t *db = netdev_priv(dev);
+- unsigned long flags;
+- unsigned long reg_save;
+-
+- dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
+- mutex_lock(&db->addr_lock);
+-
+- spin_lock_irqsave(&db->lock,flags);
+-
+- /* Save previous register address */
+- reg_save = readb(db->io_addr);
+-
+- /* Fill the phyxcer register into REG_0C */
+- iow(db, DM9000_EPAR, DM9000_PHY | reg);
+-
+- /* Fill the written data into REG_0D & REG_0E */
+- iow(db, DM9000_EPDRL, value);
+- iow(db, DM9000_EPDRH, value >> 8);
+-
+- iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */
+-
+- writeb(reg_save, db->io_addr);
+- spin_unlock_irqrestore(&db->lock, flags);
+-
+- dm9000_msleep(db, 1); /* Wait write complete */
+-
+- spin_lock_irqsave(&db->lock,flags);
+- reg_save = readb(db->io_addr);
+-
+- iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
+-
+- /* restore the previous address */
+- writeb(reg_save, db->io_addr);
+-
+- spin_unlock_irqrestore(&db->lock, flags);
+- mutex_unlock(&db->addr_lock);
+-}
+-
+ static void
+ dm9000_shutdown(struct net_device *dev)
+ {
+@@ -1502,7 +1503,12 @@ dm9000_probe(struct platform_device *pdev)
+ db->flags |= DM9000_PLATF_SIMPLE_PHY;
+ #endif
+
+- dm9000_reset(db);
++ /* Fixing bug on dm9000_probe, takeover dm9000_reset(db),
++ * Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo
++ * while probe stage.
++ */
++
++ iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
+
+ /* try multiple times, DM9000 sometimes gets the read wrong */
+ for (i = 0; i < 8; i++) {
+diff --git a/drivers/net/ethernet/davicom/dm9000.h b/drivers/net/ethernet/davicom/dm9000.h
+index 55688bd..9ce058a 100644
+--- a/drivers/net/ethernet/davicom/dm9000.h
++++ b/drivers/net/ethernet/davicom/dm9000.h
+@@ -69,7 +69,9 @@
+ #define NCR_WAKEEN (1<<6)
+ #define NCR_FCOL (1<<4)
+ #define NCR_FDX (1<<3)
+-#define NCR_LBK (3<<1)
++
++#define NCR_RESERVED (3<<1)
++#define NCR_MAC_LBK (1<<1)
+ #define NCR_RST (1<<0)
+
+ #define NSR_SPEED (1<<7)
+@@ -167,5 +169,12 @@
+ #define ISR_LNKCHNG (1<<5)
+ #define ISR_UNDERRUN (1<<4)
+
++/* Davicom MII registers.
++ */
++
++#define MII_DM_DSPCR 0x1b /* DSP Control Register */
++
++#define DSPCR_INIT_PARAM 0xE100 /* DSP init parameter */
++
+ #endif /* _DM9000X_H_ */
+
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index cc96a5a..41396fa 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -8003,12 +8003,15 @@ static int __init ixgbe_init_module(void)
+ pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
+ pr_info("%s\n", ixgbe_copyright);
+
++ ret = pci_register_driver(&ixgbe_driver);
++ if (ret)
++ return ret;
++
+ #ifdef CONFIG_IXGBE_DCA
+ dca_register_notify(&dca_notifier);
+ #endif
+
+- ret = pci_register_driver(&ixgbe_driver);
+- return ret;
++ return 0;
+ }
+
+ module_init(ixgbe_init_module);
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index 69fc888..94f9a8f 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -1066,7 +1066,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
+ sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
+ sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
+
+- tp = space - 2048/8;
++ tp = space - 8192/8;
+ sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
+ sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
+ } else {
+diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
+index 3c896ce..a0f229e 100644
+--- a/drivers/net/ethernet/marvell/sky2.h
++++ b/drivers/net/ethernet/marvell/sky2.h
+@@ -2069,7 +2069,7 @@ enum {
+ GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
+ GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
+
+-#define GMAC_DEF_MSK GM_IS_TX_FF_UR
++#define GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)
+ };
+
+ /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
+diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
+index f56743a..115e374 100644
+--- a/drivers/net/ethernet/micrel/ks8851.c
++++ b/drivers/net/ethernet/micrel/ks8851.c
+@@ -490,7 +490,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
+ for (; rxfc != 0; rxfc--) {
+ rxh = ks8851_rdreg32(ks, KS_RXFHSR);
+ rxstat = rxh & 0xffff;
+- rxlen = rxh >> 16;
++ rxlen = (rxh >> 16) & 0xfff;
+
+ netif_dbg(ks, rx_status, ks->netdev,
+ "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
+diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+index 43c7b25..495d65c 100644
+--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
++++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+@@ -1545,9 +1545,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, netdev);
+ if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
+- skb->ip_summed = CHECKSUM_NONE;
+- else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ else
++ skb->ip_summed = CHECKSUM_NONE;
+
+ napi_gro_receive(&adapter->napi, skb);
+ (*work_done)++;
+diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
+index 22f2788..fd8115e 100644
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -1048,7 +1048,7 @@ static void emac_tx_handler(void *token, int len, int status)
+ struct net_device *ndev = skb->dev;
+
+ if (unlikely(netif_queue_stopped(ndev)))
+- netif_start_queue(ndev);
++ netif_wake_queue(ndev);
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += len;
+ dev_kfree_skb_any(skb);
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index 7bd219b..f3d17f8 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -720,8 +720,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
+ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
+ {
+ struct usbnet *dev = netdev_priv(netdev);
++ int ret;
++
++ if (new_mtu > MAX_SINGLE_PACKET_SIZE)
++ return -EINVAL;
+
+- int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu);
++ ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+ check_warn_return(ret, "Failed to set mac rx frame length");
+
+ return usbnet_change_mtu(netdev, new_mtu);
+@@ -965,7 +969,7 @@ static int smsc75xx_reset(struct usbnet *dev)
+
+ netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x", buf);
+
+- ret = smsc75xx_set_rx_max_frame_length(dev, 1514);
++ ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+ check_warn_return(ret, "Failed to set max rx frame length");
+
+ ret = smsc75xx_read_reg(dev, MAC_RX, &buf);
+@@ -1109,8 +1113,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT))
+ dev->net->stats.rx_frame_errors++;
+ } else {
+- /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
+- if (unlikely(size > (ETH_FRAME_LEN + 12))) {
++ /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */
++ if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) {
+ netif_dbg(dev, rx_err, dev->net,
+ "size err rx_cmd_a=0x%08x", rx_cmd_a);
+ return 0;
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+index ae750f9..3965356 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+@@ -946,6 +946,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
+ AR_PHY_CL_TAB_1,
+ AR_PHY_CL_TAB_2 };
+
++ /* Use chip chainmask only for calibration */
+ ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
+
+ if (rtt) {
+@@ -1087,6 +1088,9 @@ skip_tx_iqcal:
+ ar9003_hw_rtt_disable(ah);
+ }
+
++ /* Revert chainmask to runtime parameters */
++ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
++
+ /* Initialize list pointers */
+ ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
+ ah->supp_cals = IQ_MISMATCH_CAL;
+diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
+index 5e45604..12975ad 100644
+--- a/drivers/net/wireless/b43/dma.c
++++ b/drivers/net/wireless/b43/dma.c
+@@ -1482,8 +1482,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ const struct b43_dma_ops *ops;
+ struct b43_dmaring *ring;
+ struct b43_dmadesc_meta *meta;
++ static const struct b43_txstatus fake; /* filled with 0 */
++ const struct b43_txstatus *txstat;
+ int slot, firstused;
+ bool frame_succeed;
++ int skip;
++ static u8 err_out1, err_out2;
+
+ ring = parse_cookie(dev, status->cookie, &slot);
+ if (unlikely(!ring))
+@@ -1496,13 +1500,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ firstused = ring->current_slot - ring->used_slots + 1;
+ if (firstused < 0)
+ firstused = ring->nr_slots + firstused;
++
++ skip = 0;
+ if (unlikely(slot != firstused)) {
+ /* This possibly is a firmware bug and will result in
+- * malfunction, memory leaks and/or stall of DMA functionality. */
+- b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
+- "Expected %d, but got %d\n",
+- ring->index, firstused, slot);
+- return;
++ * malfunction, memory leaks and/or stall of DMA functionality.
++ */
++ if (slot == next_slot(ring, next_slot(ring, firstused))) {
++ /* If a single header/data pair was missed, skip over
++ * the first two slots in an attempt to recover.
++ */
++ slot = firstused;
++ skip = 2;
++ if (!err_out1) {
++ /* Report the error once. */
++ b43dbg(dev->wl,
++ "Skip on DMA ring %d slot %d.\n",
++ ring->index, slot);
++ err_out1 = 1;
++ }
++ } else {
++ /* More than a single header/data pair were missed.
++ * Report this error once.
++ */
++ if (!err_out2)
++ b43dbg(dev->wl,
++ "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
++ ring->index, firstused, slot);
++ err_out2 = 1;
++ return;
++ }
+ }
+
+ ops = ring->ops;
+@@ -1517,11 +1544,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ slot, firstused, ring->index);
+ break;
+ }
++
+ if (meta->skb) {
+ struct b43_private_tx_info *priv_info =
+- b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
++ b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
+
+- unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
++ unmap_descbuffer(ring, meta->dmaaddr,
++ meta->skb->len, 1);
+ kfree(priv_info->bouncebuffer);
+ priv_info->bouncebuffer = NULL;
+ } else {
+@@ -1533,8 +1562,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ struct ieee80211_tx_info *info;
+
+ if (unlikely(!meta->skb)) {
+- /* This is a scatter-gather fragment of a frame, so
+- * the skb pointer must not be NULL. */
++ /* This is a scatter-gather fragment of a frame,
++ * so the skb pointer must not be NULL.
++ */
+ b43dbg(dev->wl, "TX status unexpected NULL skb "
+ "at slot %d (first=%d) on ring %d\n",
+ slot, firstused, ring->index);
+@@ -1545,9 +1575,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+
+ /*
+ * Call back to inform the ieee80211 subsystem about
+- * the status of the transmission.
++ * the status of the transmission. When skipping over
++ * a missed TX status report, use a status structure
++ * filled with zeros to indicate that the frame was not
++ * sent (frame_count 0) and not acknowledged
+ */
+- frame_succeed = b43_fill_txstatus_report(dev, info, status);
++ if (unlikely(skip))
++ txstat = &fake;
++ else
++ txstat = status;
++
++ frame_succeed = b43_fill_txstatus_report(dev, info,
++ txstat);
+ #ifdef CONFIG_B43_DEBUG
+ if (frame_succeed)
+ ring->nr_succeed_tx_packets++;
+@@ -1575,12 +1614,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ /* Everything unmapped and free'd. So it's not used anymore. */
+ ring->used_slots--;
+
+- if (meta->is_last_fragment) {
++ if (meta->is_last_fragment && !skip) {
+ /* This is the last scatter-gather
+ * fragment of the frame. We are done. */
+ break;
+ }
+ slot = next_slot(ring, slot);
++ if (skip > 0)
++ --skip;
+ }
+ if (ring->stopped) {
+ B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
+diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
+index f099b30..3de9875 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
+@@ -1146,7 +1146,9 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
+ rt2x00dev->hw->wiphy->interface_modes |=
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP) |
++#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
++#endif
+ BIT(NL80211_IFTYPE_WDS);
+
+ /*
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index c04ee92..e5fe956 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -812,6 +812,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
+ if (unlikely(!_urb)) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Can't allocate urb. Drop skb!\n"));
++ kfree_skb(skb);
+ return;
+ }
+ urb_list = &rtlusb->tx_pending[ep_num];
+diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
+index 4c63f77..2e2b04f 100644
+--- a/drivers/spi/spi-mpc512x-psc.c
++++ b/drivers/spi/spi-mpc512x-psc.c
+@@ -164,7 +164,7 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi,
+
+ for (i = count; i > 0; i--) {
+ data = tx_buf ? *tx_buf++ : 0;
+- if (len == EOFBYTE)
++ if (len == EOFBYTE && t->cs_change)
+ setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF);
+ out_8(&fifo->txdata_8, data);
+ len--;
+diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
+index c72128f..42cad5c 100644
+--- a/drivers/staging/comedi/drivers/s626.c
++++ b/drivers/staging/comedi/drivers/s626.c
+@@ -1882,7 +1882,7 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+ case TRIG_NONE:
+ /* continous acquisition */
+ devpriv->ai_continous = 1;
+- devpriv->ai_sample_count = 0;
++ devpriv->ai_sample_count = 1;
+ break;
+ }
+
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 90c8e3a..99fcb8c 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -159,7 +159,7 @@ struct atmel_uart_port {
+ };
+
+ static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
+-static unsigned long atmel_ports_in_use;
++static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
+
+ #ifdef SUPPORT_SYSRQ
+ static struct console atmel_console;
+@@ -1784,15 +1784,14 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev)
+ if (ret < 0)
+ /* port id not found in platform data nor device-tree aliases:
+ * auto-enumerate it */
+- ret = find_first_zero_bit(&atmel_ports_in_use,
+- sizeof(atmel_ports_in_use));
++ ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
+
+- if (ret > ATMEL_MAX_UART) {
++ if (ret >= ATMEL_MAX_UART) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+- if (test_and_set_bit(ret, &atmel_ports_in_use)) {
++ if (test_and_set_bit(ret, atmel_ports_in_use)) {
+ /* port already in use */
+ ret = -EBUSY;
+ goto err;
+@@ -1866,7 +1865,7 @@ static int __devexit atmel_serial_remove(struct platform_device *pdev)
+
+ /* "port" is allocated statically, so we shouldn't free it */
+
+- clear_bit(port->line, &atmel_ports_in_use);
++ clear_bit(port->line, atmel_ports_in_use);
+
+ clk_put(atmel_port->clk);
+
+diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
+index 7a367ff..fd89c42 100644
+--- a/drivers/tty/vt/vc_screen.c
++++ b/drivers/tty/vt/vc_screen.c
+@@ -93,7 +93,7 @@ vcs_poll_data_free(struct vcs_poll_data *poll)
+ static struct vcs_poll_data *
+ vcs_poll_data_get(struct file *file)
+ {
+- struct vcs_poll_data *poll = file->private_data;
++ struct vcs_poll_data *poll = file->private_data, *kill = NULL;
+
+ if (poll)
+ return poll;
+@@ -122,10 +122,12 @@ vcs_poll_data_get(struct file *file)
+ file->private_data = poll;
+ } else {
+ /* someone else raced ahead of us */
+- vcs_poll_data_free(poll);
++ kill = poll;
+ poll = file->private_data;
+ }
+ spin_unlock(&file->f_lock);
++ if (kill)
++ vcs_poll_data_free(kill);
+
+ return poll;
+ }
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 5c1f9e7..37b2a89 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1964,8 +1964,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ if (event_trb != ep_ring->dequeue &&
+ event_trb != td->last_trb)
+ td->urb->actual_length =
+- td->urb->transfer_buffer_length
+- - TRB_LEN(le32_to_cpu(event->transfer_len));
++ td->urb->transfer_buffer_length -
++ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ else
+ td->urb->actual_length = 0;
+
+@@ -1997,7 +1997,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ /* Maybe the event was for the data stage? */
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length -
+- TRB_LEN(le32_to_cpu(event->transfer_len));
++ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ xhci_dbg(xhci, "Waiting for status "
+ "stage event\n");
+ return 0;
+@@ -2033,7 +2033,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ /* handle completion code */
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+- if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
++ if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
+ frame->status = 0;
+ break;
+ }
+@@ -2078,7 +2078,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
+ }
+ len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
+- TRB_LEN(le32_to_cpu(event->transfer_len));
++ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+ if (trb_comp_code != COMP_STOP_INVAL) {
+ frame->actual_length = len;
+@@ -2136,7 +2136,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ case COMP_SUCCESS:
+ /* Double check that the HW transferred everything. */
+ if (event_trb != td->last_trb ||
+- TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
++ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+ xhci_warn(xhci, "WARN Successful completion "
+ "on short TX\n");
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+@@ -2164,18 +2164,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ "%d bytes untransferred\n",
+ td->urb->ep->desc.bEndpointAddress,
+ td->urb->transfer_buffer_length,
+- TRB_LEN(le32_to_cpu(event->transfer_len)));
++ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
+ /* Fast path - was this the last TRB in the TD for this URB? */
+ if (event_trb == td->last_trb) {
+- if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
++ if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+ td->urb->actual_length =
+ td->urb->transfer_buffer_length -
+- TRB_LEN(le32_to_cpu(event->transfer_len));
++ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ if (td->urb->transfer_buffer_length <
+ td->urb->actual_length) {
+ xhci_warn(xhci, "HC gave bad length "
+ "of %d bytes left\n",
+- TRB_LEN(le32_to_cpu(event->transfer_len)));
++ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
+ td->urb->actual_length = 0;
+ if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
+ *status = -EREMOTEIO;
+@@ -2217,7 +2217,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ if (trb_comp_code != COMP_STOP_INVAL)
+ td->urb->actual_length +=
+ TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
+- TRB_LEN(le32_to_cpu(event->transfer_len));
++ EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ }
+
+ return finish_td(xhci, td, event_trb, event, ep, status, false);
+@@ -2283,7 +2283,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ * transfer type
+ */
+ case COMP_SUCCESS:
+- if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
++ if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
+ break;
+ if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
+ trb_comp_code = COMP_SHORT_TX;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index c519a31..8b4cce45 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -968,6 +968,10 @@ struct xhci_transfer_event {
+ __le32 flags;
+ };
+
++/* Transfer event TRB length bit mask */
++/* bits 0:23 */
++#define EVENT_TRB_LEN(p) ((p) & 0xffffff)
++
+ /** Transfer Event bit fields **/
+ #define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f)
+
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 71c4696..878ff05 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -648,6 +648,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
+ { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
+ { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
++ { USB_DEVICE(MITSUBISHI_VID, MITSUBISHI_FXUSB_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
+ { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 97e0a6b..809c03a 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -584,6 +584,13 @@
+ #define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
+
+ /*
++ * Mitsubishi Electric Corp. (http://www.meau.com)
++ * Submitted by Konstantin Holoborodko
++ */
++#define MITSUBISHI_VID 0x06D3
++#define MITSUBISHI_FXUSB_PID 0x0284 /* USB/RS422 converters: FX-USB-AW/-BD */
++
++/*
+ * Definitions for B&B Electronics products.
+ */
+ #define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 833dddb..53ab273 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -587,6 +587,7 @@ struct block_device *bdgrab(struct block_device *bdev)
+ ihold(bdev->bd_inode);
+ return bdev;
+ }
++EXPORT_SYMBOL(bdgrab);
+
+ long nr_blockdev_pages(void)
+ {
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index f5fbe57..8d4d53d 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3996,7 +3996,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
+ spin_lock(&block_rsv->lock);
+ spin_lock(&sinfo->lock);
+
+- block_rsv->size = num_bytes;
++ block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
+
+ num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
+ sinfo->bytes_reserved + sinfo->bytes_readonly +
+diff --git a/fs/dcache.c b/fs/dcache.c
+index bb7f4cc..e923bf4 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2445,7 +2445,6 @@ static int prepend_path(const struct path *path,
+ bool slash = false;
+ int error = 0;
+
+- br_read_lock(vfsmount_lock);
+ while (dentry != root->dentry || vfsmnt != root->mnt) {
+ struct dentry * parent;
+
+@@ -2475,8 +2474,6 @@ static int prepend_path(const struct path *path,
+ if (!error && !slash)
+ error = prepend(buffer, buflen, "/", 1);
+
+-out:
+- br_read_unlock(vfsmount_lock);
+ return error;
+
+ global_root:
+@@ -2493,7 +2490,7 @@ global_root:
+ error = prepend(buffer, buflen, "/", 1);
+ if (!error)
+ error = vfsmnt->mnt_ns ? 1 : 2;
+- goto out;
++ return error;
+ }
+
+ /**
+@@ -2520,9 +2517,11 @@ char *__d_path(const struct path *path,
+ int error;
+
+ prepend(&res, &buflen, "\0", 1);
++ br_read_lock(vfsmount_lock);
+ write_seqlock(&rename_lock);
+ error = prepend_path(path, root, &res, &buflen);
+ write_sequnlock(&rename_lock);
++ br_read_unlock(vfsmount_lock);
+
+ if (error < 0)
+ return ERR_PTR(error);
+@@ -2539,9 +2538,11 @@ char *d_absolute_path(const struct path *path,
+ int error;
+
+ prepend(&res, &buflen, "\0", 1);
++ br_read_lock(vfsmount_lock);
+ write_seqlock(&rename_lock);
+ error = prepend_path(path, &root, &res, &buflen);
+ write_sequnlock(&rename_lock);
++ br_read_unlock(vfsmount_lock);
+
+ if (error > 1)
+ error = -EINVAL;
+@@ -2605,11 +2606,13 @@ char *d_path(const struct path *path, char *buf, int buflen)
+ return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
+
+ get_fs_root(current->fs, &root);
++ br_read_lock(vfsmount_lock);
+ write_seqlock(&rename_lock);
+ error = path_with_deleted(path, &root, &res, &buflen);
++ write_sequnlock(&rename_lock);
++ br_read_unlock(vfsmount_lock);
+ if (error < 0)
+ res = ERR_PTR(error);
+- write_sequnlock(&rename_lock);
+ path_put(&root);
+ return res;
+ }
+@@ -2764,6 +2767,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
+ get_fs_root_and_pwd(current->fs, &root, &pwd);
+
+ error = -ENOENT;
++ br_read_lock(vfsmount_lock);
+ write_seqlock(&rename_lock);
+ if (!d_unlinked(pwd.dentry)) {
+ unsigned long len;
+@@ -2773,6 +2777,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
+ prepend(&cwd, &buflen, "\0", 1);
+ error = prepend_path(&pwd, &root, &cwd, &buflen);
+ write_sequnlock(&rename_lock);
++ br_read_unlock(vfsmount_lock);
+
+ if (error < 0)
+ goto out;
+@@ -2793,6 +2798,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
+ }
+ } else {
+ write_sequnlock(&rename_lock);
++ br_read_unlock(vfsmount_lock);
+ }
+
+ out:
+diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c
+index d055c75..7326e6e 100644
+--- a/fs/nfs/blocklayout/blocklayoutdm.c
++++ b/fs/nfs/blocklayout/blocklayoutdm.c
+@@ -52,7 +52,8 @@ static void dev_remove(dev_t dev)
+ dprintk("Entering %s\n", __func__);
+
+ memset(&msg, 0, sizeof(msg));
+- msg.data = kzalloc(1 + sizeof(bl_umount_request), GFP_NOFS);
++ msg.len = sizeof(bl_msg) + bl_msg.totallen;
++ msg.data = kzalloc(msg.len, GFP_NOFS);
+ if (!msg.data)
+ goto out;
+
+@@ -63,7 +64,6 @@ static void dev_remove(dev_t dev)
+ memcpy(msg.data, &bl_msg, sizeof(bl_msg));
+ dataptr = (uint8_t *) msg.data;
+ memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request));
+- msg.len = sizeof(bl_msg) + bl_msg.totallen;
+
+ add_wait_queue(&bl_wq, &wq);
+ if (rpc_queue_upcall(bl_device_pipe->d_inode, &msg) < 0) {
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 6d7c53d..5639efd 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3578,7 +3578,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+- int ret = -ENOMEM, npages, i, acl_len = 0;
++ int ret = -ENOMEM, npages, i;
++ size_t acl_len = 0;
+
+ npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ /* As long as we're doing a round trip to the server anyway,
+@@ -6108,22 +6109,8 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
+ static void nfs4_layoutcommit_release(void *calldata)
+ {
+ struct nfs4_layoutcommit_data *data = calldata;
+- struct pnfs_layout_segment *lseg, *tmp;
+- unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
+
+ pnfs_cleanup_layoutcommit(data);
+- /* Matched by references in pnfs_set_layoutcommit */
+- list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
+- list_del_init(&lseg->pls_lc_list);
+- if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
+- &lseg->pls_flags))
+- put_lseg(lseg);
+- }
+-
+- clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
+- smp_mb__after_clear_bit();
+- wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
+-
+ put_rpccred(data->cred);
+ kfree(data);
+ }
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 3ad6595..d12514a 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1356,11 +1356,27 @@ static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
+
+ list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
+ if (lseg->pls_range.iomode == IOMODE_RW &&
+- test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
++ test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
+ list_add(&lseg->pls_lc_list, listp);
+ }
+ }
+
++static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
++{
++ struct pnfs_layout_segment *lseg, *tmp;
++ unsigned long *bitlock = &NFS_I(inode)->flags;
++
++ /* Matched by references in pnfs_set_layoutcommit */
++ list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
++ list_del_init(&lseg->pls_lc_list);
++ put_lseg(lseg);
++ }
++
++ clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
++ smp_mb__after_clear_bit();
++ wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
++}
++
+ void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
+ {
+ if (lseg->pls_range.iomode == IOMODE_RW) {
+@@ -1409,6 +1425,7 @@ void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
+
+ if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
+ nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
++ pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
+ }
+
+ /*
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 800c215..24afa96 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -280,7 +280,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
+ iattr->ia_valid |= ATTR_SIZE;
+ }
+ if (bmval[0] & FATTR4_WORD0_ACL) {
+- int nace;
++ u32 nace;
+ struct nfs4_ace *ace;
+
+ READ_BUF(4); len += 4;
+diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
+index 6bc346c..04eecc4 100644
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -187,8 +187,8 @@ fill_with_dentries(void *buf, const char *name, int namelen, loff_t offset,
+ if (dbuf->count == ARRAY_SIZE(dbuf->dentries))
+ return -ENOSPC;
+
+- if (name[0] == '.' && (name[1] == '\0' ||
+- (name[1] == '.' && name[2] == '\0')))
++ if (name[0] == '.' && (namelen < 2 ||
++ (namelen == 2 && name[1] == '.')))
+ return 0;
+
+ dentry = lookup_one_len(name, dbuf->xadir, namelen);
+diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
+index fabbb81..3899e24 100644
+--- a/fs/sysfs/dir.c
++++ b/fs/sysfs/dir.c
+@@ -985,6 +985,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ ino = parent_sd->s_ino;
+ if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0)
+ filp->f_pos++;
++ else
++ return 0;
+ }
+ if (filp->f_pos == 1) {
+ if (parent_sd->s_parent)
+@@ -993,6 +995,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ ino = parent_sd->s_ino;
+ if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0)
+ filp->f_pos++;
++ else
++ return 0;
+ }
+ mutex_lock(&sysfs_mutex);
+ for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos);
+@@ -1023,10 +1027,21 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ return 0;
+ }
+
++static loff_t sysfs_dir_llseek(struct file *file, loff_t offset, int whence)
++{
++ struct inode *inode = file->f_path.dentry->d_inode;
++ loff_t ret;
++
++ mutex_lock(&inode->i_mutex);
++ ret = generic_file_llseek(file, offset, whence);
++ mutex_unlock(&inode->i_mutex);
++
++ return ret;
++}
+
+ const struct file_operations sysfs_dir_operations = {
+ .read = generic_read_dir,
+ .readdir = sysfs_readdir,
+ .release = sysfs_dir_release,
+- .llseek = generic_file_llseek,
++ .llseek = sysfs_dir_llseek,
+ };
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index ae0e76b..2f467e5 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -1583,6 +1583,12 @@ static int ubifs_remount_rw(struct ubifs_info *c)
+ c->remounting_rw = 1;
+ c->ro_mount = 0;
+
++ if (c->space_fixup) {
++ err = ubifs_fixup_free_space(c);
++ if (err)
++ return err;
++ }
++
+ err = check_free_space(c);
+ if (err)
+ goto out;
+@@ -1699,12 +1705,6 @@ static int ubifs_remount_rw(struct ubifs_info *c)
+ err = dbg_check_space_info(c);
+ }
+
+- if (c->space_fixup) {
+- err = ubifs_fixup_free_space(c);
+- if (err)
+- goto out;
+- }
+-
+ mutex_unlock(&c->umount_mutex);
+ return err;
+
+diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
+index 5142a82..604f5fc 100644
+--- a/fs/udf/udf_sb.h
++++ b/fs/udf/udf_sb.h
+@@ -82,7 +82,7 @@ struct udf_virtual_data {
+ struct udf_bitmap {
+ __u32 s_extLength;
+ __u32 s_extPosition;
+- __u16 s_nr_groups;
++ int s_nr_groups;
+ struct buffer_head **s_block_bitmap;
+ };
+
+diff --git a/include/linux/thermal.h b/include/linux/thermal.h
+index 47b4a27..5ef859a 100644
+--- a/include/linux/thermal.h
++++ b/include/linux/thermal.h
+@@ -108,7 +108,7 @@ struct thermal_zone_device {
+ /* Adding event notification support elements */
+ #define THERMAL_GENL_FAMILY_NAME "thermal_event"
+ #define THERMAL_GENL_VERSION 0x01
+-#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_group"
++#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp"
+
+ enum events {
+ THERMAL_AUX0,
+diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
+index fd4a7b1..cd068b2 100644
+--- a/kernel/time/tick-broadcast.c
++++ b/kernel/time/tick-broadcast.c
+@@ -66,7 +66,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
+ */
+ int tick_check_broadcast_device(struct clock_event_device *dev)
+ {
+- if ((tick_broadcast_device.evtdev &&
++ if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
++ (tick_broadcast_device.evtdev &&
+ tick_broadcast_device.evtdev->rating >= dev->rating) ||
+ (dev->features & CLOCK_EVT_FEAT_C3STOP))
+ return 0;
+diff --git a/mm/mmap.c b/mm/mmap.c
+index eae90af..dff37a6 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1573,7 +1573,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+ if (mm) {
+ /* Check the cache first. */
+ /* (Cache hit rate is typically around 35%.) */
+- vma = mm->mmap_cache;
++ vma = ACCESS_ONCE(mm->mmap_cache);
+ if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
+ struct rb_node * rb_node;
+
+diff --git a/mm/nommu.c b/mm/nommu.c
+index f59e170..f0cd7ab 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -807,7 +807,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+ struct vm_area_struct *vma;
+
+ /* check the cache first */
+- vma = mm->mmap_cache;
++ vma = ACCESS_ONCE(mm->mmap_cache);
+ if (vma && vma->vm_start <= addr && vma->vm_end > addr)
+ return vma;
+
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index 5471628..963f285 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -110,13 +110,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
+ grp = rtnl_dereference(real_dev->vlgrp);
+ BUG_ON(!grp);
+
+- /* Take it out of our own structures, but be sure to interlock with
+- * HW accelerating devices or SW vlan input packet processing if
+- * VLAN is not 0 (leave it there for 802.1p).
+- */
+- if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
+- ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
+-
+ grp->nr_vlans--;
+
+ if (vlan->flags & VLAN_FLAG_GVRP)
+@@ -139,6 +132,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
+ call_rcu(&grp->rcu, vlan_rcu_free);
+ }
+
++ /* Take it out of our own structures, but be sure to interlock with
++ * HW accelerating devices or SW vlan input packet processing if
++ * VLAN is not 0 (leave it there for 802.1p).
++ */
++ if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
++ ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
++
+ /* Get rid of the vlan's reference to real_dev */
+ dev_put(real_dev);
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index b23bbbf..720aea0 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3193,6 +3193,7 @@ int netdev_rx_handler_register(struct net_device *dev,
+ if (dev->rx_handler)
+ return -EBUSY;
+
++ /* Note: rx_handler_data must be set before rx_handler */
+ rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
+ rcu_assign_pointer(dev->rx_handler, rx_handler);
+
+@@ -3213,6 +3214,11 @@ void netdev_rx_handler_unregister(struct net_device *dev)
+
+ ASSERT_RTNL();
+ RCU_INIT_POINTER(dev->rx_handler, NULL);
++ /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
++ * section has a guarantee to see a non NULL rx_handler_data
++ * as well.
++ */
++ synchronize_net();
+ RCU_INIT_POINTER(dev->rx_handler_data, NULL);
+ }
+ EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 1b1f7af..3124e17 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2265,11 +2265,8 @@ void tcp_enter_loss(struct sock *sk, int how)
+ if (tcp_is_reno(tp))
+ tcp_reset_reno_sack(tp);
+
+- if (!how) {
+- /* Push undo marker, if it was plain RTO and nothing
+- * was retransmitted. */
+- tp->undo_marker = tp->snd_una;
+- } else {
++ tp->undo_marker = tp->snd_una;
++ if (how) {
+ tp->sacked_out = 0;
+ tp->fackets_out = 0;
+ }
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 9bb7400..5c1807c 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1587,8 +1587,11 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
+ goto send_now;
+ }
+
+- /* Ok, it looks like it is advisable to defer. */
+- tp->tso_deferred = 1 | (jiffies << 1);
++ /* Ok, it looks like it is advisable to defer.
++ * Do not rearm the timer if already set to not break TCP ACK clocking.
++ */
++ if (!tp->tso_deferred)
++ tp->tso_deferred = 1 | (jiffies << 1);
+
+ return 1;
+
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index b27baed..8589c2d 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -4658,26 +4658,20 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)
+
+ static int __net_init addrconf_init_net(struct net *net)
+ {
+- int err;
++ int err = -ENOMEM;
+ struct ipv6_devconf *all, *dflt;
+
+- err = -ENOMEM;
+- all = &ipv6_devconf;
+- dflt = &ipv6_devconf_dflt;
++ all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
++ if (all == NULL)
++ goto err_alloc_all;
+
+- if (!net_eq(net, &init_net)) {
+- all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL);
+- if (all == NULL)
+- goto err_alloc_all;
++ dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
++ if (dflt == NULL)
++ goto err_alloc_dflt;
+
+- dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
+- if (dflt == NULL)
+- goto err_alloc_dflt;
+- } else {
+- /* these will be inherited by all namespaces */
+- dflt->autoconf = ipv6_defaults.autoconf;
+- dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
+- }
++ /* these will be inherited by all namespaces */
++ dflt->autoconf = ipv6_defaults.autoconf;
++ dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
+
+ net->ipv6.devconf_all = all;
+ net->ipv6.devconf_dflt = dflt;
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index f8d24dd..6a4f4f3 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -111,6 +111,27 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
+ ipv6_addr_loopback(&hdr->daddr))
+ goto err;
+
++ /* RFC4291 Errata ID: 3480
++ * Interface-Local scope spans only a single interface on a
++ * node and is useful only for loopback transmission of
++ * multicast. Packets with interface-local scope received
++ * from another node must be discarded.
++ */
++ if (!(skb->pkt_type == PACKET_LOOPBACK ||
++ dev->flags & IFF_LOOPBACK) &&
++ ipv6_addr_is_multicast(&hdr->daddr) &&
++ IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
++ goto err;
++
++ /* RFC4291 2.7
++ * Nodes must not originate a packet to a multicast address whose scope
++ * field contains the reserved value 0; if such a packet is received, it
++ * must be silently dropped.
++ */
++ if (ipv6_addr_is_multicast(&hdr->daddr) &&
++ IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0)
++ goto err;
++
+ /*
+ * RFC4291 2.7
+ * Multicast addresses must not be used as source addresses in IPv6
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index c24f25a..f4b49c5 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -2584,8 +2584,10 @@ bed:
+ NULL, NULL, NULL);
+
+ /* Check if the we got some results */
+- if (!self->cachedaddr)
+- return -EAGAIN; /* Didn't find any devices */
++ if (!self->cachedaddr) {
++ err = -EAGAIN; /* Didn't find any devices */
++ goto out;
++ }
+ daddr = self->cachedaddr;
+ /* Cleanup */
+ self->cachedaddr = 0;
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 482fa57..874f8ff 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -134,6 +134,7 @@ int genl_register_mc_group(struct genl_family *family,
+ int err = 0;
+
+ BUG_ON(grp->name[0] == '\0');
++ BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL);
+
+ genl_lock();
+
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 18c5a50..dc6af27 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -139,6 +139,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
+ list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
+ task->tk_waitqueue = queue;
+ queue->qlen++;
++ /* barrier matches the read in rpc_wake_up_task_queue_locked() */
++ smp_wmb();
+ rpc_set_queued(task);
+
+ dprintk("RPC: %5u added to queue %p \"%s\"\n",
+@@ -389,8 +391,11 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
+ */
+ static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
+ {
+- if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
+- __rpc_do_wake_up_task(queue, task);
++ if (RPC_IS_QUEUED(task)) {
++ smp_rmb();
++ if (task->tk_waitqueue == queue)
++ __rpc_do_wake_up_task(queue, task);
++ }
+ }
+
+ /*
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 317bfe3..18978b6 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -371,7 +371,7 @@ static void unix_sock_destructor(struct sock *sk)
+ #endif
+ }
+
+-static int unix_release_sock(struct sock *sk, int embrion)
++static void unix_release_sock(struct sock *sk, int embrion)
+ {
+ struct unix_sock *u = unix_sk(sk);
+ struct dentry *dentry;
+@@ -444,8 +444,6 @@ static int unix_release_sock(struct sock *sk, int embrion)
+
+ if (unix_tot_inflight)
+ unix_gc(); /* Garbage collect fds */
+-
+- return 0;
+ }
+
+ static void init_peercred(struct sock *sk)
+@@ -682,9 +680,10 @@ static int unix_release(struct socket *sock)
+ if (!sk)
+ return 0;
+
++ unix_release_sock(sk, 0);
+ sock->sk = NULL;
+
+- return unix_release_sock(sk, 0);
++ return 0;
+ }
+
+ static int unix_autobind(struct socket *sock)
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 0b3f5d7..b70eaa2 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -1067,12 +1067,12 @@ long keyctl_instantiate_key_iov(key_serial_t id,
+ ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc,
+ ARRAY_SIZE(iovstack), iovstack, &iov, 1);
+ if (ret < 0)
+- return ret;
++ goto err;
+ if (ret == 0)
+ goto no_payload_free;
+
+ ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
+-
++err:
+ if (iov != iovstack)
+ kfree(iov);
+ return ret;
+diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
+index 971eaf0..4969c98 100644
+--- a/sound/soc/imx/imx-ssi.c
++++ b/sound/soc/imx/imx-ssi.c
+@@ -573,6 +573,8 @@ static void imx_ssi_ac97_reset(struct snd_ac97 *ac97)
+
+ if (imx_ssi->ac97_reset)
+ imx_ssi->ac97_reset(ac97);
++ /* First read sometimes fails, do a dummy read */
++ imx_ssi_ac97_read(ac97, 0);
+ }
+
+ static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97)
+@@ -581,6 +583,9 @@ static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97)
+
+ if (imx_ssi->ac97_warm_reset)
+ imx_ssi->ac97_warm_reset(ac97);
++
++ /* First read sometimes fails, do a dummy read */
++ imx_ssi_ac97_read(ac97, 0);
+ }
+
+ struct snd_ac97_bus_ops soc_ac97_ops = {
+diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
+index db74005..1a757c3 100644
+--- a/sound/soc/sh/dma-sh7760.c
++++ b/sound/soc/sh/dma-sh7760.c
+@@ -342,8 +342,8 @@ static int camelot_pcm_new(struct snd_soc_pcm_runtime *rtd)
+ return 0;
+ }
+
+-static struct snd_soc_platform sh7760_soc_platform = {
+- .pcm_ops = &camelot_pcm_ops,
++static struct snd_soc_platform_driver sh7760_soc_platform = {
++ .ops = &camelot_pcm_ops,
+ .pcm_new = camelot_pcm_new,
+ .pcm_free = camelot_pcm_free,
+ };
diff --git a/1043_linux-3.2.44.patch b/1043_linux-3.2.44.patch
new file mode 100644
index 00000000..3d5e6fff
--- /dev/null
+++ b/1043_linux-3.2.44.patch
@@ -0,0 +1,2808 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index ddbf18e..897f223 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -948,6 +948,20 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ i8k.restricted [HW] Allow controlling fans only if SYS_ADMIN
+ capability is set.
+
++ i915.invert_brightness=
++ [DRM] Invert the sense of the variable that is used to
++ set the brightness of the panel backlight. Normally a
++ brightness value of 0 indicates backlight switched off,
++ and the maximum of the brightness value sets the backlight
++ to maximum brightness. If this parameter is set to 0
++ (default) and the machine requires it, or this parameter
++ is set to 1, a brightness value of 0 sets the backlight
++ to maximum brightness, and the maximum of the brightness
++ value switches the backlight off.
++ -1 -- never invert brightness
++ 0 -- machine default
++ 1 -- force brightness inversion
++
+ icn= [HW,ISDN]
+ Format: <io>[,<membase>[,<icn_id>[,<icn_id2>]]]
+
+diff --git a/Makefile b/Makefile
+index 59130db..566750c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 43
++SUBLEVEL = 44
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
+index 4112200..c52a8ef 100644
+--- a/arch/alpha/kernel/sys_nautilus.c
++++ b/arch/alpha/kernel/sys_nautilus.c
+@@ -189,6 +189,10 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr)
+ extern void free_reserved_mem(void *, void *);
+ extern void pcibios_claim_one_bus(struct pci_bus *);
+
++static struct resource irongate_io = {
++ .name = "Irongate PCI IO",
++ .flags = IORESOURCE_IO,
++};
+ static struct resource irongate_mem = {
+ .name = "Irongate PCI MEM",
+ .flags = IORESOURCE_MEM,
+@@ -210,6 +214,7 @@ nautilus_init_pci(void)
+
+ irongate = pci_get_bus_and_slot(0, 0);
+ bus->self = irongate;
++ bus->resource[0] = &irongate_io;
+ bus->resource[1] = &irongate_mem;
+
+ pci_bus_size_bridges(bus);
+diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
+index ecebb89..a559ee7 100644
+--- a/arch/arm/kernel/perf_event.c
++++ b/arch/arm/kernel/perf_event.c
+@@ -326,7 +326,10 @@ validate_event(struct pmu_hw_events *hw_events,
+ struct hw_perf_event fake_event = event->hw;
+ struct pmu *leader_pmu = event->group_leader->pmu;
+
+- if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
++ if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
++ return 1;
++
++ if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
+ return 1;
+
+ return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
+diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
+index e0b0e7a..09f8851 100644
+--- a/arch/arm/mm/cache-feroceon-l2.c
++++ b/arch/arm/mm/cache-feroceon-l2.c
+@@ -342,6 +342,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
+ outer_cache.inv_range = feroceon_l2_inv_range;
+ outer_cache.clean_range = feroceon_l2_clean_range;
+ outer_cache.flush_range = feroceon_l2_flush_range;
++ outer_cache.inv_all = l2_inv_all;
+
+ enable_l2();
+
+diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
+index 88fb3d9..927a639 100644
+--- a/arch/arm/mm/proc-arm920.S
++++ b/arch/arm/mm/proc-arm920.S
+@@ -380,7 +380,7 @@ ENTRY(cpu_arm920_set_pte_ext)
+ /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
+ .globl cpu_arm920_suspend_size
+ .equ cpu_arm920_suspend_size, 4 * 3
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_arm920_do_suspend)
+ stmfd sp!, {r4 - r6, lr}
+ mrc p15, 0, r4, c13, c0, 0 @ PID
+diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
+index 9f8fd91..090f18f 100644
+--- a/arch/arm/mm/proc-arm926.S
++++ b/arch/arm/mm/proc-arm926.S
+@@ -395,7 +395,7 @@ ENTRY(cpu_arm926_set_pte_ext)
+ /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
+ .globl cpu_arm926_suspend_size
+ .equ cpu_arm926_suspend_size, 4 * 3
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_arm926_do_suspend)
+ stmfd sp!, {r4 - r6, lr}
+ mrc p15, 0, r4, c13, c0, 0 @ PID
+diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
+index 7d91545..6594aef 100644
+--- a/arch/arm/mm/proc-sa1100.S
++++ b/arch/arm/mm/proc-sa1100.S
+@@ -169,7 +169,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
+
+ .globl cpu_sa1100_suspend_size
+ .equ cpu_sa1100_suspend_size, 4 * 3
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_sa1100_do_suspend)
+ stmfd sp!, {r4 - r6, lr}
+ mrc p15, 0, r4, c3, c0, 0 @ domain ID
+diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
+index d061d2f..8168d99 100644
+--- a/arch/arm/mm/proc-v6.S
++++ b/arch/arm/mm/proc-v6.S
+@@ -129,7 +129,7 @@ ENTRY(cpu_v6_set_pte_ext)
+ /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
+ .globl cpu_v6_suspend_size
+ .equ cpu_v6_suspend_size, 4 * 6
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_v6_do_suspend)
+ stmfd sp!, {r4 - r9, lr}
+ mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
+diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
+index abf0507..5c4969d 100644
+--- a/arch/arm/mm/proc-xsc3.S
++++ b/arch/arm/mm/proc-xsc3.S
+@@ -407,7 +407,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
+
+ .globl cpu_xsc3_suspend_size
+ .equ cpu_xsc3_suspend_size, 4 * 6
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_xsc3_do_suspend)
+ stmfd sp!, {r4 - r9, lr}
+ mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
+diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
+index 3277904..b09d036 100644
+--- a/arch/arm/mm/proc-xscale.S
++++ b/arch/arm/mm/proc-xscale.S
+@@ -521,7 +521,7 @@ ENTRY(cpu_xscale_set_pte_ext)
+
+ .globl cpu_xscale_suspend_size
+ .equ cpu_xscale_suspend_size, 4 * 6
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_ARM_CPU_SUSPEND
+ ENTRY(cpu_xscale_do_suspend)
+ stmfd sp!, {r4 - r9, lr}
+ mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index dc36ea6..eb19b6c 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
+ (0x1UL << 4), &dummy1, &dummy2);
+ if (lpar_rc == H_SUCCESS)
+ return i;
+- BUG_ON(lpar_rc != H_NOT_FOUND);
++
++ /*
++ * The test for adjunct partition is performed before the
++ * ANDCOND test. H_RESOURCE may be returned, so we need to
++ * check for that as well.
++ */
++ BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
+
+ slot_offset++;
+ slot_offset &= 0x7;
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index b4973f4..cfb5a40 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -393,8 +393,8 @@ struct kvm_vcpu_arch {
+ gpa_t time;
+ struct pvclock_vcpu_time_info hv_clock;
+ unsigned int hw_tsc_khz;
+- unsigned int time_offset;
+- struct page *time_page;
++ struct gfn_to_hva_cache pv_time;
++ bool pv_time_enabled;
+
+ struct {
+ u64 msr_val;
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index a7d2db9..91e758b 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -740,7 +740,10 @@ static inline void arch_leave_lazy_mmu_mode(void)
+ PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
+ }
+
+-void arch_flush_lazy_mmu_mode(void);
++static inline void arch_flush_lazy_mmu_mode(void)
++{
++ PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
++}
+
+ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags)
+diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
+index 8e8b9a4..faf2c04 100644
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -91,6 +91,7 @@ struct pv_lazy_ops {
+ /* Set deferred update mode, used for batching operations. */
+ void (*enter)(void);
+ void (*leave)(void);
++ void (*flush)(void);
+ };
+
+ struct pv_time_ops {
+@@ -680,6 +681,7 @@ void paravirt_end_context_switch(struct task_struct *next);
+
+ void paravirt_enter_lazy_mmu(void);
+ void paravirt_leave_lazy_mmu(void);
++void paravirt_flush_lazy_mmu(void);
+
+ void _paravirt_nop(void);
+ u32 _paravirt_ident_32(u32);
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index d90272e..84c938f 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -261,6 +261,18 @@ void paravirt_leave_lazy_mmu(void)
+ leave_lazy(PARAVIRT_LAZY_MMU);
+ }
+
++void paravirt_flush_lazy_mmu(void)
++{
++ preempt_disable();
++
++ if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
++ arch_leave_lazy_mmu_mode();
++ arch_enter_lazy_mmu_mode();
++ }
++
++ preempt_enable();
++}
++
+ void paravirt_start_context_switch(struct task_struct *prev)
+ {
+ BUG_ON(preemptible());
+@@ -290,18 +302,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+ return percpu_read(paravirt_lazy_mode);
+ }
+
+-void arch_flush_lazy_mmu_mode(void)
+-{
+- preempt_disable();
+-
+- if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+- arch_leave_lazy_mmu_mode();
+- arch_enter_lazy_mmu_mode();
+- }
+-
+- preempt_enable();
+-}
+-
+ struct pv_info pv_info = {
+ .name = "bare hardware",
+ .paravirt_enabled = 0,
+@@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = {
+ .lazy_mode = {
+ .enter = paravirt_nop,
+ .leave = paravirt_nop,
++ .flush = paravirt_nop,
+ },
+
+ .set_fixmap = native_set_fixmap,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index f4063fd..e82a53a 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1105,7 +1105,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
+ {
+ unsigned long flags;
+ struct kvm_vcpu_arch *vcpu = &v->arch;
+- void *shared_kaddr;
+ unsigned long this_tsc_khz;
+ s64 kernel_ns, max_kernel_ns;
+ u64 tsc_timestamp;
+@@ -1141,7 +1140,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
+
+ local_irq_restore(flags);
+
+- if (!vcpu->time_page)
++ if (!vcpu->pv_time_enabled)
+ return 0;
+
+ /*
+@@ -1199,14 +1198,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
+ */
+ vcpu->hv_clock.version += 2;
+
+- shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
+-
+- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
+- sizeof(vcpu->hv_clock));
+-
+- kunmap_atomic(shared_kaddr, KM_USER0);
+-
+- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
++ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
++ &vcpu->hv_clock,
++ sizeof(vcpu->hv_clock));
+ return 0;
+ }
+
+@@ -1486,7 +1480,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
+ return 0;
+ }
+
+- if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
++ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
++ sizeof(u32)))
+ return 1;
+
+ vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
+@@ -1496,10 +1491,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
+
+ static void kvmclock_reset(struct kvm_vcpu *vcpu)
+ {
+- if (vcpu->arch.time_page) {
+- kvm_release_page_dirty(vcpu->arch.time_page);
+- vcpu->arch.time_page = NULL;
+- }
++ vcpu->arch.pv_time_enabled = false;
+ }
+
+ static void accumulate_steal_time(struct kvm_vcpu *vcpu)
+@@ -1591,6 +1583,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+ break;
+ case MSR_KVM_SYSTEM_TIME_NEW:
+ case MSR_KVM_SYSTEM_TIME: {
++ u64 gpa_offset;
+ kvmclock_reset(vcpu);
+
+ vcpu->arch.time = data;
+@@ -1600,16 +1593,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+ if (!(data & 1))
+ break;
+
+- /* ...but clean it before doing the actual write */
+- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
++ gpa_offset = data & ~(PAGE_MASK | 1);
+
+- vcpu->arch.time_page =
+- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+-
+- if (is_error_page(vcpu->arch.time_page)) {
+- kvm_release_page_clean(vcpu->arch.time_page);
+- vcpu->arch.time_page = NULL;
+- }
++ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
++ &vcpu->arch.pv_time, data & ~1ULL,
++ sizeof(struct pvclock_vcpu_time_info)))
++ vcpu->arch.pv_time_enabled = false;
++ else
++ vcpu->arch.pv_time_enabled = true;
+ break;
+ }
+ case MSR_KVM_ASYNC_PF_EN:
+@@ -1625,7 +1616,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+ return 1;
+
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
+- data & KVM_STEAL_VALID_BITS))
++ data & KVM_STEAL_VALID_BITS,
++ sizeof(struct kvm_steal_time)))
+ return 1;
+
+ vcpu->arch.st.msr_val = data;
+@@ -6549,6 +6541,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
+ goto fail_free_mce_banks;
+
++ vcpu->arch.pv_time_enabled = false;
+ kvm_async_pf_hash_reset(vcpu);
+
+ return 0;
+diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
+index cf4603b..8f4fda4 100644
+--- a/arch/x86/lguest/boot.c
++++ b/arch/x86/lguest/boot.c
+@@ -1328,6 +1328,7 @@ __init void lguest_init(void)
+ pv_mmu_ops.read_cr3 = lguest_read_cr3;
+ pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
+ pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
++ pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
+ pv_mmu_ops.pte_update = lguest_pte_update;
+ pv_mmu_ops.pte_update_defer = lguest_pte_update;
+
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 7b73c88..53a7b69 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -377,10 +377,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
+ if (pgd_none(*pgd_ref))
+ return -1;
+
+- if (pgd_none(*pgd))
++ if (pgd_none(*pgd)) {
+ set_pgd(pgd, *pgd_ref);
+- else
++ arch_flush_lazy_mmu_mode();
++ } else {
+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
++ }
+
+ /*
+ * Below here mismatches are bugs because these lower tables
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index 2b8b0de..fe00be6 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -2079,6 +2079,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
+ .lazy_mode = {
+ .enter = paravirt_enter_lazy_mmu,
+ .leave = xen_leave_lazy_mmu,
++ .flush = paravirt_flush_lazy_mmu,
+ },
+
+ .set_fixmap = xen_set_fixmap,
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index f0b2ca8..1789e7a 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -200,6 +200,8 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \
+ unsigned long val; \
+ ssize_t ret; \
+ ret = queue_var_store(&val, page, count); \
++ if (ret < 0) \
++ return ret; \
+ if (neg) \
+ val = !val; \
+ \
+diff --git a/crypto/gcm.c b/crypto/gcm.c
+index 1a25263..b97b186 100644
+--- a/crypto/gcm.c
++++ b/crypto/gcm.c
+@@ -44,6 +44,7 @@ struct crypto_rfc4543_ctx {
+
+ struct crypto_rfc4543_req_ctx {
+ u8 auth_tag[16];
++ u8 assocbuf[32];
+ struct scatterlist cipher[1];
+ struct scatterlist payload[2];
+ struct scatterlist assoc[2];
+@@ -1142,9 +1143,19 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
+ scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
+ assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
+
+- sg_init_table(assoc, 2);
+- sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
+- req->assoc->offset);
++ if (req->assoc->length == req->assoclen) {
++ sg_init_table(assoc, 2);
++ sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
++ req->assoc->offset);
++ } else {
++ BUG_ON(req->assoclen > sizeof(rctx->assocbuf));
++
++ scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0,
++ req->assoclen, 0);
++
++ sg_init_table(assoc, 2);
++ sg_set_buf(assoc, rctx->assocbuf, req->assoclen);
++ }
+ scatterwalk_crypto_chain(assoc, payload, 0, 2);
+
+ aead_request_set_tfm(subreq, ctx->child);
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index df47397..ddfc1c1 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -150,6 +150,7 @@ enum piix_controller_ids {
+ tolapai_sata,
+ piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
+ ich8_sata_snb,
++ ich8_2port_sata_snb,
+ };
+
+ struct piix_map_db {
+@@ -326,7 +327,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
+ /* SATA Controller IDE (Lynx Point) */
+ { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Lynx Point) */
+- { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+ /* SATA Controller IDE (Lynx Point) */
+ { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Lynx Point-LP) */
+@@ -519,6 +520,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
+ [ich8m_apple_sata] = &ich8m_apple_map_db,
+ [tolapai_sata] = &tolapai_map_db,
+ [ich8_sata_snb] = &ich8_map_db,
++ [ich8_2port_sata_snb] = &ich8_2port_map_db,
+ };
+
+ static struct ata_port_info piix_port_info[] = {
+@@ -660,6 +662,16 @@ static struct ata_port_info piix_port_info[] = {
+ .port_ops = &piix_sata_ops,
+ },
+
++ [ich8_2port_sata_snb] =
++ {
++ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR
++ | PIIX_FLAG_PIO16,
++ .pio_mask = ATA_PIO4,
++ .mwdma_mask = ATA_MWDMA2,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &piix_sata_ops,
++ },
++
+ };
+
+ static struct pci_bits piix_enable_bits[] = {
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index c9540c0..288b635 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2401,6 +2401,9 @@ int ata_dev_configure(struct ata_device *dev)
+ dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
+ dev->max_sectors);
+
++ if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
++ dev->max_sectors = ATA_MAX_SECTORS_LBA48;
++
+ if (ap->ops->dev_config)
+ ap->ops->dev_config(dev);
+
+@@ -4057,6 +4060,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ /* Weird ATAPI devices */
+ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
+ { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
++ { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
+
+ /* Devices we expect to fail diagnostics */
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 012a9d2..144d37c 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -274,6 +274,7 @@ enum intel_pch {
+
+ #define QUIRK_PIPEA_FORCE (1<<0)
+ #define QUIRK_LVDS_SSC_DISABLE (1<<1)
++#define QUIRK_INVERT_BRIGHTNESS (1<<2)
+
+ struct intel_fbdev;
+ struct intel_fbc_work;
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 17961df..897ca06 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -25,6 +25,7 @@
+ */
+
+ #include <linux/cpufreq.h>
++#include <linux/dmi.h>
+ #include <linux/module.h>
+ #include <linux/input.h>
+ #include <linux/i2c.h>
+@@ -8831,6 +8832,16 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
+ dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+ }
+
++/*
++ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
++ * brightness value
++ */
++static void quirk_invert_brightness(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
++}
++
+ struct intel_quirk {
+ int device;
+ int subsystem_vendor;
+@@ -8838,6 +8849,34 @@ struct intel_quirk {
+ void (*hook)(struct drm_device *dev);
+ };
+
++/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
++struct intel_dmi_quirk {
++ void (*hook)(struct drm_device *dev);
++ const struct dmi_system_id (*dmi_id_list)[];
++};
++
++static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
++{
++ DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
++ return 1;
++}
++
++static const struct intel_dmi_quirk intel_dmi_quirks[] = {
++ {
++ .dmi_id_list = &(const struct dmi_system_id[]) {
++ {
++ .callback = intel_dmi_reverse_brightness,
++ .ident = "NCR Corporation",
++ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, ""),
++ },
++ },
++ { } /* terminating entry */
++ },
++ .hook = quirk_invert_brightness,
++ },
++};
++
+ struct intel_quirk intel_quirks[] = {
+ /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
+ { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
+@@ -8865,6 +8904,18 @@ struct intel_quirk intel_quirks[] = {
+
+ /* Sony Vaio Y cannot use SSC on LVDS */
+ { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
++
++ /* Acer Aspire 5734Z must invert backlight brightness */
++ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
++
++ /* Acer/eMachines G725 */
++ { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
++
++ /* Acer/eMachines e725 */
++ { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
++
++ /* Acer/Packard Bell NCL20 */
++ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+ };
+
+ static void intel_init_quirks(struct drm_device *dev)
+@@ -8882,6 +8933,10 @@ static void intel_init_quirks(struct drm_device *dev)
+ q->subsystem_device == PCI_ANY_ID))
+ q->hook(dev);
+ }
++ for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
++ if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
++ intel_dmi_quirks[i].hook(dev);
++ }
+ }
+
+ /* Disable the VGA plane that we never use */
+diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
+index 04d79fd..72b8949 100644
+--- a/drivers/gpu/drm/i915/intel_panel.c
++++ b/drivers/gpu/drm/i915/intel_panel.c
+@@ -28,6 +28,7 @@
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
++#include <linux/moduleparam.h>
+ #include "intel_drv.h"
+
+ #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
+@@ -191,6 +192,27 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
+ return max;
+ }
+
++static int i915_panel_invert_brightness;
++MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
++ "(-1 force normal, 0 machine defaults, 1 force inversion), please "
++ "report PCI device ID, subsystem vendor and subsystem device ID "
++ "to dri-devel@lists.freedesktop.org, if your machine needs it. "
++ "It will then be included in an upcoming module version.");
++module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
++static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++
++ if (i915_panel_invert_brightness < 0)
++ return val;
++
++ if (i915_panel_invert_brightness > 0 ||
++ dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
++ return intel_panel_get_max_backlight(dev) - val;
++
++ return val;
++}
++
+ u32 intel_panel_get_backlight(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -211,6 +233,7 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
+ }
+ }
+
++ val = intel_panel_compute_brightness(dev, val);
+ DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+ return val;
+ }
+@@ -228,6 +251,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
+ u32 tmp;
+
+ DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
++ level = intel_panel_compute_brightness(dev, level);
+
+ if (HAS_PCH_SPLIT(dev))
+ return intel_pch_panel_set_backlight(dev, level);
+diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
+index 58434e8..37fe246 100644
+--- a/drivers/gpu/vga/vga_switcheroo.c
++++ b/drivers/gpu/vga/vga_switcheroo.c
+@@ -26,6 +26,7 @@
+ #include <linux/fb.h>
+
+ #include <linux/pci.h>
++#include <linux/console.h>
+ #include <linux/vga_switcheroo.h>
+
+ struct vga_switcheroo_client {
+@@ -256,8 +257,10 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
+
+ if (new_client->fb_info) {
+ struct fb_event event;
++ console_lock();
+ event.info = new_client->fb_info;
+ fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
++ console_unlock();
+ }
+
+ ret = vgasr_priv.handler->switchto(new_client->id);
+diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
+index 1201a15..08e7e72 100644
+--- a/drivers/hwspinlock/hwspinlock_core.c
++++ b/drivers/hwspinlock/hwspinlock_core.c
+@@ -416,6 +416,8 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock)
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "%s: can't power on device\n", __func__);
++ pm_runtime_put_noidle(dev);
++ module_put(dev->driver->owner);
+ return ret;
+ }
+
+diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
+index e7dc732..1d90e26 100644
+--- a/drivers/mtd/mtdchar.c
++++ b/drivers/mtd/mtdchar.c
+@@ -1154,7 +1154,11 @@ static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
+ unsigned long off;
+ u32 len;
+
+- if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
++ /* This is broken because it assumes the MTD device is map-based
++ and that mtd->priv is a valid struct map_info. It should be
++ replaced with something that uses the mtd_get_unmapped_area()
++ operation properly. */
++ if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
+ off = vma->vm_pgoff << PAGE_SHIFT;
+ start = map->phys;
+ len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
+diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
+index c3dd9d0..1ee5f0c 100644
+--- a/drivers/net/can/sja1000/sja1000_of_platform.c
++++ b/drivers/net/can/sja1000/sja1000_of_platform.c
+@@ -94,8 +94,8 @@ static int __devinit sja1000_ofp_probe(struct platform_device *ofdev)
+ struct net_device *dev;
+ struct sja1000_priv *priv;
+ struct resource res;
+- const u32 *prop;
+- int err, irq, res_size, prop_size;
++ u32 prop;
++ int err, irq, res_size;
+ void __iomem *base;
+
+ err = of_address_to_resource(np, 0, &res);
+@@ -136,27 +136,27 @@ static int __devinit sja1000_ofp_probe(struct platform_device *ofdev)
+ priv->read_reg = sja1000_ofp_read_reg;
+ priv->write_reg = sja1000_ofp_write_reg;
+
+- prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size);
+- if (prop && (prop_size == sizeof(u32)))
+- priv->can.clock.freq = *prop / 2;
++ err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
++ if (!err)
++ priv->can.clock.freq = prop / 2;
+ else
+ priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
+
+- prop = of_get_property(np, "nxp,tx-output-mode", &prop_size);
+- if (prop && (prop_size == sizeof(u32)))
+- priv->ocr |= *prop & OCR_MODE_MASK;
++ err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
++ if (!err)
++ priv->ocr |= prop & OCR_MODE_MASK;
+ else
+ priv->ocr |= OCR_MODE_NORMAL; /* default */
+
+- prop = of_get_property(np, "nxp,tx-output-config", &prop_size);
+- if (prop && (prop_size == sizeof(u32)))
+- priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK;
++ err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
++ if (!err)
++ priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
+ else
+ priv->ocr |= OCR_TX0_PULLDOWN; /* default */
+
+- prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size);
+- if (prop && (prop_size == sizeof(u32)) && *prop) {
+- u32 divider = priv->can.clock.freq * 2 / *prop;
++ err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
++ if (!err && prop) {
++ u32 divider = priv->can.clock.freq * 2 / prop;
+
+ if (divider > 1)
+ priv->cdr |= divider / 2 - 1;
+@@ -166,8 +166,7 @@ static int __devinit sja1000_ofp_probe(struct platform_device *ofdev)
+ priv->cdr |= CDR_CLK_OFF; /* default */
+ }
+
+- prop = of_get_property(np, "nxp,no-comparator-bypass", NULL);
+- if (!prop)
++ if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
+ priv->cdr |= CDR_CBP; /* default */
+
+ priv->irq_flags = IRQF_SHARED;
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index a6153f1..d812790 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -3516,6 +3516,30 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
+ }
+ }
+
++static void rtl_speed_down(struct rtl8169_private *tp)
++{
++ u32 adv;
++ int lpa;
++
++ rtl_writephy(tp, 0x1f, 0x0000);
++ lpa = rtl_readphy(tp, MII_LPA);
++
++ if (lpa & (LPA_10HALF | LPA_10FULL))
++ adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
++ else if (lpa & (LPA_100HALF | LPA_100FULL))
++ adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
++ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
++ else
++ adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
++ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
++ (tp->mii.supports_gmii ?
++ ADVERTISED_1000baseT_Half |
++ ADVERTISED_1000baseT_Full : 0);
++
++ rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
++ adv);
++}
++
+ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
+ {
+ void __iomem *ioaddr = tp->mmio_addr;
+@@ -3541,9 +3565,7 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
+ if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
+ return false;
+
+- rtl_writephy(tp, 0x1f, 0x0000);
+- rtl_writephy(tp, MII_BMCR, 0x0000);
+-
++ rtl_speed_down(tp);
+ rtl_wol_suspend_quirk(tp);
+
+ return true;
+diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+index 06b3f0d..c16bea4 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
++++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+@@ -648,7 +648,7 @@ static const u32 ar9580_1p0_mac_core[][2] = {
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+- {0x00008264, 0x9bc00010},
++ {0x00008264, 0x9d400010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+index 966661c..84890d5 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+@@ -801,7 +801,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
+ * required version.
+ */
+ if (priv->fw_version_major != MAJOR_VERSION_REQ ||
+- priv->fw_version_minor != MINOR_VERSION_REQ) {
++ priv->fw_version_minor < MINOR_VERSION_REQ) {
+ dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n",
+ MAJOR_VERSION_REQ, MINOR_VERSION_REQ);
+ return -EINVAL;
+diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
+index 17148bb..10fe07d 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
++++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
+@@ -52,8 +52,8 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+- ERROR(rt2x00dev, "Indirect register access failed: "
+- "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
++ printk_once(KERN_ERR "%s() Indirect register access failed: "
++ "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg);
+ *reg = ~0;
+
+ return 0;
+diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
+index 2264331..b96766b 100644
+--- a/drivers/platform/x86/msi-wmi.c
++++ b/drivers/platform/x86/msi-wmi.c
+@@ -176,7 +176,7 @@ static void msi_wmi_notify(u32 value, void *context)
+ pr_debug("Suppressed key event 0x%X - "
+ "Last press was %lld us ago\n",
+ key->code, ktime_to_us(diff));
+- return;
++ goto msi_wmi_notify_exit;
+ }
+ last_pressed[key->code - SCANCODE_BASE] = cur;
+
+@@ -195,6 +195,8 @@ static void msi_wmi_notify(u32 value, void *context)
+ pr_info("Unknown key pressed - %x\n", eventcode);
+ } else
+ pr_info("Unknown event received\n");
++
++msi_wmi_notify_exit:
+ kfree(response.pointer);
+ }
+
+diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
+index 0364ca2..d5f4eb8 100644
+--- a/drivers/target/target_core_alua.c
++++ b/drivers/target/target_core_alua.c
+@@ -393,8 +393,9 @@ static inline int core_alua_state_standby(
+ case REPORT_LUNS:
+ case RECEIVE_DIAGNOSTIC:
+ case SEND_DIAGNOSTIC:
++ return 0;
+ case MAINTENANCE_IN:
+- switch (cdb[1]) {
++ switch (cdb[1] & 0x1f) {
+ case MI_REPORT_TARGET_PGS:
+ return 0;
+ default:
+@@ -435,8 +436,9 @@ static inline int core_alua_state_unavailable(
+ switch (cdb[0]) {
+ case INQUIRY:
+ case REPORT_LUNS:
++ return 0;
+ case MAINTENANCE_IN:
+- switch (cdb[1]) {
++ switch (cdb[1] & 0x1f) {
+ case MI_REPORT_TARGET_PGS:
+ return 0;
+ default:
+@@ -475,8 +477,9 @@ static inline int core_alua_state_transition(
+ switch (cdb[0]) {
+ case INQUIRY:
+ case REPORT_LUNS:
++ return 0;
+ case MAINTENANCE_IN:
+- switch (cdb[1]) {
++ switch (cdb[1] & 0x1f) {
+ case MI_REPORT_TARGET_PGS:
+ return 0;
+ default:
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 9176b2e..898c1de 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1445,6 +1445,7 @@ static inline void transport_generic_prepare_cdb(
+ case VERIFY_16: /* SBC - VRProtect */
+ case WRITE_VERIFY: /* SBC - VRProtect */
+ case WRITE_VERIFY_12: /* SBC - VRProtect */
++ case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
+ break;
+ default:
+ cdb[1] &= 0x1f; /* clear logical unit number */
+@@ -2683,7 +2684,7 @@ static int transport_generic_cmd_sequencer(
+ /*
+ * Check for emulated MI_REPORT_TARGET_PGS.
+ */
+- if (cdb[1] == MI_REPORT_TARGET_PGS &&
++ if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
+ su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ cmd->execute_task =
+ target_emulate_report_target_port_groups;
+diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
+index dd9a574..f6fb292 100644
+--- a/drivers/thermal/thermal_sys.c
++++ b/drivers/thermal/thermal_sys.c
+@@ -1399,6 +1399,7 @@ static int __init thermal_init(void)
+ idr_destroy(&thermal_cdev_idr);
+ mutex_destroy(&thermal_idr_lock);
+ mutex_destroy(&thermal_list_lock);
++ return result;
+ }
+ result = genetlink_init();
+ return result;
+diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
+index 18e875b..3ca6c0d 100644
+--- a/drivers/usb/serial/ark3116.c
++++ b/drivers/usb/serial/ark3116.c
+@@ -68,7 +68,6 @@ static int is_irda(struct usb_serial *serial)
+ }
+
+ struct ark3116_private {
+- wait_queue_head_t delta_msr_wait;
+ struct async_icount icount;
+ int irda; /* 1 for irda device */
+
+@@ -148,7 +147,6 @@ static int ark3116_attach(struct usb_serial *serial)
+ if (!priv)
+ return -ENOMEM;
+
+- init_waitqueue_head(&priv->delta_msr_wait);
+ mutex_init(&priv->hw_lock);
+ spin_lock_init(&priv->status_lock);
+
+@@ -460,10 +458,14 @@ static int ark3116_ioctl(struct tty_struct *tty,
+ case TIOCMIWAIT:
+ for (;;) {
+ struct async_icount prev = priv->icount;
+- interruptible_sleep_on(&priv->delta_msr_wait);
++ interruptible_sleep_on(&port->delta_msr_wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
++
++ if (port->serial->disconnected)
++ return -EIO;
++
+ if ((prev.rng == priv->icount.rng) &&
+ (prev.dsr == priv->icount.dsr) &&
+ (prev.dcd == priv->icount.dcd) &&
+@@ -584,7 +586,7 @@ static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr)
+ priv->icount.dcd++;
+ if (msr & UART_MSR_TERI)
+ priv->icount.rng++;
+- wake_up_interruptible(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+ }
+ }
+
+diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
+index 6ae1c06..c4d95b0 100644
+--- a/drivers/usb/serial/ch341.c
++++ b/drivers/usb/serial/ch341.c
+@@ -82,7 +82,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
+
+ struct ch341_private {
+ spinlock_t lock; /* access lock */
+- wait_queue_head_t delta_msr_wait; /* wait queue for modem status */
+ unsigned baud_rate; /* set baud rate */
+ u8 line_control; /* set line control value RTS/DTR */
+ u8 line_status; /* active status of modem control inputs */
+@@ -262,7 +261,6 @@ static int ch341_attach(struct usb_serial *serial)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+- init_waitqueue_head(&priv->delta_msr_wait);
+ priv->baud_rate = DEFAULT_BAUD_RATE;
+ priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
+
+@@ -299,7 +297,7 @@ static void ch341_dtr_rts(struct usb_serial_port *port, int on)
+ priv->line_control &= ~(CH341_BIT_RTS | CH341_BIT_DTR);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ch341_set_handshake(port->serial->dev, priv->line_control);
+- wake_up_interruptible(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+ }
+
+ static void ch341_close(struct usb_serial_port *port)
+@@ -503,7 +501,7 @@ static void ch341_read_int_callback(struct urb *urb)
+ tty_kref_put(tty);
+ }
+
+- wake_up_interruptible(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+ }
+
+ exit:
+@@ -529,11 +527,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ while (!multi_change) {
+- interruptible_sleep_on(&priv->delta_msr_wait);
++ interruptible_sleep_on(&port->delta_msr_wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
++ if (port->serial->disconnected)
++ return -EIO;
++
+ spin_lock_irqsave(&priv->lock, flags);
+ status = priv->line_status;
+ multi_change = priv->multi_status_change;
+diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
+index d9906eb..01a44d3 100644
+--- a/drivers/usb/serial/cypress_m8.c
++++ b/drivers/usb/serial/cypress_m8.c
+@@ -150,7 +150,6 @@ struct cypress_private {
+ int baud_rate; /* stores current baud rate in
+ integer form */
+ int isthrottled; /* if throttled, discard reads */
+- wait_queue_head_t delta_msr_wait; /* used for TIOCMIWAIT */
+ char prev_status, diff_status; /* used for TIOCMIWAIT */
+ /* we pass a pointer to this as the argument sent to
+ cypress_set_termios old_termios */
+@@ -488,7 +487,6 @@ static int generic_startup(struct usb_serial *serial)
+ kfree(priv);
+ return -ENOMEM;
+ }
+- init_waitqueue_head(&priv->delta_msr_wait);
+
+ usb_reset_configuration(serial->dev);
+
+@@ -928,12 +926,16 @@ static int cypress_ioctl(struct tty_struct *tty,
+ switch (cmd) {
+ /* This code comes from drivers/char/serial.c and ftdi_sio.c */
+ case TIOCMIWAIT:
+- while (priv != NULL) {
+- interruptible_sleep_on(&priv->delta_msr_wait);
++ for (;;) {
++ interruptible_sleep_on(&port->delta_msr_wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+- else {
++
++ if (port->serial->disconnected)
++ return -EIO;
++
++ {
+ char diff = priv->diff_status;
+ if (diff == 0)
+ return -EIO; /* no change => error */
+@@ -1261,7 +1263,7 @@ static void cypress_read_int_callback(struct urb *urb)
+ if (priv->current_status != priv->prev_status) {
+ priv->diff_status |= priv->current_status ^
+ priv->prev_status;
+- wake_up_interruptible(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+ priv->prev_status = priv->current_status;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 878ff05..06394e5a 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -74,9 +74,7 @@ struct ftdi_private {
+ int flags; /* some ASYNC_xxxx flags are supported */
+ unsigned long last_dtr_rts; /* saved modem control outputs */
+ struct async_icount icount;
+- wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
+ char prev_status; /* Used for TIOCMIWAIT */
+- bool dev_gone; /* Used to abort TIOCMIWAIT */
+ char transmit_empty; /* If transmitter is empty or not */
+ struct usb_serial_port *port;
+ __u16 interface; /* FT2232C, FT2232H or FT4232H port interface
+@@ -1708,10 +1706,8 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
+ kref_init(&priv->kref);
+ mutex_init(&priv->cfg_lock);
+ memset(&priv->icount, 0x00, sizeof(priv->icount));
+- init_waitqueue_head(&priv->delta_msr_wait);
+
+ priv->flags = ASYNC_LOW_LATENCY;
+- priv->dev_gone = false;
+
+ if (quirk && quirk->port_probe)
+ quirk->port_probe(priv);
+@@ -1869,8 +1865,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
+
+ dbg("%s", __func__);
+
+- priv->dev_gone = true;
+- wake_up_interruptible_all(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+
+ remove_sysfs_attrs(port);
+
+@@ -2025,7 +2020,7 @@ static int ftdi_process_packet(struct tty_struct *tty,
+ if (diff_status & FTDI_RS0_RLSD)
+ priv->icount.dcd++;
+
+- wake_up_interruptible_all(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+ priv->prev_status = status;
+ }
+
+@@ -2424,11 +2419,15 @@ static int ftdi_ioctl(struct tty_struct *tty,
+ */
+ case TIOCMIWAIT:
+ cprev = priv->icount;
+- while (!priv->dev_gone) {
+- interruptible_sleep_on(&priv->delta_msr_wait);
++ for (;;) {
++ interruptible_sleep_on(&port->delta_msr_wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
++
++ if (port->serial->disconnected)
++ return -EIO;
++
+ cnow = priv->icount;
+ if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
+ ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
+@@ -2438,8 +2437,6 @@ static int ftdi_ioctl(struct tty_struct *tty,
+ }
+ cprev = cnow;
+ }
+- return -EIO;
+- break;
+ case TIOCSERGETLSR:
+ return get_lsr_info(port, (struct serial_struct __user *)arg);
+ break;
+diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
+index 2ee8075..0af0b41 100644
+--- a/drivers/usb/serial/io_edgeport.c
++++ b/drivers/usb/serial/io_edgeport.c
+@@ -114,7 +114,6 @@ struct edgeport_port {
+ wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */
+ wait_queue_head_t wait_open; /* for handling sleeping while waiting for open to finish */
+ wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */
+- wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */
+
+ struct async_icount icount;
+ struct usb_serial_port *port; /* loop back to the owner of this object */
+@@ -885,7 +884,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
+ /* initialize our wait queues */
+ init_waitqueue_head(&edge_port->wait_open);
+ init_waitqueue_head(&edge_port->wait_chase);
+- init_waitqueue_head(&edge_port->delta_msr_wait);
+ init_waitqueue_head(&edge_port->wait_command);
+
+ /* initialize our icount structure */
+@@ -1703,13 +1701,17 @@ static int edge_ioctl(struct tty_struct *tty,
+ dbg("%s (%d) TIOCMIWAIT", __func__, port->number);
+ cprev = edge_port->icount;
+ while (1) {
+- prepare_to_wait(&edge_port->delta_msr_wait,
++ prepare_to_wait(&port->delta_msr_wait,
+ &wait, TASK_INTERRUPTIBLE);
+ schedule();
+- finish_wait(&edge_port->delta_msr_wait, &wait);
++ finish_wait(&port->delta_msr_wait, &wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
++
++ if (port->serial->disconnected)
++ return -EIO;
++
+ cnow = edge_port->icount;
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
+@@ -2090,7 +2092,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 newMsr)
+ icount->dcd++;
+ if (newMsr & EDGEPORT_MSR_DELTA_RI)
+ icount->rng++;
+- wake_up_interruptible(&edge_port->delta_msr_wait);
++ wake_up_interruptible(&edge_port->port->delta_msr_wait);
+ }
+
+ /* Save the new modem status */
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index 1f145bf..f42119d 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -98,9 +98,6 @@ struct edgeport_port {
+ int close_pending;
+ int lsr_event;
+ struct async_icount icount;
+- wait_queue_head_t delta_msr_wait; /* for handling sleeping while
+- waiting for msr change to
+- happen */
+ struct edgeport_serial *edge_serial;
+ struct usb_serial_port *port;
+ __u8 bUartMode; /* Port type, 0: RS232, etc. */
+@@ -1557,7 +1554,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 msr)
+ icount->dcd++;
+ if (msr & EDGEPORT_MSR_DELTA_RI)
+ icount->rng++;
+- wake_up_interruptible(&edge_port->delta_msr_wait);
++ wake_up_interruptible(&edge_port->port->delta_msr_wait);
+ }
+
+ /* Save the new modem status */
+@@ -1876,7 +1873,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
+ dev = port->serial->dev;
+
+ memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount));
+- init_waitqueue_head(&edge_port->delta_msr_wait);
+
+ /* turn off loopback */
+ status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0);
+@@ -2574,10 +2570,14 @@ static int edge_ioctl(struct tty_struct *tty,
+ dbg("%s - (%d) TIOCMIWAIT", __func__, port->number);
+ cprev = edge_port->icount;
+ while (1) {
+- interruptible_sleep_on(&edge_port->delta_msr_wait);
++ interruptible_sleep_on(&port->delta_msr_wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
++
++ if (port->serial->disconnected)
++ return -EIO;
++
+ cnow = edge_port->icount;
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
+diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
+index de0bb8e..96a62dd 100644
+--- a/drivers/usb/serial/mct_u232.c
++++ b/drivers/usb/serial/mct_u232.c
+@@ -168,8 +168,6 @@ struct mct_u232_private {
+ unsigned char last_msr; /* Modem Status Register */
+ unsigned int rx_flags; /* Throttling flags */
+ struct async_icount icount;
+- wait_queue_head_t msr_wait; /* for handling sleeping while waiting
+- for msr change to happen */
+ };
+
+ #define THROTTLED 0x01
+@@ -449,7 +447,6 @@ static int mct_u232_startup(struct usb_serial *serial)
+ if (!priv)
+ return -ENOMEM;
+ spin_lock_init(&priv->lock);
+- init_waitqueue_head(&priv->msr_wait);
+ usb_set_serial_port_data(serial->port[0], priv);
+
+ init_waitqueue_head(&serial->port[0]->write_wait);
+@@ -675,7 +672,7 @@ static void mct_u232_read_int_callback(struct urb *urb)
+ tty_kref_put(tty);
+ }
+ #endif
+- wake_up_interruptible(&priv->msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ exit:
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+@@ -896,13 +893,17 @@ static int mct_u232_ioctl(struct tty_struct *tty,
+ cprev = mct_u232_port->icount;
+ spin_unlock_irqrestore(&mct_u232_port->lock, flags);
+ for ( ; ; ) {
+- prepare_to_wait(&mct_u232_port->msr_wait,
++ prepare_to_wait(&port->delta_msr_wait,
+ &wait, TASK_INTERRUPTIBLE);
+ schedule();
+- finish_wait(&mct_u232_port->msr_wait, &wait);
++ finish_wait(&port->delta_msr_wait, &wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
++
++ if (port->serial->disconnected)
++ return -EIO;
++
+ spin_lock_irqsave(&mct_u232_port->lock, flags);
+ cnow = mct_u232_port->icount;
+ spin_unlock_irqrestore(&mct_u232_port->lock, flags);
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 43a38aa..e89ee48 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -240,7 +240,6 @@ struct moschip_port {
+ char open;
+ char open_ports;
+ wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */
+- wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */
+ int delta_msr_cond;
+ struct async_icount icount;
+ struct usb_serial_port *port; /* loop back to the owner of this object */
+@@ -453,6 +452,9 @@ static void mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr)
+ icount->rng++;
+ smp_wmb();
+ }
++
++ mos7840_port->delta_msr_cond = 1;
++ wake_up_interruptible(&port->port->delta_msr_wait);
+ }
+ }
+
+@@ -1115,7 +1117,6 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+
+ /* initialize our wait queues */
+ init_waitqueue_head(&mos7840_port->wait_chase);
+- init_waitqueue_head(&mos7840_port->delta_msr_wait);
+
+ /* initialize our icount structure */
+ memset(&(mos7840_port->icount), 0x00, sizeof(mos7840_port->icount));
+@@ -2073,8 +2074,6 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
+ mos7840_port->read_urb_busy = false;
+ }
+ }
+- wake_up(&mos7840_port->delta_msr_wait);
+- mos7840_port->delta_msr_cond = 1;
+ dbg("mos7840_change_port_settings mos7840_port->shadowLCR is End %x",
+ mos7840_port->shadowLCR);
+ }
+@@ -2284,13 +2283,18 @@ static int mos7840_ioctl(struct tty_struct *tty,
+ while (1) {
+ /* interruptible_sleep_on(&mos7840_port->delta_msr_wait); */
+ mos7840_port->delta_msr_cond = 0;
+- wait_event_interruptible(mos7840_port->delta_msr_wait,
+- (mos7840_port->
++ wait_event_interruptible(port->delta_msr_wait,
++ (port->serial->disconnected ||
++ mos7840_port->
+ delta_msr_cond == 1));
+
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
++
++ if (port->serial->disconnected)
++ return -EIO;
++
+ cnow = mos7840_port->icount;
+ smp_rmb();
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
+index 4c29e6c..8ceaa89 100644
+--- a/drivers/usb/serial/oti6858.c
++++ b/drivers/usb/serial/oti6858.c
+@@ -196,7 +196,6 @@ struct oti6858_private {
+ u8 setup_done;
+ struct delayed_work delayed_setup_work;
+
+- wait_queue_head_t intr_wait;
+ struct usb_serial_port *port; /* USB port with which associated */
+ };
+
+@@ -357,7 +356,6 @@ static int oti6858_startup(struct usb_serial *serial)
+ break;
+
+ spin_lock_init(&priv->lock);
+- init_waitqueue_head(&priv->intr_wait);
+ /* INIT_WORK(&priv->setup_work, setup_line, serial->port[i]); */
+ /* INIT_WORK(&priv->write_work, send_data, serial->port[i]); */
+ priv->port = port;
+@@ -705,11 +703,15 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ while (1) {
+- wait_event_interruptible(priv->intr_wait,
++ wait_event_interruptible(port->delta_msr_wait,
++ port->serial->disconnected ||
+ priv->status.pin_state != prev);
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
++ if (port->serial->disconnected)
++ return -EIO;
++
+ spin_lock_irqsave(&priv->lock, flags);
+ status = priv->status.pin_state & PIN_MASK;
+ spin_unlock_irqrestore(&priv->lock, flags);
+@@ -821,7 +823,7 @@ static void oti6858_read_int_callback(struct urb *urb)
+
+ if (!priv->transient) {
+ if (xs->pin_state != priv->status.pin_state)
+- wake_up_interruptible(&priv->intr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+ memcpy(&priv->status, xs, OTI6858_CTRL_PKT_SIZE);
+ }
+
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 5532ea5..fd86e0e 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -150,7 +150,6 @@ enum pl2303_type {
+
+ struct pl2303_private {
+ spinlock_t lock;
+- wait_queue_head_t delta_msr_wait;
+ u8 line_control;
+ u8 line_status;
+ enum pl2303_type type;
+@@ -204,7 +203,6 @@ static int pl2303_startup(struct usb_serial *serial)
+ if (!priv)
+ goto cleanup;
+ spin_lock_init(&priv->lock);
+- init_waitqueue_head(&priv->delta_msr_wait);
+ priv->type = type;
+ usb_set_serial_port_data(serial->port[i], priv);
+ }
+@@ -599,11 +597,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ while (1) {
+- interruptible_sleep_on(&priv->delta_msr_wait);
++ interruptible_sleep_on(&port->delta_msr_wait);
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
++ if (port->serial->disconnected)
++ return -EIO;
++
+ spin_lock_irqsave(&priv->lock, flags);
+ status = priv->line_status;
+ spin_unlock_irqrestore(&priv->lock, flags);
+@@ -725,7 +726,7 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
+ spin_unlock_irqrestore(&priv->lock, flags);
+ if (priv->line_status & UART_BREAK_ERROR)
+ usb_serial_handle_break(port);
+- wake_up_interruptible(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+
+ tty = tty_port_tty_get(&port->port);
+ if (!tty)
+@@ -792,7 +793,7 @@ static void pl2303_process_read_urb(struct urb *urb)
+ line_status = priv->line_status;
+ priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
+ spin_unlock_irqrestore(&priv->lock, flags);
+- wake_up_interruptible(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+
+ if (!urb->actual_length)
+ return;
+diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
+index 180ea6c..ba6b438 100644
+--- a/drivers/usb/serial/spcp8x5.c
++++ b/drivers/usb/serial/spcp8x5.c
+@@ -163,7 +163,6 @@ static struct usb_driver spcp8x5_driver = {
+ struct spcp8x5_private {
+ spinlock_t lock;
+ enum spcp8x5_type type;
+- wait_queue_head_t delta_msr_wait;
+ u8 line_control;
+ u8 line_status;
+ };
+@@ -197,7 +196,6 @@ static int spcp8x5_startup(struct usb_serial *serial)
+ goto cleanup;
+
+ spin_lock_init(&priv->lock);
+- init_waitqueue_head(&priv->delta_msr_wait);
+ priv->type = type;
+ usb_set_serial_port_data(serial->port[i] , priv);
+ }
+@@ -502,7 +500,7 @@ static void spcp8x5_process_read_urb(struct urb *urb)
+ priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ /* wake up the wait for termios */
+- wake_up_interruptible(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+
+ if (!urb->actual_length)
+ return;
+@@ -552,12 +550,15 @@ static int spcp8x5_wait_modem_info(struct usb_serial_port *port,
+
+ while (1) {
+ /* wake up in bulk read */
+- interruptible_sleep_on(&priv->delta_msr_wait);
++ interruptible_sleep_on(&port->delta_msr_wait);
+
+ /* see if a signal did it */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
++ if (port->serial->disconnected)
++ return -EIO;
++
+ spin_lock_irqsave(&priv->lock, flags);
+ status = priv->line_status;
+ spin_unlock_irqrestore(&priv->lock, flags);
+diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
+index fff7f17..bf1f8ea 100644
+--- a/drivers/usb/serial/ssu100.c
++++ b/drivers/usb/serial/ssu100.c
+@@ -78,7 +78,6 @@ struct ssu100_port_private {
+ spinlock_t status_lock;
+ u8 shadowLSR;
+ u8 shadowMSR;
+- wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
+ struct async_icount icount;
+ };
+
+@@ -387,8 +386,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+
+ while (1) {
+- wait_event_interruptible(priv->delta_msr_wait,
+- ((priv->icount.rng != prev.rng) ||
++ wait_event_interruptible(port->delta_msr_wait,
++ (port->serial->disconnected ||
++ (priv->icount.rng != prev.rng) ||
+ (priv->icount.dsr != prev.dsr) ||
+ (priv->icount.dcd != prev.dcd) ||
+ (priv->icount.cts != prev.cts)));
+@@ -396,6 +396,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
++ if (port->serial->disconnected)
++ return -EIO;
++
+ spin_lock_irqsave(&priv->status_lock, flags);
+ cur = priv->icount;
+ spin_unlock_irqrestore(&priv->status_lock, flags);
+@@ -478,7 +481,6 @@ static int ssu100_attach(struct usb_serial *serial)
+ }
+
+ spin_lock_init(&priv->status_lock);
+- init_waitqueue_head(&priv->delta_msr_wait);
+ usb_set_serial_port_data(port, priv);
+
+ return ssu100_initdevice(serial->dev);
+@@ -564,7 +566,7 @@ static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
+ priv->icount.dcd++;
+ if (msr & UART_MSR_TERI)
+ priv->icount.rng++;
+- wake_up_interruptible(&priv->delta_msr_wait);
++ wake_up_interruptible(&port->delta_msr_wait);
+ }
+ }
+
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index 2856474..4b805be 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -75,7 +75,6 @@ struct ti_port {
+ int tp_flags;
+ int tp_closing_wait;/* in .01 secs */
+ struct async_icount tp_icount;
+- wait_queue_head_t tp_msr_wait; /* wait for msr change */
+ wait_queue_head_t tp_write_wait;
+ struct ti_device *tp_tdev;
+ struct usb_serial_port *tp_port;
+@@ -447,7 +446,6 @@ static int ti_startup(struct usb_serial *serial)
+ tport->tp_uart_base_addr = (i == 0 ?
+ TI_UART1_BASE_ADDR : TI_UART2_BASE_ADDR);
+ tport->tp_closing_wait = closing_wait;
+- init_waitqueue_head(&tport->tp_msr_wait);
+ init_waitqueue_head(&tport->tp_write_wait);
+ if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE,
+ GFP_KERNEL)) {
+@@ -848,9 +846,13 @@ static int ti_ioctl(struct tty_struct *tty,
+ dbg("%s - (%d) TIOCMIWAIT", __func__, port->number);
+ cprev = tport->tp_icount;
+ while (1) {
+- interruptible_sleep_on(&tport->tp_msr_wait);
++ interruptible_sleep_on(&port->delta_msr_wait);
+ if (signal_pending(current))
+ return -ERESTARTSYS;
++
++ if (port->serial->disconnected)
++ return -EIO;
++
+ cnow = tport->tp_icount;
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
+@@ -1481,7 +1483,7 @@ static void ti_handle_new_msr(struct ti_port *tport, __u8 msr)
+ icount->dcd++;
+ if (msr & TI_MSR_DELTA_RI)
+ icount->rng++;
+- wake_up_interruptible(&tport->tp_msr_wait);
++ wake_up_interruptible(&tport->tp_port->delta_msr_wait);
+ spin_unlock_irqrestore(&tport->tp_lock, flags);
+ }
+
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index 2482d5e..850faa4 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -905,6 +905,7 @@ int usb_serial_probe(struct usb_interface *interface,
+ port->port.ops = &serial_port_ops;
+ port->serial = serial;
+ spin_lock_init(&port->lock);
++ init_waitqueue_head(&port->delta_msr_wait);
+ /* Keep this for private driver use for the moment but
+ should probably go away */
+ INIT_WORK(&port->work, usb_serial_port_work);
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index 9b8bcab..7a36dff 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -843,6 +843,8 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
+ *
+ * Maps a virtual console @unit to a frame buffer device
+ * @newidx.
++ *
++ * This should be called with the console lock held.
+ */
+ static int set_con2fb_map(int unit, int newidx, int user)
+ {
+@@ -860,7 +862,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
+
+ if (!search_for_mapped_con() || !con_is_bound(&fb_con)) {
+ info_idx = newidx;
+- return fbcon_takeover(0);
++ return do_fbcon_takeover(0);
+ }
+
+ if (oldidx != -1)
+@@ -868,7 +870,6 @@ static int set_con2fb_map(int unit, int newidx, int user)
+
+ found = search_fb_in_map(newidx);
+
+- console_lock();
+ con2fb_map[unit] = newidx;
+ if (!err && !found)
+ err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
+@@ -895,7 +896,6 @@ static int set_con2fb_map(int unit, int newidx, int user)
+ if (!search_fb_in_map(info_idx))
+ info_idx = newidx;
+
+- console_unlock();
+ return err;
+ }
+
+@@ -3026,6 +3026,7 @@ static inline int fbcon_unbind(void)
+ }
+ #endif /* CONFIG_VT_HW_CONSOLE_BINDING */
+
++/* called with console_lock held */
+ static int fbcon_fb_unbind(int idx)
+ {
+ int i, new_idx = -1, ret = 0;
+@@ -3052,6 +3053,7 @@ static int fbcon_fb_unbind(int idx)
+ return ret;
+ }
+
++/* called with console_lock held */
+ static int fbcon_fb_unregistered(struct fb_info *info)
+ {
+ int i, idx;
+@@ -3089,6 +3091,7 @@ static int fbcon_fb_unregistered(struct fb_info *info)
+ return 0;
+ }
+
++/* called with console_lock held */
+ static void fbcon_remap_all(int idx)
+ {
+ int i;
+@@ -3133,6 +3136,7 @@ static inline void fbcon_select_primary(struct fb_info *info)
+ }
+ #endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */
+
++/* called with console_lock held */
+ static int fbcon_fb_registered(struct fb_info *info)
+ {
+ int ret = 0, i, idx;
+@@ -3285,6 +3289,7 @@ static int fbcon_event_notify(struct notifier_block *self,
+ ret = fbcon_fb_unregistered(info);
+ break;
+ case FB_EVENT_SET_CONSOLE_MAP:
++ /* called with console lock held */
+ con2fb = event->data;
+ ret = set_con2fb_map(con2fb->console - 1,
+ con2fb->framebuffer, 1);
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index c133dde..babbb07 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1154,8 +1154,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
+ event.data = &con2fb;
+ if (!lock_fb_info(info))
+ return -ENODEV;
++ console_lock();
+ event.info = info;
+ ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
++ console_unlock();
+ unlock_fb_info(info);
+ break;
+ case FBIOBLANK:
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 49f3c9d..73e4cbc 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -1209,6 +1209,39 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
+ mask);
+ }
+
++int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
++{
++ unsigned long index = start >> PAGE_CACHE_SHIFT;
++ unsigned long end_index = end >> PAGE_CACHE_SHIFT;
++ struct page *page;
++
++ while (index <= end_index) {
++ page = find_get_page(inode->i_mapping, index);
++ BUG_ON(!page); /* Pages should be in the extent_io_tree */
++ clear_page_dirty_for_io(page);
++ page_cache_release(page);
++ index++;
++ }
++ return 0;
++}
++
++int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
++{
++ unsigned long index = start >> PAGE_CACHE_SHIFT;
++ unsigned long end_index = end >> PAGE_CACHE_SHIFT;
++ struct page *page;
++
++ while (index <= end_index) {
++ page = find_get_page(inode->i_mapping, index);
++ BUG_ON(!page); /* Pages should be in the extent_io_tree */
++ account_page_redirty(page);
++ __set_page_dirty_nobuffers(page);
++ page_cache_release(page);
++ index++;
++ }
++ return 0;
++}
++
+ /*
+ * helper function to set both pages and extents in the tree writeback
+ */
+diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
+index 7604c30..2e32510 100644
+--- a/fs/btrfs/extent_io.h
++++ b/fs/btrfs/extent_io.h
+@@ -304,6 +304,8 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
+ unsigned long *map_len);
+ int extent_range_uptodate(struct extent_io_tree *tree,
+ u64 start, u64 end);
++int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
++int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
+ int extent_clear_unlock_delalloc(struct inode *inode,
+ struct extent_io_tree *tree,
+ u64 start, u64 end, struct page *locked_page,
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index fd1a06d..1372634 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -343,6 +343,7 @@ static noinline int compress_file_range(struct inode *inode,
+ int i;
+ int will_compress;
+ int compress_type = root->fs_info->compress_type;
++ int redirty = 0;
+
+ /* if this is a small write inside eof, kick off a defragbot */
+ if (end <= BTRFS_I(inode)->disk_i_size && (end - start + 1) < 16 * 1024)
+@@ -404,6 +405,17 @@ again:
+ if (BTRFS_I(inode)->force_compress)
+ compress_type = BTRFS_I(inode)->force_compress;
+
++ /*
++ * we need to call clear_page_dirty_for_io on each
++ * page in the range. Otherwise applications with the file
++ * mmap'd can wander in and change the page contents while
++ * we are compressing them.
++ *
++ * If the compression fails for any reason, we set the pages
++ * dirty again later on.
++ */
++ extent_range_clear_dirty_for_io(inode, start, end);
++ redirty = 1;
+ ret = btrfs_compress_pages(compress_type,
+ inode->i_mapping, start,
+ total_compressed, pages,
+@@ -541,6 +553,8 @@ cleanup_and_bail_uncompressed:
+ __set_page_dirty_nobuffers(locked_page);
+ /* unlocked later on in the async handlers */
+ }
++ if (redirty)
++ extent_range_redirty_for_io(inode, start, end);
+ add_async_extent(async_cow, start, end - start + 1,
+ 0, NULL, 0, BTRFS_COMPRESS_NONE);
+ *num_added += 1;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 19b127c..21faa12 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -316,6 +316,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
+ unsigned long src_ptr;
+ unsigned long dst_ptr;
+ int overwrite_root = 0;
++ bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
+
+ if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
+ overwrite_root = 1;
+@@ -325,6 +326,9 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
+
+ /* look for the key in the destination tree */
+ ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
++ if (ret < 0)
++ return ret;
++
+ if (ret == 0) {
+ char *src_copy;
+ char *dst_copy;
+@@ -366,6 +370,30 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
+ return 0;
+ }
+
++ /*
++ * We need to load the old nbytes into the inode so when we
++ * replay the extents we've logged we get the right nbytes.
++ */
++ if (inode_item) {
++ struct btrfs_inode_item *item;
++ u64 nbytes;
++
++ item = btrfs_item_ptr(path->nodes[0], path->slots[0],
++ struct btrfs_inode_item);
++ nbytes = btrfs_inode_nbytes(path->nodes[0], item);
++ item = btrfs_item_ptr(eb, slot,
++ struct btrfs_inode_item);
++ btrfs_set_inode_nbytes(eb, item, nbytes);
++ }
++ } else if (inode_item) {
++ struct btrfs_inode_item *item;
++
++ /*
++ * New inode, set nbytes to 0 so that the nbytes comes out
++ * properly when we replay the extents.
++ */
++ item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
++ btrfs_set_inode_nbytes(eb, item, 0);
+ }
+ insert:
+ btrfs_release_path(path);
+@@ -488,7 +516,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
+ u64 extent_end;
+ u64 alloc_hint;
+ u64 start = key->offset;
+- u64 saved_nbytes;
++ u64 nbytes = 0;
+ struct btrfs_file_extent_item *item;
+ struct inode *inode = NULL;
+ unsigned long size;
+@@ -498,10 +526,19 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
+ found_type = btrfs_file_extent_type(eb, item);
+
+ if (found_type == BTRFS_FILE_EXTENT_REG ||
+- found_type == BTRFS_FILE_EXTENT_PREALLOC)
+- extent_end = start + btrfs_file_extent_num_bytes(eb, item);
+- else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
++ found_type == BTRFS_FILE_EXTENT_PREALLOC) {
++ nbytes = btrfs_file_extent_num_bytes(eb, item);
++ extent_end = start + nbytes;
++
++ /*
++ * We don't add to the inodes nbytes if we are prealloc or a
++ * hole.
++ */
++ if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
++ nbytes = 0;
++ } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+ size = btrfs_file_extent_inline_len(eb, item);
++ nbytes = btrfs_file_extent_ram_bytes(eb, item);
+ extent_end = (start + size + mask) & ~mask;
+ } else {
+ ret = 0;
+@@ -550,7 +587,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
+ }
+ btrfs_release_path(path);
+
+- saved_nbytes = inode_get_bytes(inode);
+ /* drop any overlapping extents */
+ ret = btrfs_drop_extents(trans, inode, start, extent_end,
+ &alloc_hint, 1);
+@@ -638,7 +674,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
+ BUG_ON(ret);
+ }
+
+- inode_set_bytes(inode, saved_nbytes);
++ inode_add_bytes(inode, nbytes);
+ btrfs_update_inode(trans, root, inode);
+ out:
+ if (inode)
+diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
+index 5849e3e..32b12e5 100644
+--- a/fs/hfsplus/extents.c
++++ b/fs/hfsplus/extents.c
+@@ -517,7 +517,7 @@ void hfsplus_file_truncate(struct inode *inode)
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+ void *fsdata;
+- u32 size = inode->i_size;
++ loff_t size = inode->i_size;
+
+ res = pagecache_write_begin(NULL, mapping, size, 0,
+ AOP_FLAG_UNINTERRUPTIBLE,
+diff --git a/fs/inode.c b/fs/inode.c
+index ee4e66b..e2d3633 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -634,7 +634,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
+ * inode to the back of the list so we don't spin on it.
+ */
+ if (!spin_trylock(&inode->i_lock)) {
+- list_move_tail(&inode->i_lru, &sb->s_inode_lru);
++ list_move(&inode->i_lru, &sb->s_inode_lru);
+ continue;
+ }
+
+diff --git a/include/linux/ata.h b/include/linux/ata.h
+index 32df2b6..5856c9e 100644
+--- a/include/linux/ata.h
++++ b/include/linux/ata.h
+@@ -937,7 +937,7 @@ static inline int atapi_cdb_len(const u16 *dev_id)
+ }
+ }
+
+-static inline bool atapi_command_packet_set(const u16 *dev_id)
++static inline int atapi_command_packet_set(const u16 *dev_id)
+ {
+ return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f;
+ }
+diff --git a/include/linux/kref.h b/include/linux/kref.h
+index d4a62ab..d064502 100644
+--- a/include/linux/kref.h
++++ b/include/linux/kref.h
+@@ -16,6 +16,7 @@
+ #define _KREF_H_
+
+ #include <linux/types.h>
++#include <linux/atomic.h>
+
+ struct kref {
+ atomic_t refcount;
+@@ -27,4 +28,24 @@ int kref_put(struct kref *kref, void (*release) (struct kref *kref));
+ int kref_sub(struct kref *kref, unsigned int count,
+ void (*release) (struct kref *kref));
+
++/**
++ * kref_get_unless_zero - Increment refcount for object unless it is zero.
++ * @kref: object.
++ *
++ * Return non-zero if the increment succeeded. Otherwise return 0.
++ *
++ * This function is intended to simplify locking around refcounting for
++ * objects that can be looked up from a lookup structure, and which are
++ * removed from that lookup structure in the object destructor.
++ * Operations on such objects require at least a read lock around
++ * lookup + kref_get, and a write lock around kref_put + remove from lookup
++ * structure. Furthermore, RCU implementations become extremely tricky.
++ * With a lookup followed by a kref_get_unless_zero *with return value check*
++ * locking in the kref_put path can be deferred to the actual removal from
++ * the lookup structure and RCU lookups become trivial.
++ */
++static inline int __must_check kref_get_unless_zero(struct kref *kref)
++{
++ return atomic_add_unless(&kref->refcount, 1, 0);
++}
+ #endif /* _KREF_H_ */
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 6136821..e6796c1 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -396,7 +396,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
+ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned long len);
+ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+- gpa_t gpa);
++ gpa_t gpa, unsigned long len);
+ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
+ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
+ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
+diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
+index fa7cc72..b0bcce0 100644
+--- a/include/linux/kvm_types.h
++++ b/include/linux/kvm_types.h
+@@ -71,6 +71,7 @@ struct gfn_to_hva_cache {
+ u64 generation;
+ gpa_t gpa;
+ unsigned long hva;
++ unsigned long len;
+ struct kvm_memory_slot *memslot;
+ };
+
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index cafc09a..62467ca 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -392,6 +392,7 @@ enum {
+ ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
+ ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
+ ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
++ ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
+
+ /* DMA mask for user DMA control: User visible values; DO NOT
+ renumber */
+diff --git a/include/linux/of.h b/include/linux/of.h
+index 4948552..9bf9611 100644
+--- a/include/linux/of.h
++++ b/include/linux/of.h
+@@ -336,6 +336,22 @@ static inline int of_machine_is_compatible(const char *compat)
+ #define of_match_node(_matches, _node) NULL
+ #endif /* CONFIG_OF */
+
++/**
++ * of_property_read_bool - Findfrom a property
++ * @np: device node from which the property value is to be read.
++ * @propname: name of the property to be searched.
++ *
++ * Search for a property in a device node.
++ * Returns true if the property exist false otherwise.
++ */
++static inline bool of_property_read_bool(const struct device_node *np,
++ const char *propname)
++{
++ struct property *prop = of_find_property(np, propname, NULL);
++
++ return prop ? true : false;
++}
++
+ static inline int of_property_read_u32(const struct device_node *np,
+ const char *propname,
+ u32 *out_value)
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 58969b2..e86bf01 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -91,13 +91,19 @@ do { \
+
+ #else /* !CONFIG_PREEMPT_COUNT */
+
+-#define preempt_disable() do { } while (0)
+-#define preempt_enable_no_resched() do { } while (0)
+-#define preempt_enable() do { } while (0)
++/*
++ * Even if we don't have any preemption, we need preempt disable/enable
++ * to be barriers, so that we don't have things like get_user/put_user
++ * that can cause faults and scheduling migrate into our preempt-protected
++ * region.
++ */
++#define preempt_disable() barrier()
++#define preempt_enable_no_resched() barrier()
++#define preempt_enable() barrier()
+
+-#define preempt_disable_notrace() do { } while (0)
+-#define preempt_enable_no_resched_notrace() do { } while (0)
+-#define preempt_enable_notrace() do { } while (0)
++#define preempt_disable_notrace() barrier()
++#define preempt_enable_no_resched_notrace() barrier()
++#define preempt_enable_notrace() barrier()
+
+ #endif /* CONFIG_PREEMPT_COUNT */
+
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index ad919e0..2acd2e2 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -317,6 +317,7 @@ struct ucred {
+ #define IPX_TYPE 1
+
+ extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred);
++extern void cred_real_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred);
+
+ extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
+ extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
+diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
+index a26e2fb..e2369c1 100644
+--- a/include/linux/spinlock_up.h
++++ b/include/linux/spinlock_up.h
+@@ -16,7 +16,10 @@
+ * In the debug case, 1 means unlocked, 0 means locked. (the values
+ * are inverted, to catch initialization bugs)
+ *
+- * No atomicity anywhere, we are on UP.
++ * No atomicity anywhere, we are on UP. However, we still need
++ * the compiler barriers, because we do not want the compiler to
++ * move potentially faulting instructions (notably user accesses)
++ * into the locked sequence, resulting in non-atomic execution.
+ */
+
+ #ifdef CONFIG_DEBUG_SPINLOCK
+@@ -25,6 +28,7 @@
+ static inline void arch_spin_lock(arch_spinlock_t *lock)
+ {
+ lock->slock = 0;
++ barrier();
+ }
+
+ static inline void
+@@ -32,6 +36,7 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+ {
+ local_irq_save(flags);
+ lock->slock = 0;
++ barrier();
+ }
+
+ static inline int arch_spin_trylock(arch_spinlock_t *lock)
+@@ -39,32 +44,34 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
+ char oldval = lock->slock;
+
+ lock->slock = 0;
++ barrier();
+
+ return oldval > 0;
+ }
+
+ static inline void arch_spin_unlock(arch_spinlock_t *lock)
+ {
++ barrier();
+ lock->slock = 1;
+ }
+
+ /*
+ * Read-write spinlocks. No debug version.
+ */
+-#define arch_read_lock(lock) do { (void)(lock); } while (0)
+-#define arch_write_lock(lock) do { (void)(lock); } while (0)
+-#define arch_read_trylock(lock) ({ (void)(lock); 1; })
+-#define arch_write_trylock(lock) ({ (void)(lock); 1; })
+-#define arch_read_unlock(lock) do { (void)(lock); } while (0)
+-#define arch_write_unlock(lock) do { (void)(lock); } while (0)
++#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
++#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
++#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
++#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
++#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
++#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
+
+ #else /* DEBUG_SPINLOCK */
+ #define arch_spin_is_locked(lock) ((void)(lock), 0)
+ /* for sched.c and kernel_lock.c: */
+-# define arch_spin_lock(lock) do { (void)(lock); } while (0)
+-# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
+-# define arch_spin_unlock(lock) do { (void)(lock); } while (0)
+-# define arch_spin_trylock(lock) ({ (void)(lock); 1; })
++# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
++# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
++# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0)
++# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; })
+ #endif /* DEBUG_SPINLOCK */
+
+ #define arch_spin_is_contended(lock) (((void)(lock), 0))
+diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
+index b29f70b..237d5f8 100644
+--- a/include/linux/usb/serial.h
++++ b/include/linux/usb/serial.h
+@@ -71,6 +71,7 @@ enum port_dev_state {
+ * port.
+ * @flags: usb serial port flags
+ * @write_wait: a wait_queue_head_t used by the port.
++ * @delta_msr_wait: modem-status-change wait queue
+ * @work: work queue entry for the line discipline waking up.
+ * @throttled: nonzero if the read urb is inactive to throttle the device
+ * @throttle_req: nonzero if the tty wants to throttle us
+@@ -114,6 +115,7 @@ struct usb_serial_port {
+
+ unsigned long flags;
+ wait_queue_head_t write_wait;
++ wait_queue_head_t delta_msr_wait;
+ struct work_struct work;
+ char throttled;
+ char throttle_req;
+diff --git a/include/linux/writeback.h b/include/linux/writeback.h
+index a378c29..7e85d45 100644
+--- a/include/linux/writeback.h
++++ b/include/linux/writeback.h
+@@ -195,6 +195,8 @@ void writeback_set_ratelimit(void);
+ void tag_pages_for_writeback(struct address_space *mapping,
+ pgoff_t start, pgoff_t end);
+
++void account_page_redirty(struct page *page);
++
+ /* pdflush.c */
+ extern int nr_pdflush_threads; /* Global so it can be exported to sysctl
+ read-only. */
+diff --git a/include/net/scm.h b/include/net/scm.h
+index 0c0017c..5da0a7b 100644
+--- a/include/net/scm.h
++++ b/include/net/scm.h
+@@ -50,7 +50,7 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm,
+ {
+ scm->pid = get_pid(pid);
+ scm->cred = cred ? get_cred(cred) : NULL;
+- cred_to_ucred(pid, cred, &scm->creds);
++ cred_real_to_ucred(pid, cred, &scm->creds);
+ }
+
+ static __inline__ void scm_destroy_cred(struct scm_cookie *scm)
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index cdd5607..e4cee8d 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -61,6 +61,7 @@
+ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
+ {
+
++ .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
+ .clock_base =
+ {
+ {
+@@ -1640,8 +1641,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
+ struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
+ int i;
+
+- raw_spin_lock_init(&cpu_base->lock);
+-
+ for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
+ cpu_base->clock_base[i].cpu_base = cpu_base;
+ timerqueue_init_head(&cpu_base->clock_base[i].active);
+diff --git a/kernel/sched.c b/kernel/sched.c
+index eeeec4e..d08c9f4 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -2889,8 +2889,10 @@ static void try_to_wake_up_local(struct task_struct *p)
+ {
+ struct rq *rq = task_rq(p);
+
+- BUG_ON(rq != this_rq());
+- BUG_ON(p == current);
++ if (WARN_ON_ONCE(rq != this_rq()) ||
++ WARN_ON_ONCE(p == current))
++ return;
++
+ lockdep_assert_held(&rq->lock);
+
+ if (!raw_spin_trylock(&p->pi_lock)) {
+diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
+index c685e31..c3ae144 100644
+--- a/kernel/sched_clock.c
++++ b/kernel/sched_clock.c
+@@ -176,10 +176,36 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
+ u64 this_clock, remote_clock;
+ u64 *ptr, old_val, val;
+
++#if BITS_PER_LONG != 64
++again:
++ /*
++ * Careful here: The local and the remote clock values need to
++ * be read out atomic as we need to compare the values and
++ * then update either the local or the remote side. So the
++ * cmpxchg64 below only protects one readout.
++ *
++ * We must reread via sched_clock_local() in the retry case on
++ * 32bit as an NMI could use sched_clock_local() via the
++ * tracer and hit between the readout of
++ * the low32bit and the high 32bit portion.
++ */
++ this_clock = sched_clock_local(my_scd);
++ /*
++ * We must enforce atomic readout on 32bit, otherwise the
++ * update on the remote cpu can hit inbetween the readout of
++ * the low32bit and the high 32bit portion.
++ */
++ remote_clock = cmpxchg64(&scd->clock, 0, 0);
++#else
++ /*
++ * On 64bit the read of [my]scd->clock is atomic versus the
++ * update, so we can avoid the above 32bit dance.
++ */
+ sched_clock_local(my_scd);
+ again:
+ this_clock = my_scd->clock;
+ remote_clock = scd->clock;
++#endif
+
+ /*
+ * Use the opportunity that we have both locks
+diff --git a/kernel/signal.c b/kernel/signal.c
+index ea76d30..3ecf574 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2790,7 +2790,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
+
+ static int do_tkill(pid_t tgid, pid_t pid, int sig)
+ {
+- struct siginfo info;
++ struct siginfo info = {};
+
+ info.si_signo = sig;
+ info.si_errno = 0;
+diff --git a/kernel/sys.c b/kernel/sys.c
+index f5939c2..be5fa8b 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -320,7 +320,6 @@ void kernel_restart_prepare(char *cmd)
+ system_state = SYSTEM_RESTART;
+ usermodehelper_disable();
+ device_shutdown();
+- syscore_shutdown();
+ }
+
+ /**
+@@ -366,6 +365,7 @@ void kernel_restart(char *cmd)
+ {
+ kernel_restart_prepare(cmd);
+ disable_nonboot_cpus();
++ syscore_shutdown();
+ if (!cmd)
+ printk(KERN_EMERG "Restarting system.\n");
+ else
+@@ -391,6 +391,7 @@ static void kernel_shutdown_prepare(enum system_states state)
+ void kernel_halt(void)
+ {
+ kernel_shutdown_prepare(SYSTEM_HALT);
++ disable_nonboot_cpus();
+ syscore_shutdown();
+ printk(KERN_EMERG "System halted.\n");
+ kmsg_dump(KMSG_DUMP_HALT);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 0943d2a..5527211 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -572,7 +572,6 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
+ free_page(tmp);
+ }
+
+- free_page((unsigned long)stat->pages);
+ stat->pages = NULL;
+ stat->start = NULL;
+
+@@ -2317,7 +2316,7 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
+ }
+
+ static loff_t
+-ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
++ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
+ {
+ loff_t ret;
+
+@@ -3135,7 +3134,7 @@ static const struct file_operations ftrace_filter_fops = {
+ .open = ftrace_filter_open,
+ .read = seq_read,
+ .write = ftrace_filter_write,
+- .llseek = ftrace_regex_lseek,
++ .llseek = ftrace_filter_lseek,
+ .release = ftrace_regex_release,
+ };
+
+@@ -3143,7 +3142,7 @@ static const struct file_operations ftrace_notrace_fops = {
+ .open = ftrace_notrace_open,
+ .read = seq_read,
+ .write = ftrace_notrace_write,
+- .llseek = ftrace_regex_lseek,
++ .llseek = ftrace_filter_lseek,
+ .release = ftrace_regex_release,
+ };
+
+@@ -3351,8 +3350,8 @@ static const struct file_operations ftrace_graph_fops = {
+ .open = ftrace_graph_open,
+ .read = seq_read,
+ .write = ftrace_graph_write,
++ .llseek = ftrace_filter_lseek,
+ .release = ftrace_graph_release,
+- .llseek = seq_lseek,
+ };
+ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+@@ -3844,7 +3843,7 @@ static const struct file_operations ftrace_pid_fops = {
+ .open = ftrace_pid_open,
+ .write = ftrace_pid_write,
+ .read = seq_read,
+- .llseek = seq_lseek,
++ .llseek = ftrace_filter_lseek,
+ .release = ftrace_pid_release,
+ };
+
+@@ -3964,12 +3963,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
+ ftrace_startup_sysctl();
+
+ /* we are starting ftrace again */
+- if (ftrace_ops_list != &ftrace_list_end) {
+- if (ftrace_ops_list->next == &ftrace_list_end)
+- ftrace_trace_function = ftrace_ops_list->func;
+- else
+- ftrace_trace_function = ftrace_ops_list_func;
+- }
++ if (ftrace_ops_list != &ftrace_list_end)
++ update_ftrace_function();
+
+ } else {
+ /* stopping ftrace calls (just send to ftrace_stub) */
+diff --git a/lib/kobject.c b/lib/kobject.c
+index 640bd98..83bd5b3 100644
+--- a/lib/kobject.c
++++ b/lib/kobject.c
+@@ -531,6 +531,13 @@ struct kobject *kobject_get(struct kobject *kobj)
+ return kobj;
+ }
+
++static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
++{
++ if (!kref_get_unless_zero(&kobj->kref))
++ kobj = NULL;
++ return kobj;
++}
++
+ /*
+ * kobject_cleanup - free kobject resources.
+ * @kobj: object to cleanup
+@@ -785,7 +792,7 @@ struct kobject *kset_find_obj_hinted(struct kset *kset, const char *name,
+ slow_search:
+ list_for_each_entry(k, &kset->list, entry) {
+ if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
+- ret = kobject_get(k);
++ ret = kobject_get_unless_zero(k);
+ break;
+ }
+ }
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 4c7d42a..70b4733 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2889,7 +2889,17 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ break;
+ }
+
+- if (absent ||
++ /*
++ * We need call hugetlb_fault for both hugepages under migration
++ * (in which case hugetlb_fault waits for the migration,) and
++ * hwpoisoned hugepages (in which case we need to prevent the
++ * caller from accessing to them.) In order to do this, we use
++ * here is_swap_pte instead of is_hugetlb_entry_migration and
++ * is_hugetlb_entry_hwpoisoned. This is because it simply covers
++ * both cases, and because we can't follow correct pages
++ * directly from any kind of swap entries.
++ */
++ if (absent || is_swap_pte(huge_ptep_get(pte)) ||
+ ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
+ int ret;
+
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 50f0824..ea3f83b 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -1801,6 +1801,24 @@ int __set_page_dirty_nobuffers(struct page *page)
+ EXPORT_SYMBOL(__set_page_dirty_nobuffers);
+
+ /*
++ * Call this whenever redirtying a page, to de-account the dirty counters
++ * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written
++ * counters (NR_WRITTEN, BDI_WRITTEN) in long term. The mismatches will lead to
++ * systematic errors in balanced_dirty_ratelimit and the dirty pages position
++ * control.
++ */
++void account_page_redirty(struct page *page)
++{
++ struct address_space *mapping = page->mapping;
++ if (mapping && mapping_cap_account_dirty(mapping)) {
++ current->nr_dirtied--;
++ dec_zone_page_state(page, NR_DIRTIED);
++ dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
++ }
++}
++EXPORT_SYMBOL(account_page_redirty);
++
++/*
+ * When a writepage implementation decides that it doesn't want to write this
+ * page for some reason, it should redirty the locked page via
+ * redirty_page_for_writepage() and it should then unlock the page and return 0
+@@ -1808,6 +1826,7 @@ EXPORT_SYMBOL(__set_page_dirty_nobuffers);
+ int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
+ {
+ wbc->pages_skipped++;
++ account_page_redirty(page);
+ return __set_page_dirty_nobuffers(page);
+ }
+ EXPORT_SYMBOL(redirty_page_for_writepage);
+diff --git a/net/can/gw.c b/net/can/gw.c
+index 3d79b12..f78f898 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -436,7 +436,7 @@ static int cgw_notifier(struct notifier_block *nb,
+ if (gwj->src.dev == dev || gwj->dst.dev == dev) {
+ hlist_del(&gwj->list);
+ cgw_unregister_filter(gwj);
+- kfree(gwj);
++ kmem_cache_free(cgw_cache, gwj);
+ }
+ }
+ }
+@@ -850,7 +850,7 @@ static void cgw_remove_all_jobs(void)
+ hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
+ hlist_del(&gwj->list);
+ cgw_unregister_filter(gwj);
+- kfree(gwj);
++ kmem_cache_free(cgw_cache, gwj);
+ }
+ }
+
+@@ -903,7 +903,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+
+ hlist_del(&gwj->list);
+ cgw_unregister_filter(gwj);
+- kfree(gwj);
++ kmem_cache_free(cgw_cache, gwj);
+ err = 0;
+ break;
+ }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 1e8a882..2c73adf 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -761,6 +761,20 @@ void cred_to_ucred(struct pid *pid, const struct cred *cred,
+ }
+ EXPORT_SYMBOL_GPL(cred_to_ucred);
+
++void cred_real_to_ucred(struct pid *pid, const struct cred *cred,
++ struct ucred *ucred)
++{
++ ucred->pid = pid_vnr(pid);
++ ucred->uid = ucred->gid = -1;
++ if (cred) {
++ struct user_namespace *current_ns = current_user_ns();
++
++ ucred->uid = user_ns_map_uid(current_ns, cred, cred->uid);
++ ucred->gid = user_ns_map_gid(current_ns, cred, cred->gid);
++ }
++}
++EXPORT_SYMBOL_GPL(cred_real_to_ucred);
++
+ int sock_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+ {
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 7747d26..4707b6c 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -163,7 +163,7 @@ const char *snd_hda_get_jack_type(u32 cfg)
+ "Line Out", "Speaker", "HP Out", "CD",
+ "SPDIF Out", "Digital Out", "Modem Line", "Modem Hand",
+ "Line In", "Aux", "Mic", "Telephony",
+- "SPDIF In", "Digitial In", "Reserved", "Other"
++ "SPDIF In", "Digital In", "Reserved", "Other"
+ };
+
+ return jack_types[(cfg & AC_DEFCFG_DEVICE)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f3e0b24..1b43fde 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5595,7 +5595,8 @@ static int alc662_parse_auto_config(struct hda_codec *codec)
+ const hda_nid_t *ssids;
+
+ if (codec->vendor_id == 0x10ec0272 || codec->vendor_id == 0x10ec0663 ||
+- codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670)
++ codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670 ||
++ codec->vendor_id == 0x10ec0671)
+ ssids = alc663_ssids;
+ else
+ ssids = alc662_ssids;
+@@ -6045,6 +6046,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 },
+ { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 },
+ { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 },
++ { .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 },
+ { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 },
+ { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
+ { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 },
+diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
+index 4ad8ebd..4352ffb 100644
+--- a/sound/soc/codecs/wm8903.c
++++ b/sound/soc/codecs/wm8903.c
+@@ -1101,6 +1101,8 @@ static const struct snd_soc_dapm_route wm8903_intercon[] = {
+ { "ROP", NULL, "Right Speaker PGA" },
+ { "RON", NULL, "Right Speaker PGA" },
+
++ { "Charge Pump", NULL, "CLK_DSP" },
++
+ { "Left Headphone Output PGA", NULL, "Charge Pump" },
+ { "Right Headphone Output PGA", NULL, "Charge Pump" },
+ { "Left Line Output PGA", NULL, "Charge Pump" },
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 38a607a..fb95069 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -396,7 +396,7 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
+ else
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+- 0, cpu_to_le16(wIndex),
++ 0, wIndex,
+ &tmp, sizeof(tmp), 1000);
+ up_read(&mixer->chip->shutdown_rwsem);
+
+@@ -427,7 +427,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
+ else
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+- cpu_to_le16(wValue), cpu_to_le16(wIndex),
++ wValue, wIndex,
+ NULL, 0, 1000);
+ up_read(&mixer->chip->shutdown_rwsem);
+
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 1b275f0..dfbd65d 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -482,7 +482,7 @@ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev)
+ {
+ int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+- cpu_to_le16(1), 0, NULL, 0, 1000);
++ 1, 0, NULL, 0, 1000);
+
+ if (ret < 0)
+ return ret;
+diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
+index 3eed61e..79647cd 100644
+--- a/virt/kvm/ioapic.c
++++ b/virt/kvm/ioapic.c
+@@ -73,9 +73,12 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
+ u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
+ u64 redir_content;
+
+- ASSERT(redir_index < IOAPIC_NUM_PINS);
++ if (redir_index < IOAPIC_NUM_PINS)
++ redir_content =
++ ioapic->redirtbl[redir_index].bits;
++ else
++ redir_content = ~0ULL;
+
+- redir_content = ioapic->redirtbl[redir_index].bits;
+ result = (ioapic->ioregsel & 0x1) ?
+ (redir_content >> 32) & 0xffffffff :
+ redir_content & 0xffffffff;
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index ec747dc..8bf05f0 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1401,21 +1401,38 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
+ }
+
+ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+- gpa_t gpa)
++ gpa_t gpa, unsigned long len)
+ {
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ int offset = offset_in_page(gpa);
+- gfn_t gfn = gpa >> PAGE_SHIFT;
++ gfn_t start_gfn = gpa >> PAGE_SHIFT;
++ gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
++ gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
++ gfn_t nr_pages_avail;
+
+ ghc->gpa = gpa;
+ ghc->generation = slots->generation;
+- ghc->memslot = __gfn_to_memslot(slots, gfn);
+- ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
+- if (!kvm_is_error_hva(ghc->hva))
++ ghc->len = len;
++ ghc->memslot = __gfn_to_memslot(slots, start_gfn);
++ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
++ if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
+ ghc->hva += offset;
+- else
+- return -EFAULT;
+-
++ } else {
++ /*
++ * If the requested region crosses two memslots, we still
++ * verify that the entire region is valid here.
++ */
++ while (start_gfn <= end_gfn) {
++ ghc->memslot = __gfn_to_memslot(slots, start_gfn);
++ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
++ &nr_pages_avail);
++ if (kvm_is_error_hva(ghc->hva))
++ return -EFAULT;
++ start_gfn += nr_pages_avail;
++ }
++ /* Use the slow path for cross page reads and writes. */
++ ghc->memslot = NULL;
++ }
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
+@@ -1426,8 +1443,13 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ int r;
+
++ BUG_ON(len > ghc->len);
++
+ if (slots->generation != ghc->generation)
+- kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
++ kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
++
++ if (unlikely(!ghc->memslot))
++ return kvm_write_guest(kvm, ghc->gpa, data, len);
+
+ if (kvm_is_error_hva(ghc->hva))
+ return -EFAULT;
+@@ -1447,8 +1469,13 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ int r;
+
++ BUG_ON(len > ghc->len);
++
+ if (slots->generation != ghc->generation)
+- kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
++ kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
++
++ if (unlikely(!ghc->memslot))
++ return kvm_read_guest(kvm, ghc->gpa, data, len);
+
+ if (kvm_is_error_hva(ghc->hva))
+ return -EFAULT;
diff --git a/1044_linux-3.2.45.patch b/1044_linux-3.2.45.patch
new file mode 100644
index 00000000..44e17678
--- /dev/null
+++ b/1044_linux-3.2.45.patch
@@ -0,0 +1,3809 @@
+diff --git a/Makefile b/Makefile
+index 566750c..9072fee 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 44
++SUBLEVEL = 45
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/mach-u300/include/mach/u300-regs.h b/arch/arm/mach-u300/include/mach/u300-regs.h
+index 035fdc9..a8b71f2 100644
+--- a/arch/arm/mach-u300/include/mach/u300-regs.h
++++ b/arch/arm/mach-u300/include/mach/u300-regs.h
+@@ -102,7 +102,7 @@
+
+ #ifdef CONFIG_MACH_U300_BS335
+ /* Fast UART1 on U335 only */
+-#define U300_UART1_BASE (U300_SLOW_PER_PHYS_BASE+0x7000)
++#define U300_UART1_BASE (U300_FAST_PER_PHYS_BASE+0x7000)
+ #endif
+
+ /*
+diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
+index 21ab376..1bd14d5 100644
+--- a/arch/ia64/include/asm/futex.h
++++ b/arch/ia64/include/asm/futex.h
+@@ -107,16 +107,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ return -EFAULT;
+
+ {
+- register unsigned long r8 __asm ("r8");
++ register unsigned long r8 __asm ("r8") = 0;
+ unsigned long prev;
+ __asm__ __volatile__(
+ " mf;; \n"
+- " mov %0=r0 \n"
+ " mov ar.ccv=%4;; \n"
+ "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n"
+ " .xdata4 \"__ex_table\", 1b-., 2f-. \n"
+ "[2:]"
+- : "=r" (r8), "=r" (prev)
++ : "+r" (r8), "=&r" (prev)
+ : "r" (uaddr), "r" (newval),
+ "rO" ((long) (unsigned) oldval)
+ : "memory");
+diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
+index 43f96ab..8c70961 100644
+--- a/arch/ia64/include/asm/mca.h
++++ b/arch/ia64/include/asm/mca.h
+@@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CPUS];
+ extern int cpe_vector;
+ extern int ia64_cpe_irq;
+ extern void ia64_mca_init(void);
++extern void ia64_mca_irq_init(void);
+ extern void ia64_mca_cpu_init(void *);
+ extern void ia64_os_mca_dispatch(void);
+ extern void ia64_os_mca_dispatch_end(void);
+diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
+index ad69606..f2c41828 100644
+--- a/arch/ia64/kernel/irq.c
++++ b/arch/ia64/kernel/irq.c
+@@ -23,6 +23,8 @@
+ #include <linux/interrupt.h>
+ #include <linux/kernel_stat.h>
+
++#include <asm/mca.h>
++
+ /*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * each architecture has to answer this themselves.
+@@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask)
+
+ #endif /* CONFIG_SMP */
+
++int __init arch_early_irq_init(void)
++{
++ ia64_mca_irq_init();
++ return 0;
++}
++
+ #ifdef CONFIG_HOTPLUG_CPU
+ unsigned int vectors_in_migration[NR_IRQS];
+
+diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
+index 84fb405..9b97303 100644
+--- a/arch/ia64/kernel/mca.c
++++ b/arch/ia64/kernel/mca.c
+@@ -2071,22 +2071,16 @@ ia64_mca_init(void)
+ printk(KERN_INFO "MCA related initialization done\n");
+ }
+
++
+ /*
+- * ia64_mca_late_init
+- *
+- * Opportunity to setup things that require initialization later
+- * than ia64_mca_init. Setup a timer to poll for CPEs if the
+- * platform doesn't support an interrupt driven mechanism.
+- *
+- * Inputs : None
+- * Outputs : Status
++ * These pieces cannot be done in ia64_mca_init() because it is called before
++ * early_irq_init() which would wipe out our percpu irq registrations. But we
++ * cannot leave them until ia64_mca_late_init() because by then all the other
++ * processors have been brought online and have set their own CMC vectors to
++ * point at a non-existant action. Called from arch_early_irq_init().
+ */
+-static int __init
+-ia64_mca_late_init(void)
++void __init ia64_mca_irq_init(void)
+ {
+- if (!mca_init)
+- return 0;
+-
+ /*
+ * Configure the CMCI/P vector and handler. Interrupts for CMC are
+ * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
+@@ -2105,6 +2099,23 @@ ia64_mca_late_init(void)
+ /* Setup the CPEI/P handler */
+ register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
+ #endif
++}
++
++/*
++ * ia64_mca_late_init
++ *
++ * Opportunity to setup things that require initialization later
++ * than ia64_mca_init. Setup a timer to poll for CPEs if the
++ * platform doesn't support an interrupt driven mechanism.
++ *
++ * Inputs : None
++ * Outputs : Status
++ */
++static int __init
++ia64_mca_late_init(void)
++{
++ if (!mca_init)
++ return 0;
+
+ register_hotcpu_notifier(&mca_cpu_notifier);
+
+diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
+index 4332f7e..a7869f8 100644
+--- a/arch/ia64/kvm/vtlb.c
++++ b/arch/ia64/kvm/vtlb.c
+@@ -256,7 +256,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
+ "srlz.d;;"
+ "ssm psr.i;;"
+ "srlz.d;;"
+- : "=r"(ret) : "r"(iha), "r"(pte):"memory");
++ : "=&r"(ret) : "r"(iha), "r"(pte) : "memory");
+
+ return ret;
+ }
+diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
+index cdf6b3f..2c49227 100644
+--- a/arch/powerpc/kernel/head_64.S
++++ b/arch/powerpc/kernel/head_64.S
+@@ -502,6 +502,7 @@ _GLOBAL(copy_and_flush)
+ sync
+ addi r5,r5,8
+ addi r6,r6,8
++ isync
+ blr
+
+ .align 8
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index b22a83a..24523dc 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -221,7 +221,7 @@ int __node_distance(int a, int b)
+ int distance = LOCAL_DISTANCE;
+
+ if (!form1_affinity)
+- return distance;
++ return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
+
+ for (i = 0; i < distance_ref_points_depth; i++) {
+ if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
+diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
+index e481f6b..70ec4e9 100644
+--- a/arch/powerpc/platforms/cell/spufs/inode.c
++++ b/arch/powerpc/platforms/cell/spufs/inode.c
+@@ -100,6 +100,7 @@ spufs_new_inode(struct super_block *sb, int mode)
+ if (!inode)
+ goto out;
+
++ inode->i_ino = get_next_ino();
+ inode->i_mode = mode;
+ inode->i_uid = current_fsuid();
+ inode->i_gid = current_fsgid();
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 4f289ff..5aaf0bf 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -67,6 +67,10 @@ static inline int is_zero_pfn(unsigned long pfn)
+
+ #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+
++/* TODO: s390 cannot support io_remap_pfn_range... */
++#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
++ remap_pfn_range(vma, vaddr, pfn, size, prot)
++
+ #endif /* !__ASSEMBLY__ */
+
+ /*
+diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
+index 38ebb2c..ddbbea3 100644
+--- a/arch/sparc/include/asm/pgtable_64.h
++++ b/arch/sparc/include/asm/pgtable_64.h
+@@ -781,6 +781,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+ return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
+ }
+
++#include <asm/tlbflush.h>
+ #include <asm-generic/pgtable.h>
+
+ /* We provide our own get_unmapped_area to cope with VA holes and
+diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
+index 10bcabc..f856c7f 100644
+--- a/arch/sparc/include/asm/system_64.h
++++ b/arch/sparc/include/asm/system_64.h
+@@ -140,8 +140,7 @@ do { \
+ * and 2 stores in this critical code path. -DaveM
+ */
+ #define switch_to(prev, next, last) \
+-do { flush_tlb_pending(); \
+- save_and_clear_fpu(); \
++do { save_and_clear_fpu(); \
+ /* If you are tempted to conditionalize the following */ \
+ /* so that ASI is only written if it changes, think again. */ \
+ __asm__ __volatile__("wr %%g0, %0, %%asi" \
+diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
+index 2ef4634..f0d6a97 100644
+--- a/arch/sparc/include/asm/tlbflush_64.h
++++ b/arch/sparc/include/asm/tlbflush_64.h
+@@ -11,24 +11,40 @@
+ struct tlb_batch {
+ struct mm_struct *mm;
+ unsigned long tlb_nr;
++ unsigned long active;
+ unsigned long vaddrs[TLB_BATCH_NR];
+ };
+
+ extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
+ extern void flush_tsb_user(struct tlb_batch *tb);
++extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
+
+ /* TLB flush operations. */
+
+-extern void flush_tlb_pending(void);
++static inline void flush_tlb_mm(struct mm_struct *mm)
++{
++}
++
++static inline void flush_tlb_page(struct vm_area_struct *vma,
++ unsigned long vmaddr)
++{
++}
++
++static inline void flush_tlb_range(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end)
++{
++}
++
++#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+
+-#define flush_tlb_range(vma,start,end) \
+- do { (void)(start); flush_tlb_pending(); } while (0)
+-#define flush_tlb_page(vma,addr) flush_tlb_pending()
+-#define flush_tlb_mm(mm) flush_tlb_pending()
++extern void flush_tlb_pending(void);
++extern void arch_enter_lazy_mmu_mode(void);
++extern void arch_leave_lazy_mmu_mode(void);
++#define arch_flush_lazy_mmu_mode() do {} while (0)
+
+ /* Local cpu only. */
+ extern void __flush_tlb_all(void);
+-
++extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
+ extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+ #ifndef CONFIG_SMP
+@@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \
+ __flush_tlb_kernel_range(start,end); \
+ } while (0)
+
++static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
++{
++ __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
++}
++
+ #else /* CONFIG_SMP */
+
+ extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
++extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
+
+ #define flush_tlb_kernel_range(start, end) \
+ do { flush_tsb_kernel_range(start,end); \
+ smp_flush_tlb_kernel_range(start, end); \
+ } while (0)
+
++#define global_flush_tlb_page(mm, vaddr) \
++ smp_flush_tlb_page(mm, vaddr)
++
+ #endif /* ! CONFIG_SMP */
+
+ #endif /* _SPARC64_TLBFLUSH_H */
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index 7560772..e21d3c0d 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -856,7 +856,7 @@ void smp_tsb_sync(struct mm_struct *mm)
+ }
+
+ extern unsigned long xcall_flush_tlb_mm;
+-extern unsigned long xcall_flush_tlb_pending;
++extern unsigned long xcall_flush_tlb_page;
+ extern unsigned long xcall_flush_tlb_kernel_range;
+ extern unsigned long xcall_fetch_glob_regs;
+ extern unsigned long xcall_receive_signal;
+@@ -1070,23 +1070,56 @@ local_flush_and_out:
+ put_cpu();
+ }
+
++struct tlb_pending_info {
++ unsigned long ctx;
++ unsigned long nr;
++ unsigned long *vaddrs;
++};
++
++static void tlb_pending_func(void *info)
++{
++ struct tlb_pending_info *t = info;
++
++ __flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
++}
++
+ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
+ {
+ u32 ctx = CTX_HWBITS(mm->context);
++ struct tlb_pending_info info;
+ int cpu = get_cpu();
+
++ info.ctx = ctx;
++ info.nr = nr;
++ info.vaddrs = vaddrs;
++
+ if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
+ cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
+ else
+- smp_cross_call_masked(&xcall_flush_tlb_pending,
+- ctx, nr, (unsigned long) vaddrs,
+- mm_cpumask(mm));
++ smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
++ &info, 1);
+
+ __flush_tlb_pending(ctx, nr, vaddrs);
+
+ put_cpu();
+ }
+
++void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
++{
++ unsigned long context = CTX_HWBITS(mm->context);
++ int cpu = get_cpu();
++
++ if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
++ cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
++ else
++ smp_cross_call_masked(&xcall_flush_tlb_page,
++ context, vaddr, 0,
++ mm_cpumask(mm));
++ __flush_tlb_page(context, vaddr);
++
++ put_cpu();
++}
++
+ void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+ {
+ start &= PAGE_MASK;
+diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
+index b1f279c..afd021e 100644
+--- a/arch/sparc/mm/tlb.c
++++ b/arch/sparc/mm/tlb.c
+@@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
+ void flush_tlb_pending(void)
+ {
+ struct tlb_batch *tb = &get_cpu_var(tlb_batch);
++ struct mm_struct *mm = tb->mm;
+
+- if (tb->tlb_nr) {
+- flush_tsb_user(tb);
++ if (!tb->tlb_nr)
++ goto out;
+
+- if (CTX_VALID(tb->mm->context)) {
++ flush_tsb_user(tb);
++
++ if (CTX_VALID(mm->context)) {
++ if (tb->tlb_nr == 1) {
++ global_flush_tlb_page(mm, tb->vaddrs[0]);
++ } else {
+ #ifdef CONFIG_SMP
+ smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
+ &tb->vaddrs[0]);
+@@ -37,12 +43,30 @@ void flush_tlb_pending(void)
+ tb->tlb_nr, &tb->vaddrs[0]);
+ #endif
+ }
+- tb->tlb_nr = 0;
+ }
+
++ tb->tlb_nr = 0;
++
++out:
+ put_cpu_var(tlb_batch);
+ }
+
++void arch_enter_lazy_mmu_mode(void)
++{
++ struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
++
++ tb->active = 1;
++}
++
++void arch_leave_lazy_mmu_mode(void)
++{
++ struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
++
++ if (tb->tlb_nr)
++ flush_tlb_pending();
++ tb->active = 0;
++}
++
+ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+ pte_t *ptep, pte_t orig, int fullmm)
+ {
+@@ -90,6 +114,12 @@ no_cache_flush:
+ nr = 0;
+ }
+
++ if (!tb->active) {
++ global_flush_tlb_page(mm, vaddr);
++ flush_tsb_user_page(mm, vaddr);
++ goto out;
++ }
++
+ if (nr == 0)
+ tb->mm = mm;
+
+@@ -98,5 +128,6 @@ no_cache_flush:
+ if (nr >= TLB_BATCH_NR)
+ flush_tlb_pending();
+
++out:
+ put_cpu_var(tlb_batch);
+ }
+diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
+index 536412d..3ebcac7 100644
+--- a/arch/sparc/mm/tsb.c
++++ b/arch/sparc/mm/tsb.c
+@@ -8,11 +8,10 @@
+ #include <linux/slab.h>
+ #include <asm/system.h>
+ #include <asm/page.h>
+-#include <asm/tlbflush.h>
+-#include <asm/tlb.h>
+-#include <asm/mmu_context.h>
+ #include <asm/pgtable.h>
++#include <asm/mmu_context.h>
+ #include <asm/tsb.h>
++#include <asm/tlb.h>
+ #include <asm/oplib.h>
+
+ extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
+@@ -47,23 +46,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
+ }
+ }
+
+-static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+- unsigned long tsb, unsigned long nentries)
++static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
++ unsigned long hash_shift,
++ unsigned long nentries)
+ {
+- unsigned long i;
++ unsigned long tag, ent, hash;
+
+- for (i = 0; i < tb->tlb_nr; i++) {
+- unsigned long v = tb->vaddrs[i];
+- unsigned long tag, ent, hash;
++ v &= ~0x1UL;
++ hash = tsb_hash(v, hash_shift, nentries);
++ ent = tsb + (hash * sizeof(struct tsb));
++ tag = (v >> 22UL);
+
+- v &= ~0x1UL;
++ tsb_flush(ent, tag);
++}
+
+- hash = tsb_hash(v, hash_shift, nentries);
+- ent = tsb + (hash * sizeof(struct tsb));
+- tag = (v >> 22UL);
++static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
++ unsigned long tsb, unsigned long nentries)
++{
++ unsigned long i;
+
+- tsb_flush(ent, tag);
+- }
++ for (i = 0; i < tb->tlb_nr; i++)
++ __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
+ }
+
+ void flush_tsb_user(struct tlb_batch *tb)
+@@ -91,6 +94,30 @@ void flush_tsb_user(struct tlb_batch *tb)
+ spin_unlock_irqrestore(&mm->context.lock, flags);
+ }
+
++void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
++{
++ unsigned long nentries, base, flags;
++
++ spin_lock_irqsave(&mm->context.lock, flags);
++
++ base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
++ nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
++ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
++ base = __pa(base);
++ __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
++
++#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
++ if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
++ base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
++ nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
++ if (tlb_type == cheetah_plus || tlb_type == hypervisor)
++ base = __pa(base);
++ __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
++ }
++#endif
++ spin_unlock_irqrestore(&mm->context.lock, flags);
++}
++
+ #if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
+ #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K
+ #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K
+diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
+index 874162a..dd10caa 100644
+--- a/arch/sparc/mm/ultra.S
++++ b/arch/sparc/mm/ultra.S
+@@ -53,6 +53,33 @@ __flush_tlb_mm: /* 18 insns */
+ nop
+
+ .align 32
++ .globl __flush_tlb_page
++__flush_tlb_page: /* 22 insns */
++ /* %o0 = context, %o1 = vaddr */
++ rdpr %pstate, %g7
++ andn %g7, PSTATE_IE, %g2
++ wrpr %g2, %pstate
++ mov SECONDARY_CONTEXT, %o4
++ ldxa [%o4] ASI_DMMU, %g2
++ stxa %o0, [%o4] ASI_DMMU
++ andcc %o1, 1, %g0
++ andn %o1, 1, %o3
++ be,pn %icc, 1f
++ or %o3, 0x10, %o3
++ stxa %g0, [%o3] ASI_IMMU_DEMAP
++1: stxa %g0, [%o3] ASI_DMMU_DEMAP
++ membar #Sync
++ stxa %g2, [%o4] ASI_DMMU
++ sethi %hi(KERNBASE), %o4
++ flush %o4
++ retl
++ wrpr %g7, 0x0, %pstate
++ nop
++ nop
++ nop
++ nop
++
++ .align 32
+ .globl __flush_tlb_pending
+ __flush_tlb_pending: /* 26 insns */
+ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+@@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */
+ retl
+ wrpr %g7, 0x0, %pstate
+
++__cheetah_flush_tlb_page: /* 22 insns */
++ /* %o0 = context, %o1 = vaddr */
++ rdpr %pstate, %g7
++ andn %g7, PSTATE_IE, %g2
++ wrpr %g2, 0x0, %pstate
++ wrpr %g0, 1, %tl
++ mov PRIMARY_CONTEXT, %o4
++ ldxa [%o4] ASI_DMMU, %g2
++ srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
++ sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
++ or %o0, %o3, %o0 /* Preserve nucleus page size fields */
++ stxa %o0, [%o4] ASI_DMMU
++ andcc %o1, 1, %g0
++ be,pn %icc, 1f
++ andn %o1, 1, %o3
++ stxa %g0, [%o3] ASI_IMMU_DEMAP
++1: stxa %g0, [%o3] ASI_DMMU_DEMAP
++ membar #Sync
++ stxa %g2, [%o4] ASI_DMMU
++ sethi %hi(KERNBASE), %o4
++ flush %o4
++ wrpr %g0, 0, %tl
++ retl
++ wrpr %g7, 0x0, %pstate
++
+ __cheetah_flush_tlb_pending: /* 27 insns */
+ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+ rdpr %pstate, %g7
+@@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */
+ retl
+ nop
+
++__hypervisor_flush_tlb_page: /* 11 insns */
++ /* %o0 = context, %o1 = vaddr */
++ mov %o0, %g2
++ mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
++ mov %g2, %o1 /* ARG1: mmu context */
++ mov HV_MMU_ALL, %o2 /* ARG2: flags */
++ srlx %o0, PAGE_SHIFT, %o0
++ sllx %o0, PAGE_SHIFT, %o0
++ ta HV_MMU_UNMAP_ADDR_TRAP
++ brnz,pn %o0, __hypervisor_tlb_tl0_error
++ mov HV_MMU_UNMAP_ADDR_TRAP, %o1
++ retl
++ nop
++
+ __hypervisor_flush_tlb_pending: /* 16 insns */
+ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+ sllx %o1, 3, %g1
+@@ -339,6 +405,13 @@ cheetah_patch_cachetlbops:
+ call tlb_patch_one
+ mov 19, %o2
+
++ sethi %hi(__flush_tlb_page), %o0
++ or %o0, %lo(__flush_tlb_page), %o0
++ sethi %hi(__cheetah_flush_tlb_page), %o1
++ or %o1, %lo(__cheetah_flush_tlb_page), %o1
++ call tlb_patch_one
++ mov 22, %o2
++
+ sethi %hi(__flush_tlb_pending), %o0
+ or %o0, %lo(__flush_tlb_pending), %o0
+ sethi %hi(__cheetah_flush_tlb_pending), %o1
+@@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */
+ nop
+ nop
+
+- .globl xcall_flush_tlb_pending
+-xcall_flush_tlb_pending: /* 21 insns */
+- /* %g5=context, %g1=nr, %g7=vaddrs[] */
+- sllx %g1, 3, %g1
++ .globl xcall_flush_tlb_page
++xcall_flush_tlb_page: /* 17 insns */
++ /* %g5=context, %g1=vaddr */
+ mov PRIMARY_CONTEXT, %g4
+ ldxa [%g4] ASI_DMMU, %g2
+ srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
+@@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */
+ or %g5, %g4, %g5
+ mov PRIMARY_CONTEXT, %g4
+ stxa %g5, [%g4] ASI_DMMU
+-1: sub %g1, (1 << 3), %g1
+- ldx [%g7 + %g1], %g5
+- andcc %g5, 0x1, %g0
++ andcc %g1, 0x1, %g0
+ be,pn %icc, 2f
+-
+- andn %g5, 0x1, %g5
++ andn %g1, 0x1, %g5
+ stxa %g0, [%g5] ASI_IMMU_DEMAP
+ 2: stxa %g0, [%g5] ASI_DMMU_DEMAP
+ membar #Sync
+- brnz,pt %g1, 1b
+- nop
+ stxa %g2, [%g4] ASI_DMMU
+ retry
+ nop
++ nop
+
+ .globl xcall_flush_tlb_kernel_range
+ xcall_flush_tlb_kernel_range: /* 25 insns */
+@@ -596,15 +664,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
+ membar #Sync
+ retry
+
+- .globl __hypervisor_xcall_flush_tlb_pending
+-__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
+- /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
+- sllx %g1, 3, %g1
++ .globl __hypervisor_xcall_flush_tlb_page
++__hypervisor_xcall_flush_tlb_page: /* 17 insns */
++ /* %g5=ctx, %g1=vaddr */
+ mov %o0, %g2
+ mov %o1, %g3
+ mov %o2, %g4
+-1: sub %g1, (1 << 3), %g1
+- ldx [%g7 + %g1], %o0 /* ARG0: virtual address */
++ mov %g1, %o0 /* ARG0: virtual address */
+ mov %g5, %o1 /* ARG1: mmu context */
+ mov HV_MMU_ALL, %o2 /* ARG2: flags */
+ srlx %o0, PAGE_SHIFT, %o0
+@@ -613,8 +679,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */
+ mov HV_MMU_UNMAP_ADDR_TRAP, %g6
+ brnz,a,pn %o0, __hypervisor_tlb_xcall_error
+ mov %o0, %g5
+- brnz,pt %g1, 1b
+- nop
+ mov %g2, %o0
+ mov %g3, %o1
+ mov %g4, %o2
+@@ -697,6 +761,13 @@ hypervisor_patch_cachetlbops:
+ call tlb_patch_one
+ mov 10, %o2
+
++ sethi %hi(__flush_tlb_page), %o0
++ or %o0, %lo(__flush_tlb_page), %o0
++ sethi %hi(__hypervisor_flush_tlb_page), %o1
++ or %o1, %lo(__hypervisor_flush_tlb_page), %o1
++ call tlb_patch_one
++ mov 11, %o2
++
+ sethi %hi(__flush_tlb_pending), %o0
+ or %o0, %lo(__flush_tlb_pending), %o0
+ sethi %hi(__hypervisor_flush_tlb_pending), %o1
+@@ -728,12 +799,12 @@ hypervisor_patch_cachetlbops:
+ call tlb_patch_one
+ mov 21, %o2
+
+- sethi %hi(xcall_flush_tlb_pending), %o0
+- or %o0, %lo(xcall_flush_tlb_pending), %o0
+- sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1
+- or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
++ sethi %hi(xcall_flush_tlb_page), %o0
++ or %o0, %lo(xcall_flush_tlb_page), %o0
++ sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
++ or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
+ call tlb_patch_one
+- mov 21, %o2
++ mov 17, %o2
+
+ sethi %hi(xcall_flush_tlb_kernel_range), %o0
+ or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index 957c216..4bb12f7 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -130,8 +130,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
+ };
+
+ static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
+- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
+- INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
++ INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
++ INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
++ EVENT_EXTRA_END
++};
++
++static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
++ INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
++ INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
+ EVENT_EXTRA_END
+ };
+
+@@ -1711,7 +1717,10 @@ __init int intel_pmu_init(void)
+
+ x86_pmu.event_constraints = intel_snb_event_constraints;
+ x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
+- x86_pmu.extra_regs = intel_snb_extra_regs;
++ if (boot_cpu_data.x86_model == 45)
++ x86_pmu.extra_regs = intel_snbep_extra_regs;
++ else
++ x86_pmu.extra_regs = intel_snb_extra_regs;
+ /* all extra regs are per-cpu when HT is on */
+ x86_pmu.er_flags |= ERF_HAS_RSP_1;
+ x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 34a7f40..a4cca06 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -44,11 +44,15 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
+ int i;
+ unsigned long puds = 0, pmds = 0, ptes = 0, tables;
+ unsigned long start = 0, good_end;
++ unsigned long pgd_extra = 0;
+ phys_addr_t base;
+
+ for (i = 0; i < nr_range; i++) {
+ unsigned long range, extra;
+
++ if ((mr[i].end >> PGDIR_SHIFT) - (mr[i].start >> PGDIR_SHIFT))
++ pgd_extra++;
++
+ range = mr[i].end - mr[i].start;
+ puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
+
+@@ -73,6 +77,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
+ tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+ tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+ tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
++ tables += (pgd_extra * PAGE_SIZE);
+
+ #ifdef CONFIG_X86_32
+ /* for fixmap */
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 69b9ef6..044f5d9 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -1391,8 +1391,11 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
+ switch (action) {
+ case CPU_UP_PREPARE:
+ xen_vcpu_setup(cpu);
+- if (xen_have_vector_callback)
++ if (xen_have_vector_callback) {
+ xen_init_lock_cpu(cpu);
++ if (xen_feature(XENFEAT_hvm_safe_pvclock))
++ xen_setup_timer(cpu);
++ }
+ break;
+ default:
+ break;
+diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
+index 9a23fff..6e4d5dc 100644
+--- a/arch/x86/xen/smp.c
++++ b/arch/x86/xen/smp.c
+@@ -563,6 +563,8 @@ static void xen_hvm_cpu_die(unsigned int cpu)
+ unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
++ xen_uninit_lock_cpu(cpu);
++ xen_teardown_timer(cpu);
+ native_cpu_die(cpu);
+ }
+
+diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
+index 0296a95..054cc01 100644
+--- a/arch/x86/xen/time.c
++++ b/arch/x86/xen/time.c
+@@ -497,7 +497,11 @@ static void xen_hvm_setup_cpu_clockevents(void)
+ {
+ int cpu = smp_processor_id();
+ xen_setup_runstate_info(cpu);
+- xen_setup_timer(cpu);
++ /*
++ * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
++ * doing it xen_hvm_cpu_notify (which gets called by smp_init during
++ * early bootup and also during CPU hotplug events).
++ */
+ xen_setup_cpu_clockevents();
+ }
+
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
+index ef5356c..0262210 100644
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -161,6 +161,8 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
+ else if (len < ds)
+ msg->msg_flags |= MSG_TRUNC;
+
++ msg->msg_namelen = 0;
++
+ lock_sock(sk);
+ if (ctx->more) {
+ ctx->more = 0;
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index 6a6dfc0..a1c4f0a 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -432,6 +432,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
+ long copied = 0;
+
+ lock_sock(sk);
++ msg->msg_namelen = 0;
+ for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
+ iovlen--, iov++) {
+ unsigned long seglen = iov->iov_len;
+diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
+index 7aff631..5b0f075 100644
+--- a/drivers/acpi/pci_root.c
++++ b/drivers/acpi/pci_root.c
+@@ -247,8 +247,8 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
+ *control &= OSC_PCI_CONTROL_MASKS;
+ capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
+ } else {
+- /* Run _OSC query for all possible controls. */
+- capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
++ /* Run _OSC query only with existing controls. */
++ capbuf[OSC_CONTROL_TYPE] = root->osc_control_set;
+ }
+
+ status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
+diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
+index 0833896..14d49e4 100644
+--- a/drivers/char/hpet.c
++++ b/drivers/char/hpet.c
+@@ -374,26 +374,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
+ struct hpet_dev *devp;
+ unsigned long addr;
+
+- if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
+- return -EINVAL;
+-
+ devp = file->private_data;
+ addr = devp->hd_hpets->hp_hpet_phys;
+
+ if (addr & (PAGE_SIZE - 1))
+ return -ENOSYS;
+
+- vma->vm_flags |= VM_IO;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+-
+- if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
+- PAGE_SIZE, vma->vm_page_prot)) {
+- printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
+- __func__);
+- return -EAGAIN;
+- }
+-
+- return 0;
++ return vm_iomap_memory(vma, addr, PAGE_SIZE);
+ #else
+ return -ENOSYS;
+ #endif
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index ca67338..c77fc67 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1007,56 +1007,50 @@ intel_teardown_mchbar(struct drm_device *dev)
+ release_resource(&dev_priv->mch_res);
+ }
+
+-#define PTE_ADDRESS_MASK 0xfffff000
+-#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
+-#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
+-#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
+-#define PTE_MAPPING_TYPE_CACHED (3 << 1)
+-#define PTE_MAPPING_TYPE_MASK (3 << 1)
+-#define PTE_VALID (1 << 0)
+-
+-/**
+- * i915_stolen_to_phys - take an offset into stolen memory and turn it into
+- * a physical one
+- * @dev: drm device
+- * @offset: address to translate
+- *
+- * Some chip functions require allocations from stolen space and need the
+- * physical address of the memory in question.
+- */
+-static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
++static unsigned long i915_stolen_to_physical(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev_priv->bridge_dev;
+ u32 base;
+
+-#if 0
+ /* On the machines I have tested the Graphics Base of Stolen Memory
+- * is unreliable, so compute the base by subtracting the stolen memory
+- * from the Top of Low Usable DRAM which is where the BIOS places
+- * the graphics stolen memory.
++ * is unreliable, so on those compute the base by subtracting the
++ * stolen memory from the Top of Low Usable DRAM which is where the
++ * BIOS places the graphics stolen memory.
++ *
++ * On gen2, the layout is slightly different with the Graphics Segment
++ * immediately following Top of Memory (or Top of Usable DRAM). Note
++ * it appears that TOUD is only reported by 865g, so we just use the
++ * top of memory as determined by the e820 probe.
++ *
++ * XXX gen2 requires an unavailable symbol and 945gm fails with
++ * its value of TOLUD.
+ */
+- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+- /* top 32bits are reserved = 0 */
++ base = 0;
++ if (INTEL_INFO(dev)->gen >= 6) {
++ /* Read Base Data of Stolen Memory Register (BDSM) directly.
++ * Note that there is also a MCHBAR miror at 0x1080c0 or
++ * we could use device 2:0x5c instead.
++ */
++ pci_read_config_dword(pdev, 0xB0, &base);
++ base &= ~4095; /* lower bits used for locking register */
++ } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
++ /* Read Graphics Base of Stolen Memory directly */
+ pci_read_config_dword(pdev, 0xA4, &base);
+- } else {
+- /* XXX presume 8xx is the same as i915 */
+- pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
+- }
+-#else
+- if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+- u16 val;
+- pci_read_config_word(pdev, 0xb0, &val);
+- base = val >> 4 << 20;
+- } else {
++#if 0
++ } else if (IS_GEN3(dev)) {
+ u8 val;
++ /* Stolen is immediately below Top of Low Usable DRAM */
+ pci_read_config_byte(pdev, 0x9c, &val);
+ base = val >> 3 << 27;
+- }
+- base -= dev_priv->mm.gtt->stolen_size;
++ base -= dev_priv->mm.gtt->stolen_size;
++ } else {
++ /* Stolen is immediately above Top of Memory */
++ base = max_low_pfn_mapped << PAGE_SHIFT;
+ #endif
++ }
+
+- return base + offset;
++ return base;
+ }
+
+ static void i915_warn_stolen(struct drm_device *dev)
+@@ -1081,7 +1075,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ if (!compressed_fb)
+ goto err;
+
+- cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
++ cfb_base = dev_priv->mm.stolen_base + compressed_fb->start;
+ if (!cfb_base)
+ goto err_fb;
+
+@@ -1094,7 +1088,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ if (!compressed_llb)
+ goto err_fb;
+
+- ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
++ ll_base = dev_priv->mm.stolen_base + compressed_llb->start;
+ if (!ll_base)
+ goto err_llb;
+ }
+@@ -1113,7 +1107,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ }
+
+ DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+- cfb_base, ll_base, size >> 20);
++ (long)cfb_base, (long)ll_base, size >> 20);
+ return;
+
+ err_llb:
+@@ -1187,6 +1181,13 @@ static int i915_load_gem_init(struct drm_device *dev)
+ gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+ mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
++ dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
++ if (dev_priv->mm.stolen_base == 0)
++ return 0;
++
++ DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n",
++ dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base);
++
+ /* Basic memrange allocator for stolen space */
+ drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 144d37c..20cd295 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -581,6 +581,7 @@ typedef struct drm_i915_private {
+ unsigned long gtt_start;
+ unsigned long gtt_mappable_end;
+ unsigned long gtt_end;
++ unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+ struct io_mapping *gtt_mapping;
+ int gtt_mtrr;
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index b0186b8..2865b44 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2520,6 +2520,11 @@ i915_find_fence_reg(struct drm_device *dev,
+ return avail;
+ }
+
++static void i915_gem_write_fence__ipi(void *data)
++{
++ wbinvd();
++}
++
+ /**
+ * i915_gem_object_get_fence - set up a fence reg for an object
+ * @obj: object to map through a fence reg
+@@ -2640,6 +2645,17 @@ update:
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
++ /* In order to fully serialize access to the fenced region and
++ * the update to the fence register we need to take extreme
++ * measures on SNB+. In theory, the write to the fence register
++ * flushes all memory transactions before, and coupled with the
++ * mb() placed around the register write we serialise all memory
++ * operations with respect to the changes in the tiler. Yet, on
++ * SNB+ we need to take a step further and emit an explicit wbinvd()
++ * on each processor in order to manually flush all memory
++ * transactions before updating the fence register.
++ */
++ on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
+ ret = sandybridge_write_fence_reg(obj, pipelined);
+ break;
+ case 5:
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 897ca06..cfbb893 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -9093,6 +9093,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
+ del_timer_sync(&dev_priv->idle_timer);
+ cancel_work_sync(&dev_priv->idle_work);
+
++ /* destroy backlight, if any, before the connectors */
++ intel_panel_destroy_backlight(dev);
++
+ drm_mode_config_cleanup(dev);
+ }
+
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index c8ecaab..a07ccab 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -2274,11 +2274,6 @@ done:
+ static void
+ intel_dp_destroy(struct drm_connector *connector)
+ {
+- struct drm_device *dev = connector->dev;
+-
+- if (intel_dpd_is_edp(dev))
+- intel_panel_destroy_backlight(dev);
+-
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
+index 6eda1b5..8ac91b8 100644
+--- a/drivers/gpu/drm/i915/intel_dvo.c
++++ b/drivers/gpu/drm/i915/intel_dvo.c
+@@ -371,6 +371,7 @@ void intel_dvo_init(struct drm_device *dev)
+ const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
+ struct i2c_adapter *i2c;
+ int gpio;
++ bool dvoinit;
+
+ /* Allow the I2C driver info to specify the GPIO to be used in
+ * special cases, but otherwise default to what's defined
+@@ -390,7 +391,17 @@ void intel_dvo_init(struct drm_device *dev)
+ i2c = &dev_priv->gmbus[gpio].adapter;
+
+ intel_dvo->dev = *dvo;
+- if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
++
++ /* GMBUS NAK handling seems to be unstable, hence let the
++ * transmitter detection run in bit banging mode for now.
++ */
++ intel_gmbus_force_bit(i2c, true);
++
++ dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
++
++ intel_gmbus_force_bit(i2c, false);
++
++ if (!dvoinit)
+ continue;
+
+ intel_encoder->type = INTEL_OUTPUT_DVO;
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 6601d21..876bac0 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -553,8 +553,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+- intel_panel_destroy_backlight(dev);
+-
+ if (dev_priv->lid_notifier.notifier_call)
+ acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
+ drm_sysfs_connector_remove(connector);
+@@ -788,6 +786,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
+ },
+ },
++ {
++ .callback = intel_no_lvds_dmi_callback,
++ .ident = "Fujitsu Esprimo Q900",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
++ },
++ },
+
+ { } /* terminating entry */
+ };
+diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
+index 72b8949..04cb34a 100644
+--- a/drivers/gpu/drm/i915/intel_panel.c
++++ b/drivers/gpu/drm/i915/intel_panel.c
+@@ -361,6 +361,9 @@ int intel_panel_setup_backlight(struct drm_device *dev)
+
+ intel_panel_init_backlight(dev);
+
++ if (WARN_ON(dev_priv->backlight))
++ return -ENODEV;
++
+ if (dev_priv->int_lvds_connector)
+ connector = dev_priv->int_lvds_connector;
+ else if (dev_priv->int_edp_connector)
+@@ -388,8 +391,10 @@ int intel_panel_setup_backlight(struct drm_device *dev)
+ void intel_panel_destroy_backlight(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+- if (dev_priv->backlight)
++ if (dev_priv->backlight) {
+ backlight_device_unregister(dev_priv->backlight);
++ dev_priv->backlight = NULL;
++ }
+ }
+ #else
+ int intel_panel_setup_backlight(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
+index 3a05cdb..d969f3c 100644
+--- a/drivers/gpu/drm/radeon/atom.c
++++ b/drivers/gpu/drm/radeon/atom.c
+@@ -1387,10 +1387,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
+ firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+
+ DRM_DEBUG("atom firmware requested %08x %dkb\n",
+- firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
+- firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
++ le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
++ le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
+
+- usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
++ usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+ }
+ ctx->scratch_size_bytes = 0;
+ if (usage_bytes == 0)
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index a25d08a..038570a 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -544,6 +544,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+ /* use frac fb div on APUs */
+ if (ASIC_IS_DCE41(rdev))
+ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
++ /* use frac fb div on RS780/RS880 */
++ if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
++ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
+ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ } else {
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 60d13fe..0495a50 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -412,6 +412,16 @@ void evergreen_hpd_init(struct radeon_device *rdev)
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++
++ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
++ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
++ /* don't try to enable hpd on eDP or LVDS avoid breaking the
++ * aux dp channel on imac and help (but not completely fix)
++ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
++ * also avoid interrupt storms during dpms.
++ */
++ continue;
++ }
+ switch (radeon_connector->hpd.hpd) {
+ case RADEON_HPD_1:
+ WREG32(DC_HPD1_CONTROL, tmp);
+diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
+index c45d921..57a825d 100644
+--- a/drivers/gpu/drm/radeon/r600_hdmi.c
++++ b/drivers/gpu/drm/radeon/r600_hdmi.c
+@@ -506,7 +506,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
+ offset = radeon_encoder->hdmi_offset;
+ if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+ WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
+- } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
++ } else if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
+@@ -572,7 +572,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
+
+ if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+ WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
+- } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
++ } else if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 38585c5..383b38e 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -1989,6 +1989,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
+ num_modes = power_info->info.ucNumOfPowerModeEntries;
+ if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
+ num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
++ if (num_modes == 0)
++ return state_index;
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
+ if (!rdev->pm.power_state)
+ return state_index;
+@@ -2361,6 +2363,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
++ if (power_info->pplib.ucNumStates == 0)
++ return state_index;
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+ power_info->pplib.ucNumStates, GFP_KERNEL);
+ if (!rdev->pm.power_state)
+@@ -2443,6 +2447,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
++ u8 *power_state_offset;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+@@ -2459,15 +2464,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+ non_clock_info_array = (struct NonClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
++ if (state_array->ucNumEntries == 0)
++ return state_index;
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+ state_array->ucNumEntries, GFP_KERNEL);
+ if (!rdev->pm.power_state)
+ return state_index;
++ power_state_offset = (u8 *)state_array->states;
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ mode_index = 0;
+- power_state = (union pplib_power_state *)&state_array->states[i];
+- /* XXX this might be an inagua bug... */
+- non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
++ power_state = (union pplib_power_state *)power_state_offset;
++ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
+@@ -2479,9 +2486,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+ if (power_state->v2.ucNumDPMLevels) {
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = power_state->v2.clockInfoIndex[j];
+- /* XXX this might be an inagua bug... */
+- if (clock_array_index >= clock_info_array->ucNumEntries)
+- continue;
+ clock_info = (union pplib_clock_info *)
+ &clock_info_array->clockInfo[clock_array_index];
+ valid = radeon_atombios_parse_pplib_clock_info(rdev,
+@@ -2503,6 +2507,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+ non_clock_info);
+ state_index++;
+ }
++ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ }
+ /* if multiple clock modes, mark the lowest as no display */
+ for (i = 0; i < state_index; i++) {
+@@ -2549,7 +2554,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
+ default:
+ break;
+ }
+- } else {
++ }
++
++ if (state_index == 0) {
+ rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
+ if (rdev->pm.power_state) {
+ rdev->pm.power_state[0].clock_info =
+diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
+index be2c122..4bb9e27 100644
+--- a/drivers/gpu/drm/radeon/radeon_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_kms.c
+@@ -39,8 +39,12 @@ int radeon_driver_unload_kms(struct drm_device *dev)
+
+ if (rdev == NULL)
+ return 0;
++ if (rdev->rmmio == NULL)
++ goto done_free;
+ radeon_modeset_fini(rdev);
+ radeon_device_fini(rdev);
++
++done_free:
+ kfree(rdev);
+ dev->dev_private = NULL;
+ return 0;
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index ebd6c51..d58eccb 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -863,7 +863,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
+ struct radeon_device *rdev = dev->dev_private;
+
+ seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
+- seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
++ /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
++ if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
++ seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
++ else
++ seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
+ if (rdev->asic->get_memory_clock)
+ seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
+index 4bb68f3..64e7065 100644
+--- a/drivers/i2c/busses/i2c-xiic.c
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -311,10 +311,8 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
+ /* last message in transfer -> STOP */
+ data |= XIIC_TX_DYN_STOP_MASK;
+ dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
+-
+- xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
+- } else
+- xiic_setreg8(i2c, XIIC_DTR_REG_OFFSET, data);
++ }
++ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
+ }
+ }
+
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 1702133..2d0544c 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1588,8 +1588,8 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
+ sector, count, 1) == 0)
+ return -EINVAL;
+ }
+- } else if (sb->bblog_offset == 0)
+- rdev->badblocks.shift = -1;
++ } else if (sb->bblog_offset != 0)
++ rdev->badblocks.shift = 0;
+
+ if (!refdev) {
+ ret = 1;
+@@ -3063,7 +3063,7 @@ int md_rdev_init(struct md_rdev *rdev)
+ * be used - I wonder if that matters
+ */
+ rdev->badblocks.count = 0;
+- rdev->badblocks.shift = 0;
++ rdev->badblocks.shift = -1; /* disabled until explicitly enabled */
+ rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ seqlock_init(&rdev->badblocks.lock);
+ if (rdev->badblocks.page == NULL)
+@@ -3135,9 +3135,6 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
+ goto abort_free;
+ }
+ }
+- if (super_format == -1)
+- /* hot-add for 0.90, or non-persistent: so no badblocks */
+- rdev->badblocks.shift = -1;
+
+ return rdev;
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index fc07f90..b436b84 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1866,6 +1866,7 @@ err_detach:
+ write_unlock_bh(&bond->lock);
+
+ err_close:
++ slave_dev->priv_flags &= ~IFF_BONDING;
+ dev_close(slave_dev);
+
+ err_unset_master:
+@@ -4853,9 +4854,18 @@ static int __net_init bond_net_init(struct net *net)
+ static void __net_exit bond_net_exit(struct net *net)
+ {
+ struct bond_net *bn = net_generic(net, bond_net_id);
++ struct bonding *bond, *tmp_bond;
++ LIST_HEAD(list);
+
+ bond_destroy_sysfs(bn);
+ bond_destroy_proc_dir(bn);
++
++ /* Kill off any bonds created after unregistering bond rtnl ops */
++ rtnl_lock();
++ list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
++ unregister_netdevice_queue(bond->dev, &list);
++ unregister_netdevice_many(&list);
++ rtnl_unlock();
+ }
+
+ static struct pernet_operations bond_net_ops = {
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
+index edfdf6b..b5fd934 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
+@@ -186,7 +186,7 @@ struct atl1e_tpd_desc {
+ /* how about 0x2000 */
+ #define MAX_TX_BUF_LEN 0x2000
+ #define MAX_TX_BUF_SHIFT 13
+-/*#define MAX_TX_BUF_LEN 0x3000 */
++#define MAX_TSO_SEG_SIZE 0x3c00
+
+ /* rrs word 1 bit 0:31 */
+ #define RRS_RX_CSUM_MASK 0xFFFF
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+index c69dc29..dd893b3 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -2352,6 +2352,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
+
+ INIT_WORK(&adapter->reset_task, atl1e_reset_task);
+ INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
++ netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
+ err = register_netdev(netdev);
+ if (err) {
+ netdev_err(netdev, "register netdevice failed\n");
+diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
+index f67b8ae..69c3adf 100644
+--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
++++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
+@@ -127,7 +127,6 @@ struct gianfar_ptp_registers {
+
+ #define DRIVER "gianfar_ptp"
+ #define DEFAULT_CKSEL 1
+-#define N_ALARM 1 /* first alarm is used internally to reset fipers */
+ #define N_EXT_TS 2
+ #define REG_SIZE sizeof(struct gianfar_ptp_registers)
+
+@@ -410,7 +409,7 @@ static struct ptp_clock_info ptp_gianfar_caps = {
+ .owner = THIS_MODULE,
+ .name = "gianfar clock",
+ .max_adj = 512000,
+- .n_alarm = N_ALARM,
++ .n_alarm = 0,
+ .n_ext_ts = N_EXT_TS,
+ .n_per_out = 0,
+ .pps = 1,
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 41396fa..d93eee1 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -1937,6 +1937,16 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
+ * with the write to EICR.
+ */
+ eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
++
++ /* The lower 16bits of the EICR register are for the queue interrupts
++ * which should be masked here in order to not accidently clear them if
++ * the bits are high when ixgbe_msix_other is called. There is a race
++ * condition otherwise which results in possible performance loss
++ * especially if the ixgbe_msix_other interrupt is triggering
++ * consistently (as it would when PPS is turned on for the X540 device)
++ */
++ eicr &= 0xFFFF0000;
++
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
+
+ if (eicr & IXGBE_EICR_LSC)
+@@ -5408,7 +5418,9 @@ static int ixgbe_resume(struct pci_dev *pdev)
+
+ pci_wake_from_d3(pdev, false);
+
++ rtnl_lock();
+ err = ixgbe_init_interrupt_scheme(adapter);
++ rtnl_unlock();
+ if (err) {
+ e_dev_err("Cannot initialize interrupts for device\n");
+ return err;
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index d812790..f698183 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -1629,8 +1629,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
+
+ if (opts2 & RxVlanTag)
+ __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
+-
+- desc->opts2 = 0;
+ }
+
+ static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
+@@ -5566,6 +5564,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ goto err_stop_0;
+ }
+
++ /* 8168evl does not automatically pad to minimum length. */
++ if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
++ skb->len < ETH_ZLEN)) {
++ if (skb_padto(skb, ETH_ZLEN))
++ goto err_update_stats;
++ skb_put(skb, ETH_ZLEN - skb->len);
++ }
++
+ if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
+ goto err_stop_0;
+
+@@ -5633,6 +5639,7 @@ err_dma_1:
+ rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
+ err_dma_0:
+ dev_kfree_skb(skb);
++err_update_stats:
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+
+@@ -5814,7 +5821,6 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
+ rtl8169_schedule_work(dev, rtl8169_reset_task);
+ dev->stats.rx_fifo_errors++;
+ }
+- rtl8169_mark_to_asic(desc, rx_buf_sz);
+ } else {
+ struct sk_buff *skb;
+ dma_addr_t addr = le64_to_cpu(desc->addr);
+@@ -5828,16 +5834,14 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
+ if (unlikely(rtl8169_fragmented_frame(status))) {
+ dev->stats.rx_dropped++;
+ dev->stats.rx_length_errors++;
+- rtl8169_mark_to_asic(desc, rx_buf_sz);
+- continue;
++ goto release_descriptor;
+ }
+
+ skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
+ tp, pkt_size, addr);
+- rtl8169_mark_to_asic(desc, rx_buf_sz);
+ if (!skb) {
+ dev->stats.rx_dropped++;
+- continue;
++ goto release_descriptor;
+ }
+
+ rtl8169_rx_csum(skb, status);
+@@ -5851,6 +5855,10 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
+ dev->stats.rx_bytes += pkt_size;
+ dev->stats.rx_packets++;
+ }
++release_descriptor:
++ desc->opts2 = 0;
++ wmb();
++ rtl8169_mark_to_asic(desc, rx_buf_sz);
+ }
+
+ count = cur_rx - tp->cur_rx;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+index ccf1524..3935994 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+@@ -563,6 +563,7 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
+ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+ {
+ struct iwl_addsta_cmd sta_cmd;
++ static const struct iwl_link_quality_cmd zero_lq = {};
+ struct iwl_link_quality_cmd lq;
+ unsigned long flags_spin;
+ int i;
+@@ -602,7 +603,9 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+ else
+ memcpy(&lq, priv->stations[i].lq,
+ sizeof(struct iwl_link_quality_cmd));
+- send_lq = true;
++
++ if (!memcmp(&lq, &zero_lq, sizeof(lq)))
++ send_lq = true;
+ }
+ spin_unlock_irqrestore(&priv->shrd->sta_lock,
+ flags_spin);
+diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
+index 3cf4ecc..621b84f 100644
+--- a/drivers/net/wireless/mwifiex/pcie.c
++++ b/drivers/net/wireless/mwifiex/pcie.c
+@@ -1821,9 +1821,9 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
+ if (pdev) {
+ pci_iounmap(pdev, card->pci_mmap);
+ pci_iounmap(pdev, card->pci_mmap1);
+-
+- pci_release_regions(pdev);
+ pci_disable_device(pdev);
++ pci_release_region(pdev, 2);
++ pci_release_region(pdev, 0);
+ pci_set_drvdata(pdev, NULL);
+ }
+ }
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 6d4a531..363a5c6 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -664,15 +664,11 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
+ error = platform_pci_set_power_state(dev, state);
+ if (!error)
+ pci_update_current_state(dev, state);
+- /* Fall back to PCI_D0 if native PM is not supported */
+- if (!dev->pm_cap)
+- dev->current_state = PCI_D0;
+- } else {
++ } else
+ error = -ENODEV;
+- /* Fall back to PCI_D0 if native PM is not supported */
+- if (!dev->pm_cap)
+- dev->current_state = PCI_D0;
+- }
++
++ if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
++ dev->current_state = PCI_D0;
+
+ return error;
+ }
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index 05beb6c..e3eed18 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -805,9 +805,8 @@ static int cmos_suspend(struct device *dev)
+ mask = RTC_IRQMASK;
+ tmp &= ~mask;
+ CMOS_WRITE(tmp, RTC_CONTROL);
++ hpet_mask_rtc_irq_bit(mask);
+
+- /* shut down hpet emulation - we don't need it for alarm */
+- hpet_mask_rtc_irq_bit(RTC_PIE|RTC_AIE|RTC_UIE);
+ cmos_checkintr(cmos, tmp);
+ }
+ spin_unlock_irq(&rtc_lock);
+@@ -872,6 +871,7 @@ static int cmos_resume(struct device *dev)
+ rtc_update_irq(cmos->rtc, 1, mask);
+ tmp &= ~RTC_AIE;
+ hpet_mask_rtc_irq_bit(RTC_AIE);
++ hpet_rtc_timer_init();
+ } while (mask & RTC_AIE);
+ spin_unlock_irq(&rtc_lock);
+ }
+diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
+index 0b54a91..a56a15e 100644
+--- a/drivers/s390/char/sclp_cmd.c
++++ b/drivers/s390/char/sclp_cmd.c
+@@ -509,6 +509,8 @@ static void __init sclp_add_standby_memory(void)
+ add_memory_merged(0);
+ }
+
++#define MEM_SCT_SIZE (1UL << SECTION_SIZE_BITS)
++
+ static void __init insert_increment(u16 rn, int standby, int assigned)
+ {
+ struct memory_increment *incr, *new_incr;
+@@ -521,7 +523,7 @@ static void __init insert_increment(u16 rn, int standby, int assigned)
+ new_incr->rn = rn;
+ new_incr->standby = standby;
+ if (!standby)
+- new_incr->usecount = 1;
++ new_incr->usecount = rzm > MEM_SCT_SIZE ? rzm/MEM_SCT_SIZE : 1;
+ last_rn = 0;
+ prev = &sclp_mem_list;
+ list_for_each_entry(incr, &sclp_mem_list, list) {
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index d19b879..4735928 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -669,6 +669,9 @@ static int ptmx_open(struct inode *inode, struct file *filp)
+
+ nonseekable_open(inode, filp);
+
++ /* We refuse fsnotify events on ptmx, since it's a shared resource */
++ filp->f_mode |= FMODE_NONOTIFY;
++
+ retval = tty_alloc_file(filp);
+ if (retval)
+ return retval;
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index af5ffb9..488214a 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -1901,6 +1901,8 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
+ mutex_unlock(&port->mutex);
+ return 0;
+ }
++ put_device(tty_dev);
++
+ if (console_suspend_enabled || !uart_console(uport))
+ uport->suspended = 1;
+
+@@ -1966,9 +1968,11 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
+ disable_irq_wake(uport->irq);
+ uport->irq_wake = 0;
+ }
++ put_device(tty_dev);
+ mutex_unlock(&port->mutex);
+ return 0;
+ }
++ put_device(tty_dev);
+ uport->suspended = 0;
+
+ /*
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 05085be..3f35e42 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -940,6 +940,14 @@ void start_tty(struct tty_struct *tty)
+
+ EXPORT_SYMBOL(start_tty);
+
++/* We limit tty time update visibility to every 8 seconds or so. */
++static void tty_update_time(struct timespec *time)
++{
++ unsigned long sec = get_seconds() & ~7;
++ if ((long)(sec - time->tv_sec) > 0)
++ time->tv_sec = sec;
++}
++
+ /**
+ * tty_read - read method for tty device files
+ * @file: pointer to tty file
+@@ -976,8 +984,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
+ else
+ i = -EIO;
+ tty_ldisc_deref(ld);
++
+ if (i > 0)
+- inode->i_atime = current_fs_time(inode->i_sb);
++ tty_update_time(&inode->i_atime);
++
+ return i;
+ }
+
+@@ -1079,8 +1089,8 @@ static inline ssize_t do_tty_write(
+ cond_resched();
+ }
+ if (written) {
+- struct inode *inode = file->f_path.dentry->d_inode;
+- inode->i_mtime = current_fs_time(inode->i_sb);
++ struct inode *inode = file->f_path.dentry->d_inode;
++ tty_update_time(&inode->i_mtime);
+ ret = written;
+ }
+ out:
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index a9df218..22f770a 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -643,6 +643,8 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
+ index &= 0xff;
+ switch (requesttype & USB_RECIP_MASK) {
+ case USB_RECIP_ENDPOINT:
++ if ((index & ~USB_DIR_IN) == 0)
++ return 0;
+ ret = findintfep(ps->dev, index);
+ if (ret >= 0)
+ ret = checkintf(ps, ret);
+diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
+index ac0d75a..9f7003e 100644
+--- a/drivers/usb/misc/appledisplay.c
++++ b/drivers/usb/misc/appledisplay.c
+@@ -63,6 +63,7 @@ static const struct usb_device_id appledisplay_table[] = {
+ { APPLEDISPLAY_DEVICE(0x9219) },
+ { APPLEDISPLAY_DEVICE(0x921c) },
+ { APPLEDISPLAY_DEVICE(0x921d) },
++ { APPLEDISPLAY_DEVICE(0x9236) },
+
+ /* Terminating entry */
+ { }
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 06394e5a..51d1712 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -195,6 +195,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) },
+ { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
+@@ -876,7 +877,9 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
+ { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+- { USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
++ { USB_DEVICE(ST_VID, ST_STMCLT_2232_PID),
++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++ { USB_DEVICE(ST_VID, ST_STMCLT_4232_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_RF_R106) },
+ { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID),
+@@ -1816,8 +1819,11 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
+ }
+
+ /*
+- * First and second port on STMCLiteadaptors is reserved for JTAG interface
+- * and the forth port for pio
++ * First two ports on JTAG adaptors using an FT4232 such as STMicroelectronics's
++ * ST Micro Connect Lite are reserved for JTAG or other non-UART interfaces and
++ * can be accessed from userspace.
++ * The next two ports are enabled as UARTs by default, where port 2 is
++ * a conventional RS-232 UART.
+ */
+ static int ftdi_stmclite_probe(struct usb_serial *serial)
+ {
+@@ -1826,12 +1832,13 @@ static int ftdi_stmclite_probe(struct usb_serial *serial)
+
+ dbg("%s", __func__);
+
+- if (interface == udev->actconfig->interface[2])
+- return 0;
+-
+- dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
++ if (interface == udev->actconfig->interface[0] ||
++ interface == udev->actconfig->interface[1]) {
++ dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
++ return -ENODEV;
++ }
+
+- return -ENODEV;
++ return 0;
+ }
+
+ /*
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 809c03a..2f86008 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -74,6 +74,7 @@
+ #define FTDI_OPENDCC_THROTTLE_PID 0xBFDA
+ #define FTDI_OPENDCC_GATEWAY_PID 0xBFDB
+ #define FTDI_OPENDCC_GBM_PID 0xBFDC
++#define FTDI_OPENDCC_GBM_BOOST_PID 0xBFDD
+
+ /* NZR SEM 16+ USB (http://www.nzr.de) */
+ #define FTDI_NZR_SEM_USB_PID 0xC1E0 /* NZR SEM-LOG16+ */
+@@ -1150,7 +1151,8 @@
+ * STMicroelectonics
+ */
+ #define ST_VID 0x0483
+-#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */
++#define ST_STMCLT_2232_PID 0x3746
++#define ST_STMCLT_4232_PID 0x3747
+
+ /*
+ * Papouch products (http://www.papouch.com/)
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 4418538..8513f51 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -347,6 +347,7 @@ static void option_instat_callback(struct urb *urb);
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID 0x0b3c
+ #define OLIVETTI_PRODUCT_OLICARD100 0xc000
++#define OLIVETTI_PRODUCT_OLICARD145 0xc003
+
+ /* Celot products */
+ #define CELOT_VENDOR_ID 0x211f
+@@ -1273,6 +1274,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+ { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+@@ -1350,6 +1352,12 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
+index c844718..7341ce2 100644
+--- a/drivers/usb/storage/cypress_atacb.c
++++ b/drivers/usb/storage/cypress_atacb.c
+@@ -248,14 +248,26 @@ static int cypress_probe(struct usb_interface *intf,
+ {
+ struct us_data *us;
+ int result;
++ struct usb_device *device;
+
+ result = usb_stor_probe1(&us, intf, id,
+ (id - cypress_usb_ids) + cypress_unusual_dev_list);
+ if (result)
+ return result;
+
+- us->protocol_name = "Transparent SCSI with Cypress ATACB";
+- us->proto_handler = cypress_atacb_passthrough;
++ /* Among CY7C68300 chips, the A revision does not support Cypress ATACB
++ * Filter out this revision from EEPROM default descriptor values
++ */
++ device = interface_to_usbdev(intf);
++ if (device->descriptor.iManufacturer != 0x38 ||
++ device->descriptor.iProduct != 0x4e ||
++ device->descriptor.iSerialNumber != 0x64) {
++ us->protocol_name = "Transparent SCSI with Cypress ATACB";
++ us->proto_handler = cypress_atacb_passthrough;
++ } else {
++ us->protocol_name = "Transparent SCSI";
++ us->proto_handler = usb_stor_transparent_scsi_command;
++ }
+
+ result = usb_stor_probe2(us);
+ return result;
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index 7a36dff..6b4fb5c 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -1229,6 +1229,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ finished:
+
+ fbcon_free_font(p, free_font);
++ if (free_font)
++ vc->vc_font.data = NULL;
+
+ if (!con_is_bound(&fb_con))
+ fbcon_exit();
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index babbb07..0a22808 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1350,15 +1350,12 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
+ {
+ struct fb_info *info = file_fb_info(file);
+ struct fb_ops *fb;
+- unsigned long off;
++ unsigned long mmio_pgoff;
+ unsigned long start;
+ u32 len;
+
+ if (!info)
+ return -ENODEV;
+- if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+- return -EINVAL;
+- off = vma->vm_pgoff << PAGE_SHIFT;
+ fb = info->fbops;
+ if (!fb)
+ return -ENODEV;
+@@ -1370,33 +1367,24 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
+ return res;
+ }
+
+- /* frame buffer memory */
++ /*
++ * Ugh. This can be either the frame buffer mapping, or
++ * if pgoff points past it, the mmio mapping.
++ */
+ start = info->fix.smem_start;
+- len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
+- if (off >= len) {
+- /* memory mapped io */
+- off -= len;
+- if (info->var.accel_flags) {
+- mutex_unlock(&info->mm_lock);
+- return -EINVAL;
+- }
++ len = info->fix.smem_len;
++ mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT;
++ if (vma->vm_pgoff >= mmio_pgoff) {
++ vma->vm_pgoff -= mmio_pgoff;
+ start = info->fix.mmio_start;
+- len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
++ len = info->fix.mmio_len;
+ }
+ mutex_unlock(&info->mm_lock);
+- start &= PAGE_MASK;
+- if ((vma->vm_end - vma->vm_start + off) > len)
+- return -EINVAL;
+- off += start;
+- vma->vm_pgoff = off >> PAGE_SHIFT;
+- /* This is an IO map - tell maydump to skip this VMA */
+- vma->vm_flags |= VM_IO | VM_RESERVED;
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+- fb_pgprotect(file, vma, off);
+- if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+- vma->vm_end - vma->vm_start, vma->vm_page_prot))
+- return -EAGAIN;
+- return 0;
++ fb_pgprotect(file, vma, start);
++
++ return vm_iomap_memory(vma, start, len);
+ }
+
+ static int
+diff --git a/fs/aio.c b/fs/aio.c
+index 3b65ee7..8cdd8ea 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1112,9 +1112,9 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
+ spin_unlock(&info->ring_lock);
+
+ out:
+- kunmap_atomic(ring, KM_USER0);
+ dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
+ (unsigned long)ring->head, (unsigned long)ring->tail);
++ kunmap_atomic(ring, KM_USER0);
+ return ret;
+ }
+
+diff --git a/fs/dcache.c b/fs/dcache.c
+index e923bf4..d322929 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1176,8 +1176,10 @@ void shrink_dcache_parent(struct dentry * parent)
+ LIST_HEAD(dispose);
+ int found;
+
+- while ((found = select_parent(parent, &dispose)) != 0)
++ while ((found = select_parent(parent, &dispose)) != 0) {
+ shrink_dentry_list(&dispose);
++ cond_resched();
++ }
+ }
+ EXPORT_SYMBOL(shrink_dcache_parent);
+
+diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
+index 9ed1bb1..5459168 100644
+--- a/fs/ext4/Kconfig
++++ b/fs/ext4/Kconfig
+@@ -82,4 +82,5 @@ config EXT4_DEBUG
+ Enables run-time debugging support for the ext4 filesystem.
+
+ If you select Y here, then you will be able to turn on debugging
+- with a command such as "echo 1 > /sys/kernel/debug/ext4/mballoc-debug"
++ with a command such as:
++ echo 1 > /sys/module/ext4/parameters/mballoc_debug
+diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
+index bb6c7d8..a8d03a4 100644
+--- a/fs/ext4/fsync.c
++++ b/fs/ext4/fsync.c
+@@ -260,8 +260,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+ if (journal->j_flags & JBD2_BARRIER &&
+ !jbd2_trans_will_send_data_barrier(journal, commit_tid))
+ needs_barrier = true;
+- jbd2_log_start_commit(journal, commit_tid);
+- ret = jbd2_log_wait_commit(journal, commit_tid);
++ ret = jbd2_complete_transaction(journal, commit_tid);
+ if (needs_barrier)
+ blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ out:
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 3270ffd..025b4b6 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -147,8 +147,7 @@ void ext4_evict_inode(struct inode *inode)
+ journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+ tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
+
+- jbd2_log_start_commit(journal, commit_tid);
+- jbd2_log_wait_commit(journal, commit_tid);
++ jbd2_complete_transaction(journal, commit_tid);
+ filemap_write_and_wait(&inode->i_data);
+ }
+ truncate_inode_pages(&inode->i_data, 0);
+diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
+index 4765190..73c0bd7 100644
+--- a/fs/fscache/stats.c
++++ b/fs/fscache/stats.c
+@@ -276,5 +276,5 @@ const struct file_operations fscache_stats_fops = {
+ .open = fscache_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+- .release = seq_release,
++ .release = single_release,
+ };
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index d751f04..ab9463a 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -326,7 +326,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ int space_left = 0;
+ int first_tag = 0;
+ int tag_flag;
+- int i, to_free = 0;
++ int i;
+ int tag_bytes = journal_tag_bytes(journal);
+ struct buffer_head *cbh = NULL; /* For transactional checksums */
+ __u32 crc32_sum = ~0;
+@@ -996,7 +996,7 @@ restart_loop:
+ journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
+ spin_unlock(&journal->j_history_lock);
+
+- commit_transaction->t_state = T_FINISHED;
++ commit_transaction->t_state = T_COMMIT_CALLBACK;
+ J_ASSERT(commit_transaction == journal->j_committing_transaction);
+ journal->j_commit_sequence = commit_transaction->t_tid;
+ journal->j_committing_transaction = NULL;
+@@ -1011,38 +1011,44 @@ restart_loop:
+ journal->j_average_commit_time*3) / 4;
+ else
+ journal->j_average_commit_time = commit_time;
++
+ write_unlock(&journal->j_state_lock);
+
+- if (commit_transaction->t_checkpoint_list == NULL &&
+- commit_transaction->t_checkpoint_io_list == NULL) {
+- __jbd2_journal_drop_transaction(journal, commit_transaction);
+- to_free = 1;
++ if (journal->j_checkpoint_transactions == NULL) {
++ journal->j_checkpoint_transactions = commit_transaction;
++ commit_transaction->t_cpnext = commit_transaction;
++ commit_transaction->t_cpprev = commit_transaction;
+ } else {
+- if (journal->j_checkpoint_transactions == NULL) {
+- journal->j_checkpoint_transactions = commit_transaction;
+- commit_transaction->t_cpnext = commit_transaction;
+- commit_transaction->t_cpprev = commit_transaction;
+- } else {
+- commit_transaction->t_cpnext =
+- journal->j_checkpoint_transactions;
+- commit_transaction->t_cpprev =
+- commit_transaction->t_cpnext->t_cpprev;
+- commit_transaction->t_cpnext->t_cpprev =
+- commit_transaction;
+- commit_transaction->t_cpprev->t_cpnext =
++ commit_transaction->t_cpnext =
++ journal->j_checkpoint_transactions;
++ commit_transaction->t_cpprev =
++ commit_transaction->t_cpnext->t_cpprev;
++ commit_transaction->t_cpnext->t_cpprev =
++ commit_transaction;
++ commit_transaction->t_cpprev->t_cpnext =
+ commit_transaction;
+- }
+ }
+ spin_unlock(&journal->j_list_lock);
+-
++ /* Drop all spin_locks because commit_callback may be block.
++ * __journal_remove_checkpoint() can not destroy transaction
++ * under us because it is not marked as T_FINISHED yet */
+ if (journal->j_commit_callback)
+ journal->j_commit_callback(journal, commit_transaction);
+
+ trace_jbd2_end_commit(journal, commit_transaction);
+ jbd_debug(1, "JBD2: commit %d complete, head %d\n",
+ journal->j_commit_sequence, journal->j_tail_sequence);
+- if (to_free)
+- kfree(commit_transaction);
+
++ write_lock(&journal->j_state_lock);
++ spin_lock(&journal->j_list_lock);
++ commit_transaction->t_state = T_FINISHED;
++ /* Recheck checkpoint lists after j_list_lock was dropped */
++ if (commit_transaction->t_checkpoint_list == NULL &&
++ commit_transaction->t_checkpoint_io_list == NULL) {
++ __jbd2_journal_drop_transaction(journal, commit_transaction);
++ kfree(commit_transaction);
++ }
++ spin_unlock(&journal->j_list_lock);
++ write_unlock(&journal->j_state_lock);
+ wake_up(&journal->j_wait_done_commit);
+ }
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 0fa0123..17b04fc 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -663,6 +663,37 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
+ }
+
+ /*
++ * When this function returns the transaction corresponding to tid
++ * will be completed. If the transaction has currently running, start
++ * committing that transaction before waiting for it to complete. If
++ * the transaction id is stale, it is by definition already completed,
++ * so just return SUCCESS.
++ */
++int jbd2_complete_transaction(journal_t *journal, tid_t tid)
++{
++ int need_to_wait = 1;
++
++ read_lock(&journal->j_state_lock);
++ if (journal->j_running_transaction &&
++ journal->j_running_transaction->t_tid == tid) {
++ if (journal->j_commit_request != tid) {
++ /* transaction not yet started, so request it */
++ read_unlock(&journal->j_state_lock);
++ jbd2_log_start_commit(journal, tid);
++ goto wait_commit;
++ }
++ } else if (!(journal->j_committing_transaction &&
++ journal->j_committing_transaction->t_tid == tid))
++ need_to_wait = 0;
++ read_unlock(&journal->j_state_lock);
++ if (!need_to_wait)
++ return 0;
++wait_commit:
++ return jbd2_log_wait_commit(journal, tid);
++}
++EXPORT_SYMBOL(jbd2_complete_transaction);
++
++/*
+ * Log buffer allocation routines:
+ */
+
+diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
+index 8d4ea83..de88922 100644
+--- a/fs/lockd/clntlock.c
++++ b/fs/lockd/clntlock.c
+@@ -141,6 +141,9 @@ int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
+ timeout);
+ if (ret < 0)
+ return -ERESTARTSYS;
++ /* Reset the lock status after a server reboot so we resend */
++ if (block->b_status == nlm_lck_denied_grace_period)
++ block->b_status = nlm_lck_blocked;
+ req->a_res.status = block->b_status;
+ return 0;
+ }
+diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
+index a3a0987..8392cb8 100644
+--- a/fs/lockd/clntproc.c
++++ b/fs/lockd/clntproc.c
+@@ -551,9 +551,6 @@ again:
+ status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
+ if (status < 0)
+ break;
+- /* Resend the blocking lock request after a server reboot */
+- if (resp->status == nlm_lck_denied_grace_period)
+- continue;
+ if (resp->status != nlm_lck_blocked)
+ break;
+ }
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index fe5c5fb..08921b8 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -880,14 +880,14 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+
+ nfs4_lock_state();
+ status = nfs4_preprocess_stateid_op(cstate, stateid, WR_STATE, &filp);
+- if (filp)
+- get_file(filp);
+- nfs4_unlock_state();
+-
+ if (status) {
++ nfs4_unlock_state();
+ dprintk("NFSD: nfsd4_write: couldn't process stateid!\n");
+ return status;
+ }
++ if (filp)
++ get_file(filp);
++ nfs4_unlock_state();
+
+ cnt = write->wr_buflen;
+ write->wr_how_written = write->wr_stable_how;
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 7d189dc..4cef99f 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -188,13 +188,7 @@ static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
+ {
+ if (atomic_dec_and_test(&fp->fi_access[oflag])) {
+ nfs4_file_put_fd(fp, oflag);
+- /*
+- * It's also safe to get rid of the RDWR open *if*
+- * we no longer have need of the other kind of access
+- * or if we already have the other kind of open:
+- */
+- if (fp->fi_fds[1-oflag]
+- || atomic_read(&fp->fi_access[1 - oflag]) == 0)
++ if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
+ nfs4_file_put_fd(fp, O_RDWR);
+ }
+ }
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 24afa96..ade5316 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -360,10 +360,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
+ all 32 bits of 'nseconds'. */
+ READ_BUF(12);
+ len += 12;
+- READ32(dummy32);
+- if (dummy32)
+- return nfserr_inval;
+- READ32(iattr->ia_atime.tv_sec);
++ READ64(iattr->ia_atime.tv_sec);
+ READ32(iattr->ia_atime.tv_nsec);
+ if (iattr->ia_atime.tv_nsec >= (u32)1000000000)
+ return nfserr_inval;
+@@ -386,10 +383,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
+ all 32 bits of 'nseconds'. */
+ READ_BUF(12);
+ len += 12;
+- READ32(dummy32);
+- if (dummy32)
+- return nfserr_inval;
+- READ32(iattr->ia_mtime.tv_sec);
++ READ64(iattr->ia_mtime.tv_sec);
+ READ32(iattr->ia_mtime.tv_nsec);
+ if (iattr->ia_mtime.tv_nsec >= (u32)1000000000)
+ return nfserr_inval;
+@@ -2374,8 +2368,7 @@ out_acl:
+ if (bmval1 & FATTR4_WORD1_TIME_ACCESS) {
+ if ((buflen -= 12) < 0)
+ goto out_resource;
+- WRITE32(0);
+- WRITE32(stat.atime.tv_sec);
++ WRITE64((s64)stat.atime.tv_sec);
+ WRITE32(stat.atime.tv_nsec);
+ }
+ if (bmval1 & FATTR4_WORD1_TIME_DELTA) {
+@@ -2388,15 +2381,13 @@ out_acl:
+ if (bmval1 & FATTR4_WORD1_TIME_METADATA) {
+ if ((buflen -= 12) < 0)
+ goto out_resource;
+- WRITE32(0);
+- WRITE32(stat.ctime.tv_sec);
++ WRITE64((s64)stat.ctime.tv_sec);
+ WRITE32(stat.ctime.tv_nsec);
+ }
+ if (bmval1 & FATTR4_WORD1_TIME_MODIFY) {
+ if ((buflen -= 12) < 0)
+ goto out_resource;
+- WRITE32(0);
+- WRITE32(stat.mtime.tv_sec);
++ WRITE64((s64)stat.mtime.tv_sec);
+ WRITE32(stat.mtime.tv_nsec);
+ }
+ if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
+diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
+index 6f292dd..f255d37 100644
+--- a/fs/notify/inotify/inotify_user.c
++++ b/fs/notify/inotify/inotify_user.c
+@@ -577,7 +577,6 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
+ int add = (arg & IN_MASK_ADD);
+ int ret;
+
+- /* don't allow invalid bits: we don't want flags set */
+ mask = inotify_arg_to_mask(arg);
+
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+@@ -628,7 +627,6 @@ static int inotify_new_watch(struct fsnotify_group *group,
+ struct idr *idr = &group->inotify_data.idr;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
+
+- /* don't allow invalid bits: we don't want flags set */
+ mask = inotify_arg_to_mask(arg);
+
+ tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+@@ -757,6 +755,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
+ int ret, fput_needed;
+ unsigned flags = 0;
+
++ /* don't allow invalid bits: we don't want flags set */
++ if (unlikely(!(mask & ALL_INOTIFY_BITS)))
++ return -EINVAL;
++
+ filp = fget_light(fd, &fput_needed);
+ if (unlikely(!filp))
+ return -EBADF;
+diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
+index 3899e24..e756bc4 100644
+--- a/fs/sysfs/dir.c
++++ b/fs/sysfs/dir.c
+@@ -977,6 +977,7 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ enum kobj_ns_type type;
+ const void *ns;
+ ino_t ino;
++ loff_t off;
+
+ type = sysfs_ns_type(parent_sd);
+ ns = sysfs_info(dentry->d_sb)->ns[type];
+@@ -999,6 +1000,7 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ return 0;
+ }
+ mutex_lock(&sysfs_mutex);
++ off = filp->f_pos;
+ for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos);
+ pos;
+ pos = sysfs_dir_next_pos(ns, parent_sd, filp->f_pos, pos)) {
+@@ -1010,19 +1012,24 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+ len = strlen(name);
+ ino = pos->s_ino;
+ type = dt_type(pos);
+- filp->f_pos = ino;
++ off = filp->f_pos = ino;
+ filp->private_data = sysfs_get(pos);
+
+ mutex_unlock(&sysfs_mutex);
+- ret = filldir(dirent, name, len, filp->f_pos, ino, type);
++ ret = filldir(dirent, name, len, off, ino, type);
+ mutex_lock(&sysfs_mutex);
+ if (ret < 0)
+ break;
+ }
+ mutex_unlock(&sysfs_mutex);
+- if ((filp->f_pos > 1) && !pos) { /* EOF */
+- filp->f_pos = INT_MAX;
++
++ /* don't reference last entry if its refcount is dropped */
++ if (!pos) {
+ filp->private_data = NULL;
++
++ /* EOF and not changed as 0 or 1 in read/write path */
++ if (off == filp->f_pos && off > 1)
++ filp->f_pos = INT_MAX;
+ }
+ return 0;
+ }
+diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
+index 8a297a5..497c6cc 100644
+--- a/include/linux/ipc_namespace.h
++++ b/include/linux/ipc_namespace.h
+@@ -42,8 +42,8 @@ struct ipc_namespace {
+
+ size_t shm_ctlmax;
+ size_t shm_ctlall;
++ unsigned long shm_tot;
+ int shm_ctlmni;
+- int shm_tot;
+ /*
+ * Defines whether IPC_RMID is forced for _all_ shm segments regardless
+ * of shmctl()
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index 2092ea2..a153ed5 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -470,6 +470,7 @@ struct transaction_s
+ T_COMMIT,
+ T_COMMIT_DFLUSH,
+ T_COMMIT_JFLUSH,
++ T_COMMIT_CALLBACK,
+ T_FINISHED
+ } t_state;
+
+@@ -1165,6 +1166,7 @@ int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
+ int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
+ int jbd2_journal_force_commit_nested(journal_t *journal);
+ int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
++int jbd2_complete_transaction(journal_t *journal, tid_t tid);
+ int jbd2_log_do_checkpoint(journal_t *journal);
+ int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
+
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 4baadd1..d0493f6 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1509,6 +1509,8 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
+ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
++int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
++
+
+ struct page *follow_page(struct vm_area_struct *, unsigned long address,
+ unsigned int foll_flags);
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 00ca32b..8c43fd1 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -225,9 +225,9 @@ struct netdev_hw_addr {
+ #define NETDEV_HW_ADDR_T_SLAVE 3
+ #define NETDEV_HW_ADDR_T_UNICAST 4
+ #define NETDEV_HW_ADDR_T_MULTICAST 5
+- bool synced;
+ bool global_use;
+ int refcount;
++ int synced;
+ struct rcu_head rcu_head;
+ };
+
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index da65890..efe50af 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2367,6 +2367,13 @@ static inline void nf_reset(struct sk_buff *skb)
+ #endif
+ }
+
++static inline void nf_reset_trace(struct sk_buff *skb)
++{
++#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
++ skb->nf_trace = 0;
++#endif
++}
++
+ /* Note: This doesn't put any conntrack and bridge info in dst. */
+ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
+ {
+diff --git a/ipc/shm.c b/ipc/shm.c
+index b76be5b..326a20b 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -450,7 +450,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+ size_t size = params->u.size;
+ int error;
+ struct shmid_kernel *shp;
+- int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
++ size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ struct file * file;
+ char name[13];
+ int id;
+diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
+index 31fdc48..0caf1f8 100644
+--- a/kernel/audit_tree.c
++++ b/kernel/audit_tree.c
+@@ -608,9 +608,9 @@ void audit_trim_trees(void)
+ }
+ spin_unlock(&hash_lock);
+ trim_marked(tree);
+- put_tree(tree);
+ drop_collected_mounts(root_mnt);
+ skip_it:
++ put_tree(tree);
+ mutex_lock(&audit_filter_mutex);
+ }
+ list_del(&cursor);
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index c0739f8..d2a01fe 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2029,7 +2029,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
+ if (!group)
+ return -ENOMEM;
+ /* pre-allocate to guarantee space while iterating in rcu read-side. */
+- retval = flex_array_prealloc(group, 0, group_size - 1, GFP_KERNEL);
++ retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL);
+ if (retval)
+ goto out_free_group_list;
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 7d1f05e..9f21915 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5164,7 +5164,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
+
+ static int perf_swevent_init(struct perf_event *event)
+ {
+- int event_id = event->attr.config;
++ u64 event_id = event->attr.config;
+
+ if (event->attr.type != PERF_TYPE_SOFTWARE)
+ return -ENOENT;
+@@ -5756,6 +5756,7 @@ skip_type:
+ if (pmu->pmu_cpu_context)
+ goto got_cpu_context;
+
++ ret = -ENOMEM;
+ pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
+ if (!pmu->pmu_cpu_context)
+ goto free_dev;
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index e4cee8d..60f7e32 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -298,6 +298,10 @@ ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
+ } else {
+ unsigned long rem = do_div(nsec, NSEC_PER_SEC);
+
++ /* Make sure nsec fits into long */
++ if (unlikely(nsec > KTIME_SEC_MAX))
++ return (ktime_t){ .tv64 = KTIME_MAX };
++
+ tmp = ktime_set((long)nsec, rem);
+ }
+
+@@ -1308,6 +1312,8 @@ retry:
+
+ expires = ktime_sub(hrtimer_get_expires(timer),
+ base->offset);
++ if (expires.tv64 < 0)
++ expires.tv64 = KTIME_MAX;
+ if (expires.tv64 < expires_next.tv64)
+ expires_next = expires;
+ break;
+diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
+index cd068b2..c3509fb 100644
+--- a/kernel/time/tick-broadcast.c
++++ b/kernel/time/tick-broadcast.c
+@@ -66,6 +66,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
+ */
+ int tick_check_broadcast_device(struct clock_event_device *dev)
+ {
++ struct clock_event_device *cur = tick_broadcast_device.evtdev;
++
+ if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
+ (tick_broadcast_device.evtdev &&
+ tick_broadcast_device.evtdev->rating >= dev->rating) ||
+@@ -73,6 +75,8 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
+ return 0;
+
+ clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
++ if (cur)
++ cur->event_handler = clockevents_handle_noop;
+ tick_broadcast_device.evtdev = dev;
+ if (!cpumask_empty(tick_get_broadcast_mask()))
+ tick_broadcast_start_periodic(dev);
+diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
+index da6c9ec..ead79bc 100644
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -323,6 +323,7 @@ static void tick_shutdown(unsigned int *cpup)
+ */
+ dev->mode = CLOCK_EVT_MODE_UNUSED;
+ clockevents_exchange_device(dev, NULL);
++ dev->event_handler = clockevents_handle_noop;
+ td->evtdev = NULL;
+ }
+ raw_spin_unlock_irqrestore(&tick_device_lock, flags);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 5527211..24b3759 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -554,7 +554,7 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
+
+ pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
+
+- for (i = 0; i < pages; i++) {
++ for (i = 1; i < pages; i++) {
+ pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!pg->next)
+ goto out_free;
+@@ -3303,7 +3303,8 @@ out:
+ if (fail)
+ return -EINVAL;
+
+- ftrace_graph_filter_enabled = 1;
++ ftrace_graph_filter_enabled = !!(*idx);
++
+ return 0;
+ }
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 17edb14..0ec6c34 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4563,6 +4563,8 @@ static __init int tracer_init_debugfs(void)
+ trace_access_lock_init();
+
+ d_tracer = tracing_init_dentry();
++ if (!d_tracer)
++ return 0;
+
+ trace_create_file("tracing_enabled", 0644, d_tracer,
+ &global_trace, &tracing_ctrl_fops);
+@@ -4696,36 +4698,32 @@ void trace_init_global_iter(struct trace_iterator *iter)
+ iter->cpu_file = TRACE_PIPE_ALL_CPU;
+ }
+
+-static void
+-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
++void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
+ {
+- static arch_spinlock_t ftrace_dump_lock =
+- (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+ /* use static because iter can be a bit big for the stack */
+ static struct trace_iterator iter;
++ static atomic_t dump_running;
+ unsigned int old_userobj;
+- static int dump_ran;
+ unsigned long flags;
+ int cnt = 0, cpu;
+
+- /* only one dump */
+- local_irq_save(flags);
+- arch_spin_lock(&ftrace_dump_lock);
+- if (dump_ran)
+- goto out;
+-
+- dump_ran = 1;
++ /* Only allow one dump user at a time. */
++ if (atomic_inc_return(&dump_running) != 1) {
++ atomic_dec(&dump_running);
++ return;
++ }
+
++ /*
++ * Always turn off tracing when we dump.
++ * We don't need to show trace output of what happens
++ * between multiple crashes.
++ *
++ * If the user does a sysrq-z, then they can re-enable
++ * tracing with echo 1 > tracing_on.
++ */
+ tracing_off();
+
+- /* Did function tracer already get disabled? */
+- if (ftrace_is_dead()) {
+- printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+- printk("# MAY BE MISSING FUNCTION EVENTS\n");
+- }
+-
+- if (disable_tracing)
+- ftrace_kill();
++ local_irq_save(flags);
+
+ trace_init_global_iter(&iter);
+
+@@ -4758,6 +4756,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
+
+ printk(KERN_TRACE "Dumping ftrace buffer:\n");
+
++ /* Did function tracer already get disabled? */
++ if (ftrace_is_dead()) {
++ printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
++ printk("# MAY BE MISSING FUNCTION EVENTS\n");
++ }
++
+ /*
+ * We need to stop all tracing on all CPUS to read the
+ * the next buffer. This is a bit expensive, but is
+@@ -4796,26 +4800,15 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
+ printk(KERN_TRACE "---------------------------------\n");
+
+ out_enable:
+- /* Re-enable tracing if requested */
+- if (!disable_tracing) {
+- trace_flags |= old_userobj;
++ trace_flags |= old_userobj;
+
+- for_each_tracing_cpu(cpu) {
+- atomic_dec(&iter.tr->data[cpu]->disabled);
+- }
+- tracing_on();
++ for_each_tracing_cpu(cpu) {
++ atomic_dec(&iter.tr->data[cpu]->disabled);
+ }
+-
+- out:
+- arch_spin_unlock(&ftrace_dump_lock);
++ atomic_dec(&dump_running);
+ local_irq_restore(flags);
+ }
+-
+-/* By default: disable tracing after the dump */
+-void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
+-{
+- __ftrace_dump(true, oops_dump_mode);
+-}
++EXPORT_SYMBOL_GPL(ftrace_dump);
+
+ __init static int tracer_alloc_buffers(void)
+ {
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index 288541f..09fd98a 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -461,8 +461,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
+ /* Maximum number of functions to trace before diagnosing a hang */
+ #define GRAPH_MAX_FUNC_TEST 100000000
+
+-static void
+-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
+ static unsigned int graph_hang_thresh;
+
+ /* Wrap the real function entry probe to avoid possible hanging */
+@@ -472,8 +470,11 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
+ if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
+ ftrace_graph_stop();
+ printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
+- if (ftrace_dump_on_oops)
+- __ftrace_dump(false, DUMP_ALL);
++ if (ftrace_dump_on_oops) {
++ ftrace_dump(DUMP_ALL);
++ /* ftrace_dump() disables tracing */
++ tracing_on();
++ }
+ return 0;
+ }
+
+diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
+index 77575b3..c5b20a3 100644
+--- a/kernel/trace/trace_stack.c
++++ b/kernel/trace/trace_stack.c
+@@ -17,13 +17,24 @@
+
+ #define STACK_TRACE_ENTRIES 500
+
++#ifdef CC_USING_FENTRY
++# define fentry 1
++#else
++# define fentry 0
++#endif
++
+ static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
+ { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
+ static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
+
++/*
++ * Reserve one entry for the passed in ip. This will allow
++ * us to remove most or all of the stack size overhead
++ * added by the stack tracer itself.
++ */
+ static struct stack_trace max_stack_trace = {
+- .max_entries = STACK_TRACE_ENTRIES,
+- .entries = stack_dump_trace,
++ .max_entries = STACK_TRACE_ENTRIES - 1,
++ .entries = &stack_dump_trace[1],
+ };
+
+ static unsigned long max_stack_size;
+@@ -37,25 +48,34 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
+ int stack_tracer_enabled;
+ static int last_stack_tracer_enabled;
+
+-static inline void check_stack(void)
++static inline void
++check_stack(unsigned long ip, unsigned long *stack)
+ {
+ unsigned long this_size, flags;
+ unsigned long *p, *top, *start;
++ static int tracer_frame;
++ int frame_size = ACCESS_ONCE(tracer_frame);
+ int i;
+
+- this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
++ this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
+ this_size = THREAD_SIZE - this_size;
++ /* Remove the frame of the tracer */
++ this_size -= frame_size;
+
+ if (this_size <= max_stack_size)
+ return;
+
+ /* we do not handle interrupt stacks yet */
+- if (!object_is_on_stack(&this_size))
++ if (!object_is_on_stack(stack))
+ return;
+
+ local_irq_save(flags);
+ arch_spin_lock(&max_stack_lock);
+
++ /* In case another CPU set the tracer_frame on us */
++ if (unlikely(!frame_size))
++ this_size -= tracer_frame;
++
+ /* a race could have already updated it */
+ if (this_size <= max_stack_size)
+ goto out;
+@@ -68,10 +88,18 @@ static inline void check_stack(void)
+ save_stack_trace(&max_stack_trace);
+
+ /*
++ * Add the passed in ip from the function tracer.
++ * Searching for this on the stack will skip over
++ * most of the overhead from the stack tracer itself.
++ */
++ stack_dump_trace[0] = ip;
++ max_stack_trace.nr_entries++;
++
++ /*
+ * Now find where in the stack these are.
+ */
+ i = 0;
+- start = &this_size;
++ start = stack;
+ top = (unsigned long *)
+ (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
+
+@@ -95,6 +123,18 @@ static inline void check_stack(void)
+ found = 1;
+ /* Start the search from here */
+ start = p + 1;
++ /*
++ * We do not want to show the overhead
++ * of the stack tracer stack in the
++ * max stack. If we haven't figured
++ * out what that is, then figure it out
++ * now.
++ */
++ if (unlikely(!tracer_frame) && i == 1) {
++ tracer_frame = (p - stack) *
++ sizeof(unsigned long);
++ max_stack_size -= tracer_frame;
++ }
+ }
+ }
+
+@@ -110,6 +150,7 @@ static inline void check_stack(void)
+ static void
+ stack_trace_call(unsigned long ip, unsigned long parent_ip)
+ {
++ unsigned long stack;
+ int cpu;
+
+ if (unlikely(!ftrace_enabled || stack_trace_disabled))
+@@ -122,7 +163,26 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
+ if (per_cpu(trace_active, cpu)++ != 0)
+ goto out;
+
+- check_stack();
++ /*
++ * When fentry is used, the traced function does not get
++ * its stack frame set up, and we lose the parent.
++ * The ip is pretty useless because the function tracer
++ * was called before that function set up its stack frame.
++ * In this case, we use the parent ip.
++ *
++ * By adding the return address of either the parent ip
++ * or the current ip we can disregard most of the stack usage
++ * caused by the stack tracer itself.
++ *
++ * The function tracer always reports the address of where the
++ * mcount call was, but the stack will hold the return address.
++ */
++ if (fentry)
++ ip = parent_ip;
++ else
++ ip += MCOUNT_INSN_SIZE;
++
++ check_stack(ip, &stack);
+
+ out:
+ per_cpu(trace_active, cpu)--;
+@@ -351,6 +411,8 @@ static __init int stack_trace_init(void)
+ struct dentry *d_tracer;
+
+ d_tracer = tracing_init_dentry();
++ if (!d_tracer)
++ return 0;
+
+ trace_create_file("stack_max_size", 0644, d_tracer,
+ &max_stack_size, &stack_max_size_fops);
+diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
+index 96cffb2..847f88a 100644
+--- a/kernel/trace/trace_stat.c
++++ b/kernel/trace/trace_stat.c
+@@ -307,6 +307,8 @@ static int tracing_stat_init(void)
+ struct dentry *d_tracing;
+
+ d_tracing = tracing_init_dentry();
++ if (!d_tracing)
++ return 0;
+
+ stat_dir = debugfs_create_dir("trace_stat", d_tracing);
+ if (!stat_dir)
+diff --git a/mm/memory.c b/mm/memory.c
+index 4f2add1..d5f913b 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2309,6 +2309,53 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+ }
+ EXPORT_SYMBOL(remap_pfn_range);
+
++/**
++ * vm_iomap_memory - remap memory to userspace
++ * @vma: user vma to map to
++ * @start: start of area
++ * @len: size of area
++ *
++ * This is a simplified io_remap_pfn_range() for common driver use. The
++ * driver just needs to give us the physical memory range to be mapped,
++ * we'll figure out the rest from the vma information.
++ *
++ * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
++ * whatever write-combining details or similar.
++ */
++int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
++{
++ unsigned long vm_len, pfn, pages;
++
++ /* Check that the physical memory area passed in looks valid */
++ if (start + len < start)
++ return -EINVAL;
++ /*
++ * You *really* shouldn't map things that aren't page-aligned,
++ * but we've historically allowed it because IO memory might
++ * just have smaller alignment.
++ */
++ len += start & ~PAGE_MASK;
++ pfn = start >> PAGE_SHIFT;
++ pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
++ if (pfn + pages < pfn)
++ return -EINVAL;
++
++ /* We start the mapping 'vm_pgoff' pages into the area */
++ if (vma->vm_pgoff > pages)
++ return -EINVAL;
++ pfn += vma->vm_pgoff;
++ pages -= vma->vm_pgoff;
++
++ /* Can we fit all of the mapping? */
++ vm_len = vma->vm_end - vma->vm_start;
++ if (vm_len >> PAGE_SHIFT > pages)
++ return -EINVAL;
++
++ /* Ok, let it rip */
++ return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
++}
++EXPORT_SYMBOL(vm_iomap_memory);
++
+ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, unsigned long end,
+ pte_fn_t fn, void *data)
+diff --git a/net/atm/common.c b/net/atm/common.c
+index 0ca06e8..43b6bfe 100644
+--- a/net/atm/common.c
++++ b/net/atm/common.c
+@@ -500,6 +500,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+ struct sk_buff *skb;
+ int copied, error = -EINVAL;
+
++ msg->msg_namelen = 0;
++
+ if (sock->state != SS_CONNECTED)
+ return -ENOTCONN;
+ if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index b04a6ef..86ac37f 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -1641,6 +1641,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
+ ax25_address src;
+ const unsigned char *mac = skb_mac_header(skb);
+
++ memset(sax, 0, sizeof(struct full_sockaddr_ax25));
+ ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
+ &digi, NULL, NULL);
+ sax->sax25_family = AF_AX25;
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index 062124c..838f113 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -245,6 +245,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (flags & (MSG_OOB))
+ return -EOPNOTSUPP;
+
++ msg->msg_namelen = 0;
++
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+@@ -252,8 +254,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ return err;
+ }
+
+- msg->msg_namelen = 0;
+-
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 14c4864..82ce164 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -627,6 +627,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
+ rfcomm_dlc_accept(d);
++ msg->msg_namelen = 0;
+ return 0;
+ }
+
+diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
+index a986280..53a8e37 100644
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -320,6 +320,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (m->msg_flags&MSG_OOB)
+ goto read_error;
+
++ m->msg_namelen = 0;
++
+ skb = skb_recv_datagram(sk, flags, 0 , &ret);
+ if (!skb)
+ goto read_error;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 720aea0..8e455b8 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1619,6 +1619,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+ skb->mark = 0;
+ secpath_reset(skb);
+ nf_reset(skb);
++ nf_reset_trace(skb);
+ return netif_rx(skb);
+ }
+ EXPORT_SYMBOL_GPL(dev_forward_skb);
+diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
+index 0387da0..cd09414 100644
+--- a/net/core/dev_addr_lists.c
++++ b/net/core/dev_addr_lists.c
+@@ -57,7 +57,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
+ ha->type = addr_type;
+ ha->refcount = 1;
+ ha->global_use = global;
+- ha->synced = false;
++ ha->synced = 0;
+ list_add_tail_rcu(&ha->list, &list->list);
+ list->count++;
+ return 0;
+@@ -155,7 +155,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+ addr_len, ha->type);
+ if (err)
+ break;
+- ha->synced = true;
++ ha->synced++;
+ ha->refcount++;
+ } else if (ha->refcount == 1) {
+ __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
+@@ -176,7 +176,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+ if (ha->synced) {
+ __hw_addr_del(to_list, ha->addr,
+ addr_len, ha->type);
+- ha->synced = false;
++ ha->synced--;
+ __hw_addr_del(from_list, ha->addr,
+ addr_len, ha->type);
+ }
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 3b5e680..5b7d5f2 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1064,7 +1064,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ rcu_read_lock();
+ cb->seq = net->dev_base_seq;
+
+- if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
++ if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+ ifla_policy) >= 0) {
+
+ if (tb[IFLA_EXT_MASK])
+@@ -1907,7 +1907,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
+ u32 ext_filter_mask = 0;
+ u16 min_ifinfo_dump_size = 0;
+
+- if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
++ if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+ ifla_policy) >= 0) {
+ if (tb[IFLA_EXT_MASK])
+ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index 530787b..238fc3b 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -137,8 +137,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
+
+ /* skb is pure payload to encrypt */
+
+- err = -ENOMEM;
+-
+ esp = x->data;
+ aead = esp->aead;
+ alen = crypto_aead_authsize(aead);
+@@ -174,8 +172,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
+ }
+
+ tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
+- if (!tmp)
++ if (!tmp) {
++ err = -ENOMEM;
+ goto error;
++ }
+
+ seqhi = esp_tmp_seqhi(tmp);
+ iv = esp_tmp_iv(aead, tmp, seqhilen);
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index b2cfe83..8f441b2 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -251,8 +251,7 @@ static void ip_expire(unsigned long arg)
+ if (!head->dev)
+ goto out_rcu_unlock;
+
+- /* skb dst is stale, drop it, and perform route lookup again */
+- skb_dst_drop(head);
++ /* skb has no dst, perform route lookup again */
+ iph = ip_hdr(head);
+ err = ip_route_input_noref(head, iph->daddr, iph->saddr,
+ iph->tos, head->dev);
+@@ -518,8 +517,16 @@ found:
+ qp->q.last_in |= INET_FRAG_FIRST_IN;
+
+ if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+- qp->q.meat == qp->q.len)
+- return ip_frag_reasm(qp, prev, dev);
++ qp->q.meat == qp->q.len) {
++ unsigned long orefdst = skb->_skb_refdst;
++
++ skb->_skb_refdst = 0UL;
++ err = ip_frag_reasm(qp, prev, dev);
++ skb->_skb_refdst = orefdst;
++ return err;
++ }
++
++ skb_dst_drop(skb);
+
+ write_lock(&ip4_frags.lock);
+ list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index 769c0e9..8a1bed2 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -347,8 +347,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+ * hasn't changed since we received the original syn, but I see
+ * no easy way to do this.
+ */
+- flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
+- RT_SCOPE_UNIVERSE, IPPROTO_TCP,
++ flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
++ RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
+ inet_sk_flowi_flags(sk),
+ (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
+ ireq->loc_addr, th->source, th->dest);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 3124e17..872b41d 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -115,6 +115,7 @@ int sysctl_tcp_abc __read_mostly;
+ #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
+ #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */
+ #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
++#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
+
+ #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
+ #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
+@@ -3723,6 +3724,27 @@ static void tcp_send_challenge_ack(struct sock *sk)
+ }
+ }
+
++static void tcp_store_ts_recent(struct tcp_sock *tp)
++{
++ tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
++ tp->rx_opt.ts_recent_stamp = get_seconds();
++}
++
++static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
++{
++ if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
++ /* PAWS bug workaround wrt. ACK frames, the PAWS discard
++ * extra check below makes sure this can only happen
++ * for pure ACK frames. -DaveM
++ *
++ * Not only, also it occurs for expired timestamps.
++ */
++
++ if (tcp_paws_check(&tp->rx_opt, 0))
++ tcp_store_ts_recent(tp);
++ }
++}
++
+ /* This routine deals with incoming acks, but not outgoing ones. */
+ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ {
+@@ -3771,6 +3793,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ prior_fackets = tp->fackets_out;
+ prior_in_flight = tcp_packets_in_flight(tp);
+
++ /* ts_recent update must be made after we are sure that the packet
++ * is in window.
++ */
++ if (flag & FLAG_UPDATE_TS_RECENT)
++ tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
++
+ if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
+ /* Window is constant, pure forward advance.
+ * No more checks are required.
+@@ -4061,27 +4089,6 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
+ EXPORT_SYMBOL(tcp_parse_md5sig_option);
+ #endif
+
+-static inline void tcp_store_ts_recent(struct tcp_sock *tp)
+-{
+- tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
+- tp->rx_opt.ts_recent_stamp = get_seconds();
+-}
+-
+-static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
+-{
+- if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
+- /* PAWS bug workaround wrt. ACK frames, the PAWS discard
+- * extra check below makes sure this can only happen
+- * for pure ACK frames. -DaveM
+- *
+- * Not only, also it occurs for expired timestamps.
+- */
+-
+- if (tcp_paws_check(&tp->rx_opt, 0))
+- tcp_store_ts_recent(tp);
+- }
+-}
+-
+ /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
+ *
+ * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
+@@ -5552,14 +5559,10 @@ slow_path:
+ return 0;
+
+ step5:
+- if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
++ if (th->ack &&
++ tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
+ goto discard;
+
+- /* ts_recent update must be made after we are sure that the packet
+- * is in window.
+- */
+- tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+-
+ tcp_rcv_rtt_measure_ts(sk, skb);
+
+ /* Process urgent data. */
+@@ -5923,7 +5926,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+
+ /* step 5: check the ACK field */
+ if (th->ack) {
+- int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
++ int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
++ FLAG_UPDATE_TS_RECENT) > 0;
+
+ switch (sk->sk_state) {
+ case TCP_SYN_RECV:
+@@ -6030,11 +6034,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ } else
+ goto discard;
+
+- /* ts_recent update must be made after we are sure that the packet
+- * is in window.
+- */
+- tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
+-
+ /* step 6: check the URG bit */
+ tcp_urg(sk, skb, th);
+
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 8589c2d..d84033b 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2404,6 +2404,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
+ static void init_loopback(struct net_device *dev)
+ {
+ struct inet6_dev *idev;
++ struct net_device *sp_dev;
++ struct inet6_ifaddr *sp_ifa;
++ struct rt6_info *sp_rt;
+
+ /* ::1 */
+
+@@ -2415,6 +2418,30 @@ static void init_loopback(struct net_device *dev)
+ }
+
+ add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
++
++ /* Add routes to other interface's IPv6 addresses */
++ for_each_netdev(dev_net(dev), sp_dev) {
++ if (!strcmp(sp_dev->name, dev->name))
++ continue;
++
++ idev = __in6_dev_get(sp_dev);
++ if (!idev)
++ continue;
++
++ read_lock_bh(&idev->lock);
++ list_for_each_entry(sp_ifa, &idev->addr_list, if_list) {
++
++ if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
++ continue;
++
++ sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
++
++ /* Failure cases are ignored */
++ if (!IS_ERR(sp_rt))
++ ip6_ins_rt(sp_rt);
++ }
++ read_unlock_bh(&idev->lock);
++ }
+ }
+
+ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index 2b0a4ca..411fe2c 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -386,8 +386,17 @@ found:
+ }
+
+ if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+- fq->q.meat == fq->q.len)
+- return ip6_frag_reasm(fq, prev, dev);
++ fq->q.meat == fq->q.len) {
++ int res;
++ unsigned long orefdst = skb->_skb_refdst;
++
++ skb->_skb_refdst = 0UL;
++ res = ip6_frag_reasm(fq, prev, dev);
++ skb->_skb_refdst = orefdst;
++ return res;
++ }
++
++ skb_dst_drop(skb);
+
+ write_lock(&ip6_frags.lock);
+ list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index f4b49c5..91821e9 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
+
+ IRDA_DEBUG(4, "%s()\n", __func__);
+
++ msg->msg_namelen = 0;
++
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+ if (!skb)
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index cf98d62..e836140 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -1356,6 +1356,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ int blen;
+ int err = 0;
+
++ msg->msg_namelen = 0;
++
+ if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
+ skb_queue_empty(&iucv->backlog_skb_q) &&
+ skb_queue_empty(&sk->sk_receive_queue) &&
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 99a60d5..e5565c7 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -720,6 +720,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
+ int target; /* Read at least this many bytes */
+ long timeo;
+
++ msg->msg_namelen = 0;
++
+ lock_sock(sk);
+ copied = -ENOTCONN;
+ if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index f156382..3df7c5a 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -1178,6 +1178,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
+ }
+
+ if (sax != NULL) {
++ memset(sax, 0, sizeof(*sax));
+ sax->sax25_family = AF_NETROM;
+ skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
+ AX25_ADDR_LEN);
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index f9ea925..1f96fb9 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -1258,6 +1258,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
+ skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+
+ if (srose != NULL) {
++ memset(srose, 0, msg->msg_namelen);
+ srose->srose_family = AF_ROSE;
+ srose->srose_addr = rose->dest_addr;
+ srose->srose_call = rose->dest_call;
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+index 599f67a..b7cddb9 100644
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -963,8 +963,11 @@ cbq_dequeue(struct Qdisc *sch)
+ cbq_update(q);
+ if ((incr -= incr2) < 0)
+ incr = 0;
++ q->now += incr;
++ } else {
++ if (now > q->now)
++ q->now = now;
+ }
+- q->now += incr;
+ q->now_rt = now;
+
+ for (;;) {
+diff --git a/net/sctp/auth.c b/net/sctp/auth.c
+index bf81204..333926d 100644
+--- a/net/sctp/auth.c
++++ b/net/sctp/auth.c
+@@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key)
+ return;
+
+ if (atomic_dec_and_test(&key->refcnt)) {
+- kfree(key);
++ kzfree(key);
+ SCTP_DBG_OBJCNT_DEC(keys);
+ }
+ }
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 42b8324..fdf34af 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -829,6 +829,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
+ if (addr) {
+ addr->family = AF_TIPC;
+ addr->addrtype = TIPC_ADDR_ID;
++ memset(&addr->addr, 0, sizeof(addr->addr));
+ addr->addr.id.ref = msg_origport(msg);
+ addr->addr.id.node = msg_orignode(msg);
+ addr->addr.name.domain = 0; /* could leave uninitialized */
+@@ -948,6 +949,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
+ goto exit;
+ }
+
++ /* will be updated in set_orig_addr() if needed */
++ m->msg_namelen = 0;
++
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ restart:
+
+@@ -1074,6 +1078,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
+ goto exit;
+ }
+
++ /* will be updated in set_orig_addr() if needed */
++ m->msg_namelen = 0;
++
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ restart:
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 18978b6..5611563 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1956,7 +1956,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if ((UNIXCB(skb).pid != siocb->scm->pid) ||
+ (UNIXCB(skb).cred != siocb->scm->cred))
+ break;
+- } else {
++ } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
+ /* Copy credentials */
+ scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
+ check_creds = 1;
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 0b08905..21958cd 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -853,7 +853,7 @@ static void handle_channel(struct wiphy *wiphy,
+ return;
+
+ REG_DBG_PRINT("Disabling freq %d MHz\n", chan->center_freq);
+- chan->flags = IEEE80211_CHAN_DISABLED;
++ chan->flags |= IEEE80211_CHAN_DISABLED;
+ return;
+ }
+
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 7ada40e..638600b 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -3204,18 +3204,10 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
+ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
+ struct vm_area_struct *area)
+ {
+- long size;
+- unsigned long offset;
++ struct snd_pcm_runtime *runtime = substream->runtime;;
+
+ area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
+- area->vm_flags |= VM_IO;
+- size = area->vm_end - area->vm_start;
+- offset = area->vm_pgoff << PAGE_SHIFT;
+- if (io_remap_pfn_range(area, area->vm_start,
+- (substream->runtime->dma_addr + offset) >> PAGE_SHIFT,
+- size, area->vm_page_prot))
+- return -EAGAIN;
+- return 0;
++ return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
+ }
+
+ EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
+diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
+index ebbf63c..b7cf246 100644
+--- a/sound/soc/codecs/max98088.c
++++ b/sound/soc/codecs/max98088.c
+@@ -2007,7 +2007,7 @@ static int max98088_probe(struct snd_soc_codec *codec)
+ ret);
+ goto err_access;
+ }
+- dev_info(codec->dev, "revision %c\n", ret + 'A');
++ dev_info(codec->dev, "revision %c\n", ret - 0x40 + 'A');
+
+ snd_soc_write(codec, M98088_REG_51_PWR_SYS, M98088_PWRSV);
+
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 566acb3..acb7fac 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -611,7 +611,9 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
+ int err = -ENODEV;
+
+ down_read(&chip->shutdown_rwsem);
+- if (!chip->shutdown && !chip->probing)
++ if (chip->probing)
++ err = 0;
++ else if (!chip->shutdown)
+ err = usb_autopm_get_interface(chip->pm_intf);
+ up_read(&chip->shutdown_rwsem);
+
+diff --git a/sound/usb/card.h b/sound/usb/card.h
+index 665e297..2b7559c 100644
+--- a/sound/usb/card.h
++++ b/sound/usb/card.h
+@@ -73,6 +73,7 @@ struct snd_usb_substream {
+ unsigned int fill_max: 1; /* fill max packet size always */
+ unsigned int txfr_quirk:1; /* allow sub-frame alignment */
+ unsigned int fmt_type; /* USB audio format type (1-3) */
++ unsigned int pkt_offset_adj; /* Bytes to drop from beginning of packets (for non-compliant devices) */
+
+ unsigned int running: 1; /* running status */
+
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 9ab2b3e..5ebe8c4 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -458,7 +458,7 @@ static int retire_capture_urb(struct snd_usb_substream *subs,
+ stride = runtime->frame_bits >> 3;
+
+ for (i = 0; i < urb->number_of_packets; i++) {
+- cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
++ cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset + subs->pkt_offset_adj;
+ if (urb->iso_frame_desc[i].status && printk_ratelimit()) {
+ snd_printdd("frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
+ // continue;
+@@ -898,6 +898,7 @@ void snd_usb_init_substream(struct snd_usb_stream *as,
+ subs->speed = snd_usb_get_speed(subs->dev);
+ if (subs->speed >= USB_SPEED_HIGH)
+ subs->ops.prepare_sync = prepare_capture_sync_urb_hs;
++ subs->pkt_offset_adj = 0;
+
+ snd_usb_set_pcm_ops(as->pcm, stream);
+
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index 34b9bb7..e5fee18 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -126,7 +126,6 @@ struct snd_usb_midi {
+ struct snd_usb_midi_in_endpoint *in;
+ } endpoints[MIDI_MAX_ENDPOINTS];
+ unsigned long input_triggered;
+- bool autopm_reference;
+ unsigned int opened[2];
+ unsigned char disconnected;
+ unsigned char input_running;
+@@ -1040,7 +1039,6 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir,
+ {
+ struct snd_usb_midi* umidi = substream->rmidi->private_data;
+ struct snd_kcontrol *ctl;
+- int err;
+
+ down_read(&umidi->disc_rwsem);
+ if (umidi->disconnected) {
+@@ -1051,13 +1049,6 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir,
+ mutex_lock(&umidi->mutex);
+ if (open) {
+ if (!umidi->opened[0] && !umidi->opened[1]) {
+- err = usb_autopm_get_interface(umidi->iface);
+- umidi->autopm_reference = err >= 0;
+- if (err < 0 && err != -EACCES) {
+- mutex_unlock(&umidi->mutex);
+- up_read(&umidi->disc_rwsem);
+- return -EIO;
+- }
+ if (umidi->roland_load_ctl) {
+ ctl = umidi->roland_load_ctl;
+ ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+@@ -1080,8 +1071,6 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir,
+ snd_ctl_notify(umidi->card,
+ SNDRV_CTL_EVENT_MASK_INFO, &ctl->id);
+ }
+- if (umidi->autopm_reference)
+- usb_autopm_put_interface(umidi->iface);
+ }
+ }
+ mutex_unlock(&umidi->mutex);
+@@ -2256,6 +2245,8 @@ int snd_usbmidi_create(struct snd_card *card,
+ return err;
+ }
+
++ usb_autopm_get_interface_no_resume(umidi->iface);
++
+ list_add_tail(&umidi->list, midi_list);
+ return 0;
+ }
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index dfbd65d..42eeee8 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -744,6 +744,7 @@ static void set_format_emu_quirk(struct snd_usb_substream *subs,
+ break;
+ }
+ snd_emuusb_set_samplerate(subs->stream->chip, emu_samplerate_id);
++ subs->pkt_offset_adj = (emu_samplerate_id >= EMU_QUIRK_SR_176400HZ) ? 4 : 0;
+ }
+
+ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 5ff8010..33a335b 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -168,6 +168,14 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
+ if (!csep && altsd->bNumEndpoints >= 2)
+ csep = snd_usb_find_desc(alts->endpoint[1].extra, alts->endpoint[1].extralen, NULL, USB_DT_CS_ENDPOINT);
+
++ /*
++ * If we can't locate the USB_DT_CS_ENDPOINT descriptor in the extra
++ * bytes after the first endpoint, go search the entire interface.
++ * Some devices have it directly *before* the standard endpoint.
++ */
++ if (!csep)
++ csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT);
++
+ if (!csep || csep->bLength < 7 ||
+ csep->bDescriptorSubtype != UAC_EP_GENERAL) {
+ snd_printk(KERN_WARNING "%d:%u:%d : no or invalid"
diff --git a/1045_linux-3.2.46.patch b/1045_linux-3.2.46.patch
new file mode 100644
index 00000000..bc10efd0
--- /dev/null
+++ b/1045_linux-3.2.46.patch
@@ -0,0 +1,3142 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 897f223..2ba8272 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -734,6 +734,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ edd= [EDD]
+ Format: {"off" | "on" | "skip[mbr]"}
+
++ efi_no_storage_paranoia [EFI; X86]
++ Using this parameter you can use more than 50% of
++ your efi variable storage. Use this parameter only if
++ you are really sure that your UEFI does sane gc and
++ fulfills the spec otherwise your board may brick.
++
+ eisa_irq_edge= [PARISC,HW]
+ See header of drivers/parisc/eisa.c.
+
+diff --git a/Makefile b/Makefile
+index 9072fee..f600582 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 45
++SUBLEVEL = 46
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/configs/at91sam9g45_defconfig b/arch/arm/configs/at91sam9g45_defconfig
+index 606d48f..8aab786 100644
+--- a/arch/arm/configs/at91sam9g45_defconfig
++++ b/arch/arm/configs/at91sam9g45_defconfig
+@@ -173,7 +173,6 @@ CONFIG_MMC=y
+ # CONFIG_MMC_BLOCK_BOUNCE is not set
+ CONFIG_SDIO_UART=m
+ CONFIG_MMC_ATMELMCI=y
+-CONFIG_MMC_ATMELMCI_DMA=y
+ CONFIG_LEDS_ATMEL_PWM=y
+ CONFIG_LEDS_GPIO=y
+ CONFIG_LEDS_TRIGGER_TIMER=y
+diff --git a/arch/arm/mach-kirkwood/ts219-setup.c b/arch/arm/mach-kirkwood/ts219-setup.c
+index 262c034..3d0737e 100644
+--- a/arch/arm/mach-kirkwood/ts219-setup.c
++++ b/arch/arm/mach-kirkwood/ts219-setup.c
+@@ -124,7 +124,7 @@ static void __init qnap_ts219_init(void)
+ static int __init ts219_pci_init(void)
+ {
+ if (machine_is_ts219())
+- kirkwood_pcie_init(KW_PCIE0);
++ kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0);
+
+ return 0;
+ }
+diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
+index 8a6886a..c72b083 100644
+--- a/arch/arm/plat-orion/common.c
++++ b/arch/arm/plat-orion/common.c
+@@ -347,7 +347,7 @@ static struct resource orion_ge10_shared_resources[] = {
+
+ static struct platform_device orion_ge10_shared = {
+ .name = MV643XX_ETH_SHARED_NAME,
+- .id = 1,
++ .id = 2,
+ .dev = {
+ .platform_data = &orion_ge10_shared_data,
+ },
+@@ -362,8 +362,8 @@ static struct resource orion_ge10_resources[] = {
+
+ static struct platform_device orion_ge10 = {
+ .name = MV643XX_ETH_NAME,
+- .id = 1,
+- .num_resources = 2,
++ .id = 2,
++ .num_resources = 1,
+ .resource = orion_ge10_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+@@ -401,7 +401,7 @@ static struct resource orion_ge11_shared_resources[] = {
+
+ static struct platform_device orion_ge11_shared = {
+ .name = MV643XX_ETH_SHARED_NAME,
+- .id = 1,
++ .id = 3,
+ .dev = {
+ .platform_data = &orion_ge11_shared_data,
+ },
+@@ -416,8 +416,8 @@ static struct resource orion_ge11_resources[] = {
+
+ static struct platform_device orion_ge11 = {
+ .name = MV643XX_ETH_NAME,
+- .id = 1,
+- .num_resources = 2,
++ .id = 3,
++ .num_resources = 1,
+ .resource = orion_ge11_resources,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig
+index 19973b0..59e4cc9 100644
+--- a/arch/avr32/configs/favr-32_defconfig
++++ b/arch/avr32/configs/favr-32_defconfig
+@@ -122,7 +122,6 @@ CONFIG_USB_G_SERIAL=m
+ CONFIG_USB_CDC_COMPOSITE=m
+ CONFIG_MMC=y
+ CONFIG_MMC_ATMELMCI=y
+-CONFIG_MMC_ATMELMCI_DMA=y
+ CONFIG_NEW_LEDS=y
+ CONFIG_LEDS_CLASS=y
+ CONFIG_LEDS_ATMEL_PWM=m
+diff --git a/arch/avr32/configs/merisc_defconfig b/arch/avr32/configs/merisc_defconfig
+index 3befab9..65de443 100644
+--- a/arch/avr32/configs/merisc_defconfig
++++ b/arch/avr32/configs/merisc_defconfig
+@@ -102,7 +102,6 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
+ CONFIG_LOGO=y
+ CONFIG_MMC=y
+ CONFIG_MMC_ATMELMCI=y
+-CONFIG_MMC_ATMELMCI_DMA=y
+ CONFIG_NEW_LEDS=y
+ CONFIG_LEDS_CLASS=y
+ CONFIG_LEDS_ATMEL_PWM=y
+diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
+index 596f730..2c94129 100644
+--- a/arch/avr32/kernel/module.c
++++ b/arch/avr32/kernel/module.c
+@@ -264,7 +264,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
+ break;
+ case R_AVR32_GOT18SW:
+ if ((relocation & 0xfffe0003) != 0
+- && (relocation & 0xfffc0003) != 0xffff0000)
++ && (relocation & 0xfffc0000) != 0xfffc0000)
+ return reloc_overflow(module, "R_AVR32_GOT18SW",
+ relocation);
+ relocation >>= 2;
+diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
+index 41f69ae..8c3efd2 100644
+--- a/arch/powerpc/include/asm/rtas.h
++++ b/arch/powerpc/include/asm/rtas.h
+@@ -230,6 +230,8 @@ extern void rtas_progress(char *s, unsigned short hex);
+ extern void rtas_initialize(void);
+ extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
+ extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
++extern int rtas_online_cpus_mask(cpumask_var_t cpus);
++extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
+ extern int rtas_ibm_suspend_me(struct rtas_args *);
+
+ struct rtc_time;
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index 517b1d8..434a180 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -19,6 +19,7 @@
+ #include <linux/init.h>
+ #include <linux/capability.h>
+ #include <linux/delay.h>
++#include <linux/cpu.h>
+ #include <linux/smp.h>
+ #include <linux/completion.h>
+ #include <linux/cpumask.h>
+@@ -716,7 +717,6 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
+ int cpu;
+
+ slb_set_size(SLB_MIN_SIZE);
+- stop_topology_update();
+ printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
+
+ while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
+@@ -732,7 +732,6 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
+ rc = atomic_read(&data->error);
+
+ atomic_set(&data->error, rc);
+- start_topology_update();
+ pSeries_coalesce_init();
+
+ if (wake_when_done) {
+@@ -811,6 +810,95 @@ static void rtas_percpu_suspend_me(void *info)
+ __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
+ }
+
++enum rtas_cpu_state {
++ DOWN,
++ UP,
++};
++
++#ifndef CONFIG_SMP
++static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
++ cpumask_var_t cpus)
++{
++ if (!cpumask_empty(cpus)) {
++ cpumask_clear(cpus);
++ return -EINVAL;
++ } else
++ return 0;
++}
++#else
++/* On return cpumask will be altered to indicate CPUs changed.
++ * CPUs with states changed will be set in the mask,
++ * CPUs with status unchanged will be unset in the mask. */
++static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
++ cpumask_var_t cpus)
++{
++ int cpu;
++ int cpuret = 0;
++ int ret = 0;
++
++ if (cpumask_empty(cpus))
++ return 0;
++
++ for_each_cpu(cpu, cpus) {
++ switch (state) {
++ case DOWN:
++ cpuret = cpu_down(cpu);
++ break;
++ case UP:
++ cpuret = cpu_up(cpu);
++ break;
++ }
++ if (cpuret) {
++ pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
++ __func__,
++ ((state == UP) ? "up" : "down"),
++ cpu, cpuret);
++ if (!ret)
++ ret = cpuret;
++ if (state == UP) {
++ /* clear bits for unchanged cpus, return */
++ cpumask_shift_right(cpus, cpus, cpu);
++ cpumask_shift_left(cpus, cpus, cpu);
++ break;
++ } else {
++ /* clear bit for unchanged cpu, continue */
++ cpumask_clear_cpu(cpu, cpus);
++ }
++ }
++ }
++
++ return ret;
++}
++#endif
++
++int rtas_online_cpus_mask(cpumask_var_t cpus)
++{
++ int ret;
++
++ ret = rtas_cpu_state_change_mask(UP, cpus);
++
++ if (ret) {
++ cpumask_var_t tmp_mask;
++
++ if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
++ return ret;
++
++ /* Use tmp_mask to preserve cpus mask from first failure */
++ cpumask_copy(tmp_mask, cpus);
++ rtas_offline_cpus_mask(tmp_mask);
++ free_cpumask_var(tmp_mask);
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL(rtas_online_cpus_mask);
++
++int rtas_offline_cpus_mask(cpumask_var_t cpus)
++{
++ return rtas_cpu_state_change_mask(DOWN, cpus);
++}
++EXPORT_SYMBOL(rtas_offline_cpus_mask);
++
+ int rtas_ibm_suspend_me(struct rtas_args *args)
+ {
+ long state;
+@@ -818,6 +906,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ struct rtas_suspend_me_data data;
+ DECLARE_COMPLETION_ONSTACK(done);
++ cpumask_var_t offline_mask;
++ int cpuret;
+
+ if (!rtas_service_present("ibm,suspend-me"))
+ return -ENOSYS;
+@@ -841,12 +931,26 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
+ return 0;
+ }
+
++ if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
++ return -ENOMEM;
++
+ atomic_set(&data.working, 0);
+ atomic_set(&data.done, 0);
+ atomic_set(&data.error, 0);
+ data.token = rtas_token("ibm,suspend-me");
+ data.complete = &done;
+
++ /* All present CPUs must be online */
++ cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
++ cpuret = rtas_online_cpus_mask(offline_mask);
++ if (cpuret) {
++ pr_err("%s: Could not bring present CPUs online.\n", __func__);
++ atomic_set(&data.error, cpuret);
++ goto out;
++ }
++
++ stop_topology_update();
++
+ /* Call function on all CPUs. One of us will make the
+ * rtas call
+ */
+@@ -858,6 +962,16 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
+ if (atomic_read(&data.error) != 0)
+ printk(KERN_ERR "Error doing global join\n");
+
++ start_topology_update();
++
++ /* Take down CPUs not online prior to suspend */
++ cpuret = rtas_offline_cpus_mask(offline_mask);
++ if (cpuret)
++ pr_warn("%s: Could not restore CPUs to offline state.\n",
++ __func__);
++
++out:
++ free_cpumask_var(offline_mask);
+ return atomic_read(&data.error);
+ }
+ #else /* CONFIG_PPC_PSERIES */
+diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
+index d3de084..55a4771 100644
+--- a/arch/powerpc/platforms/pseries/suspend.c
++++ b/arch/powerpc/platforms/pseries/suspend.c
+@@ -16,6 +16,7 @@
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
++#include <linux/cpu.h>
+ #include <linux/delay.h>
+ #include <linux/suspend.h>
+ #include <linux/stat.h>
+@@ -24,6 +25,7 @@
+ #include <asm/machdep.h>
+ #include <asm/mmu.h>
+ #include <asm/rtas.h>
++#include <asm/topology.h>
+
+ static u64 stream_id;
+ static struct sys_device suspend_sysdev;
+@@ -125,11 +127,15 @@ static ssize_t store_hibernate(struct sysdev_class *classdev,
+ struct sysdev_class_attribute *attr,
+ const char *buf, size_t count)
+ {
++ cpumask_var_t offline_mask;
+ int rc;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
++ if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
++ return -ENOMEM;
++
+ stream_id = simple_strtoul(buf, NULL, 16);
+
+ do {
+@@ -138,13 +144,33 @@ static ssize_t store_hibernate(struct sysdev_class *classdev,
+ ssleep(1);
+ } while (rc == -EAGAIN);
+
+- if (!rc)
++ if (!rc) {
++ /* All present CPUs must be online */
++ cpumask_andnot(offline_mask, cpu_present_mask,
++ cpu_online_mask);
++ rc = rtas_online_cpus_mask(offline_mask);
++ if (rc) {
++ pr_err("%s: Could not bring present CPUs online.\n",
++ __func__);
++ goto out;
++ }
++
++ stop_topology_update();
+ rc = pm_suspend(PM_SUSPEND_MEM);
++ start_topology_update();
++
++ /* Take down CPUs not online prior to suspend */
++ if (!rtas_offline_cpus_mask(offline_mask))
++ pr_warn("%s: Could not restore CPUs to offline "
++ "state.\n", __func__);
++ }
+
+ stream_id = 0;
+
+ if (!rc)
+ rc = count;
++out:
++ free_cpumask_var(offline_mask);
+ return rc;
+ }
+
+diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
+index aa365c5..5888f1b 100644
+--- a/arch/um/include/asm/pgtable.h
++++ b/arch/um/include/asm/pgtable.h
+@@ -69,6 +69,8 @@ extern unsigned long end_iomem;
+ #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+ #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
+
++#define io_remap_pfn_range remap_pfn_range
++
+ /*
+ * The i386 can't do page protection for execute, and considers that the same
+ * are read.
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index 429e0c9..fb2eb32 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -160,10 +160,6 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
+ u64 arch_irq_stat(void)
+ {
+ u64 sum = atomic_read(&irq_err_count);
+-
+-#ifdef CONFIG_X86_IO_APIC
+- sum += atomic_read(&irq_mis_count);
+-#endif
+ return sum;
+ }
+
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 407789b..aac5ea7 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -4882,6 +4882,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
+ if (err != EMULATE_DONE)
+ return 0;
+
++ if (vcpu->arch.halt_request) {
++ vcpu->arch.halt_request = 0;
++ ret = kvm_emulate_halt(vcpu);
++ goto out;
++ }
++
+ if (signal_pending(current))
+ goto out;
+ if (need_resched())
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 1de542b..07ef7e8 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -101,6 +101,15 @@ static int __init setup_add_efi_memmap(char *arg)
+ }
+ early_param("add_efi_memmap", setup_add_efi_memmap);
+
++static bool efi_no_storage_paranoia;
++
++static int __init setup_storage_paranoia(char *arg)
++{
++ efi_no_storage_paranoia = true;
++ return 0;
++}
++early_param("efi_no_storage_paranoia", setup_storage_paranoia);
++
+
+ static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
+ {
+@@ -815,3 +824,37 @@ u64 efi_mem_attributes(unsigned long phys_addr)
+ }
+ return 0;
+ }
++
++/*
++ * Some firmware has serious problems when using more than 50% of the EFI
++ * variable store, i.e. it triggers bugs that can brick machines. Ensure that
++ * we never use more than this safe limit.
++ *
++ * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
++ * store.
++ */
++efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
++{
++ efi_status_t status;
++ u64 storage_size, remaining_size, max_size;
++
++ status = efi.query_variable_info(attributes, &storage_size,
++ &remaining_size, &max_size);
++ if (status != EFI_SUCCESS)
++ return status;
++
++ if (!max_size && remaining_size > size)
++ printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
++ " is returning MaxVariableSize=0\n");
++
++ if (!storage_size || size > remaining_size ||
++ (max_size && size > max_size))
++ return EFI_OUT_OF_RESOURCES;
++
++ if (!efi_no_storage_paranoia &&
++ (remaining_size - size) < (storage_size / 2))
++ return EFI_OUT_OF_RESOURCES;
++
++ return EFI_SUCCESS;
++}
++EXPORT_SYMBOL_GPL(efi_query_variable_store);
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 044f5d9..5189fe8 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -129,6 +129,21 @@ static void xen_vcpu_setup(int cpu)
+
+ BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
+
++ /*
++ * This path is called twice on PVHVM - first during bootup via
++ * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
++ * hotplugged: cpu_up -> xen_hvm_cpu_notify.
++ * As we can only do the VCPUOP_register_vcpu_info once lets
++ * not over-write its result.
++ *
++ * For PV it is called during restore (xen_vcpu_restore) and bootup
++ * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
++ * use this function.
++ */
++ if (xen_hvm_domain()) {
++ if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
++ return;
++ }
+ if (cpu < MAX_VIRT_CPUS)
+ per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+
+diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
+index f915a7f..b334f54 100644
+--- a/drivers/acpi/acpica/exfldio.c
++++ b/drivers/acpi/acpica/exfldio.c
+@@ -702,7 +702,19 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
+
+ if ((obj_desc->common_field.start_field_bit_offset == 0) &&
+ (obj_desc->common_field.bit_length == access_bit_width)) {
+- status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ);
++ if (buffer_length >= sizeof(u64)) {
++ status =
++ acpi_ex_field_datum_io(obj_desc, 0, buffer,
++ ACPI_READ);
++ } else {
++ /* Use raw_datum (u64) to handle buffers < 64 bits */
++
++ status =
++ acpi_ex_field_datum_io(obj_desc, 0, &raw_datum,
++ ACPI_READ);
++ ACPI_MEMCPY(buffer, &raw_datum, buffer_length);
++ }
++
+ return_ACPI_STATUS(status);
+ }
+
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index d2519b2..51de186 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -217,7 +217,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
+ static int ec_poll(struct acpi_ec *ec)
+ {
+ unsigned long flags;
+- int repeat = 2; /* number of command restarts */
++ int repeat = 5; /* number of command restarts */
+ while (repeat--) {
+ unsigned long delay = jiffies +
+ msecs_to_jiffies(ec_delay);
+@@ -235,8 +235,6 @@ static int ec_poll(struct acpi_ec *ec)
+ }
+ advance_transaction(ec, acpi_ec_read_status(ec));
+ } while (time_before(jiffies, delay));
+- if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
+- break;
+ pr_debug(PREFIX "controller reset, restart transaction\n");
+ spin_lock_irqsave(&ec->curr_lock, flags);
+ start_transaction(ec);
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index d9c0199..53e28a9 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -164,6 +164,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"),
+ },
+ },
++ {
++ .callback = video_detect_force_vendor,
++ .ident = "Asus UL30A",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
++ },
++ },
+ { },
+ };
+
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index d22119d..968a0d4 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -117,13 +117,13 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
+
+ spin_lock(&brd->brd_lock);
+ idx = sector >> PAGE_SECTORS_SHIFT;
++ page->index = idx;
+ if (radix_tree_insert(&brd->brd_pages, idx, page)) {
+ __free_page(page);
+ page = radix_tree_lookup(&brd->brd_pages, idx);
+ BUG_ON(!page);
+ BUG_ON(page->index != idx);
+- } else
+- page->index = idx;
++ }
+ spin_unlock(&brd->brd_lock);
+
+ radix_tree_preload_end();
+diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
+index 43beaca..13cbdd3 100644
+--- a/drivers/block/drbd/drbd_receiver.c
++++ b/drivers/block/drbd/drbd_receiver.c
+@@ -2225,7 +2225,6 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
+ if (hg == -1 && mdev->state.role == R_PRIMARY) {
+ enum drbd_state_rv rv2;
+
+- drbd_set_role(mdev, R_SECONDARY, 0);
+ /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
+ * we might be here in C_WF_REPORT_PARAMS which is transient.
+ * we do not need to wait for the after state change work either. */
+diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
+index 3ed20e8..92ce302 100644
+--- a/drivers/char/ipmi/ipmi_bt_sm.c
++++ b/drivers/char/ipmi/ipmi_bt_sm.c
+@@ -95,9 +95,9 @@ struct si_sm_data {
+ enum bt_states state;
+ unsigned char seq; /* BT sequence number */
+ struct si_sm_io *io;
+- unsigned char write_data[IPMI_MAX_MSG_LENGTH];
++ unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
+ int write_count;
+- unsigned char read_data[IPMI_MAX_MSG_LENGTH];
++ unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
+ int read_count;
+ int truncated;
+ long timeout; /* microseconds countdown */
+diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
+index 2aa3977..8dde1f5 100644
+--- a/drivers/char/ipmi/ipmi_devintf.c
++++ b/drivers/char/ipmi/ipmi_devintf.c
+@@ -838,13 +838,25 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
+ return ipmi_ioctl(filep, cmd, arg);
+ }
+ }
++
++static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
++ unsigned long arg)
++{
++ int ret;
++
++ mutex_lock(&ipmi_mutex);
++ ret = compat_ipmi_ioctl(filep, cmd, arg);
++ mutex_unlock(&ipmi_mutex);
++
++ return ret;
++}
+ #endif
+
+ static const struct file_operations ipmi_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = ipmi_unlocked_ioctl,
+ #ifdef CONFIG_COMPAT
+- .compat_ioctl = compat_ipmi_ioctl,
++ .compat_ioctl = unlocked_compat_ipmi_ioctl,
+ #endif
+ .open = ipmi_open,
+ .release = ipmi_release,
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 8ae9235..b651733 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -889,16 +889,24 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
+ if (r->entropy_count / 8 < min + reserved) {
+ nbytes = 0;
+ } else {
++ int entropy_count, orig;
++retry:
++ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ /* If limited, never pull more than available */
+- if (r->limit && nbytes + reserved >= r->entropy_count / 8)
+- nbytes = r->entropy_count/8 - reserved;
+-
+- if (r->entropy_count / 8 >= nbytes + reserved)
+- r->entropy_count -= nbytes*8;
+- else
+- r->entropy_count = reserved;
++ if (r->limit && nbytes + reserved >= entropy_count / 8)
++ nbytes = entropy_count/8 - reserved;
++
++ if (entropy_count / 8 >= nbytes + reserved) {
++ entropy_count -= nbytes*8;
++ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
++ goto retry;
++ } else {
++ entropy_count = reserved;
++ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
++ goto retry;
++ }
+
+- if (r->entropy_count < random_write_wakeup_thresh) {
++ if (entropy_count < random_write_wakeup_thresh) {
+ wake_up_interruptible(&random_write_wait);
+ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ }
+diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
+index 629c430..49138e7 100644
+--- a/drivers/dma/pch_dma.c
++++ b/drivers/dma/pch_dma.c
+@@ -489,7 +489,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
+ dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
+
+ if (!ret) {
+- ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
++ ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
+ if (ret) {
+ spin_lock(&pd_chan->lock);
+ pd_chan->descs_allocated++;
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index b15c0aa..2a64e69 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -425,24 +425,12 @@ static efi_status_t
+ check_var_size_locked(struct efivars *efivars, u32 attributes,
+ unsigned long size)
+ {
+- u64 storage_size, remaining_size, max_size;
+- efi_status_t status;
+ const struct efivar_operations *fops = efivars->ops;
+
+- if (!efivars->ops->query_variable_info)
++ if (!efivars->ops->query_variable_store)
+ return EFI_UNSUPPORTED;
+
+- status = fops->query_variable_info(attributes, &storage_size,
+- &remaining_size, &max_size);
+-
+- if (status != EFI_SUCCESS)
+- return status;
+-
+- if (!storage_size || size > remaining_size || size > max_size ||
+- (remaining_size - size) < (storage_size / 2))
+- return EFI_OUT_OF_RESOURCES;
+-
+- return status;
++ return fops->query_variable_store(attributes, size);
+ }
+
+ static ssize_t
+@@ -1456,7 +1444,7 @@ efivars_init(void)
+ ops.get_variable = efi.get_variable;
+ ops.set_variable = efi.set_variable;
+ ops.get_next_variable = efi.get_next_variable;
+- ops.query_variable_info = efi.query_variable_info;
++ ops.query_variable_store = efi_query_variable_store;
+ error = register_efivars(&__efivars, &ops, efi_kobj);
+ if (error)
+ goto err_put;
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index c77fc67..ca67338 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1007,50 +1007,56 @@ intel_teardown_mchbar(struct drm_device *dev)
+ release_resource(&dev_priv->mch_res);
+ }
+
+-static unsigned long i915_stolen_to_physical(struct drm_device *dev)
++#define PTE_ADDRESS_MASK 0xfffff000
++#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
++#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
++#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
++#define PTE_MAPPING_TYPE_CACHED (3 << 1)
++#define PTE_MAPPING_TYPE_MASK (3 << 1)
++#define PTE_VALID (1 << 0)
++
++/**
++ * i915_stolen_to_phys - take an offset into stolen memory and turn it into
++ * a physical one
++ * @dev: drm device
++ * @offset: address to translate
++ *
++ * Some chip functions require allocations from stolen space and need the
++ * physical address of the memory in question.
++ */
++static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev_priv->bridge_dev;
+ u32 base;
+
++#if 0
+ /* On the machines I have tested the Graphics Base of Stolen Memory
+- * is unreliable, so on those compute the base by subtracting the
+- * stolen memory from the Top of Low Usable DRAM which is where the
+- * BIOS places the graphics stolen memory.
+- *
+- * On gen2, the layout is slightly different with the Graphics Segment
+- * immediately following Top of Memory (or Top of Usable DRAM). Note
+- * it appears that TOUD is only reported by 865g, so we just use the
+- * top of memory as determined by the e820 probe.
+- *
+- * XXX gen2 requires an unavailable symbol and 945gm fails with
+- * its value of TOLUD.
++ * is unreliable, so compute the base by subtracting the stolen memory
++ * from the Top of Low Usable DRAM which is where the BIOS places
++ * the graphics stolen memory.
+ */
+- base = 0;
+- if (INTEL_INFO(dev)->gen >= 6) {
+- /* Read Base Data of Stolen Memory Register (BDSM) directly.
+- * Note that there is also a MCHBAR miror at 0x1080c0 or
+- * we could use device 2:0x5c instead.
+- */
+- pci_read_config_dword(pdev, 0xB0, &base);
+- base &= ~4095; /* lower bits used for locking register */
+- } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+- /* Read Graphics Base of Stolen Memory directly */
++ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
++ /* top 32bits are reserved = 0 */
+ pci_read_config_dword(pdev, 0xA4, &base);
+-#if 0
+- } else if (IS_GEN3(dev)) {
++ } else {
++ /* XXX presume 8xx is the same as i915 */
++ pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
++ }
++#else
++ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
++ u16 val;
++ pci_read_config_word(pdev, 0xb0, &val);
++ base = val >> 4 << 20;
++ } else {
+ u8 val;
+- /* Stolen is immediately below Top of Low Usable DRAM */
+ pci_read_config_byte(pdev, 0x9c, &val);
+ base = val >> 3 << 27;
+- base -= dev_priv->mm.gtt->stolen_size;
+- } else {
+- /* Stolen is immediately above Top of Memory */
+- base = max_low_pfn_mapped << PAGE_SHIFT;
+-#endif
+ }
++ base -= dev_priv->mm.gtt->stolen_size;
++#endif
+
+- return base;
++ return base + offset;
+ }
+
+ static void i915_warn_stolen(struct drm_device *dev)
+@@ -1075,7 +1081,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ if (!compressed_fb)
+ goto err;
+
+- cfb_base = dev_priv->mm.stolen_base + compressed_fb->start;
++ cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+ if (!cfb_base)
+ goto err_fb;
+
+@@ -1088,7 +1094,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ if (!compressed_llb)
+ goto err_fb;
+
+- ll_base = dev_priv->mm.stolen_base + compressed_llb->start;
++ ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+ if (!ll_base)
+ goto err_llb;
+ }
+@@ -1107,7 +1113,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
+ }
+
+ DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+- (long)cfb_base, (long)ll_base, size >> 20);
++ cfb_base, ll_base, size >> 20);
+ return;
+
+ err_llb:
+@@ -1181,13 +1187,6 @@ static int i915_load_gem_init(struct drm_device *dev)
+ gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+ mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+- dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+- if (dev_priv->mm.stolen_base == 0)
+- return 0;
+-
+- DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n",
+- dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base);
+-
+ /* Basic memrange allocator for stolen space */
+ drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 20cd295..144d37c 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -581,7 +581,6 @@ typedef struct drm_i915_private {
+ unsigned long gtt_start;
+ unsigned long gtt_mappable_end;
+ unsigned long gtt_end;
+- unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+ struct io_mapping *gtt_mapping;
+ int gtt_mtrr;
+diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
+index 1fe98b4..9aa02be 100644
+--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
++++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
+@@ -74,7 +74,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
+ OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
+
+ for (i = 0; i < nr; ++i) {
+- if (DRM_COPY_FROM_USER_UNCHECKED
++ if (DRM_COPY_FROM_USER
+ (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
+ DRM_ERROR("copy cliprect faulted\n");
+ return -EFAULT;
+diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
+index 65a35cf..61ab615 100644
+--- a/drivers/hwmon/abituguru.c
++++ b/drivers/hwmon/abituguru.c
+@@ -1280,14 +1280,18 @@ static int __devinit abituguru_probe(struct platform_device *pdev)
+ pr_info("found Abit uGuru\n");
+
+ /* Register sysfs hooks */
+- for (i = 0; i < sysfs_attr_i; i++)
+- if (device_create_file(&pdev->dev,
+- &data->sysfs_attr[i].dev_attr))
++ for (i = 0; i < sysfs_attr_i; i++) {
++ res = device_create_file(&pdev->dev,
++ &data->sysfs_attr[i].dev_attr);
++ if (res)
+ goto abituguru_probe_error;
+- for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++)
+- if (device_create_file(&pdev->dev,
+- &abituguru_sysfs_attr[i].dev_attr))
++ }
++ for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) {
++ res = device_create_file(&pdev->dev,
++ &abituguru_sysfs_attr[i].dev_attr);
++ if (res)
+ goto abituguru_probe_error;
++ }
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (!IS_ERR(data->hwmon_dev))
+diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
+index 6193349..3c2812f 100644
+--- a/drivers/i2c/busses/i2c-designware-core.c
++++ b/drivers/i2c/busses/i2c-designware-core.c
+@@ -349,7 +349,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
+ /* Enable the adapter */
+ dw_writel(dev, 1, DW_IC_ENABLE);
+
+- /* Enable interrupts */
++ /* Clear and enable interrupts */
++ i2c_dw_clear_int(dev);
+ dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK);
+ }
+
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 0a6806f..a5dfcc0 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -322,6 +322,9 @@ static void __cache_size_refresh(void)
+ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
+ enum data_mode *data_mode)
+ {
++ unsigned noio_flag;
++ void *ptr;
++
+ if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
+ *data_mode = DATA_MODE_SLAB;
+ return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
+@@ -335,7 +338,28 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
+ }
+
+ *data_mode = DATA_MODE_VMALLOC;
+- return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
++
++ /*
++ * __vmalloc allocates the data pages and auxiliary structures with
++ * gfp_flags that were specified, but pagetables are always allocated
++ * with GFP_KERNEL, no matter what was specified as gfp_mask.
++ *
++ * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
++ * all allocations done by this process (including pagetables) are done
++ * as if GFP_NOIO was specified.
++ */
++
++ if (gfp_mask & __GFP_NORETRY) {
++ noio_flag = current->flags & PF_MEMALLOC;
++ current->flags |= PF_MEMALLOC;
++ }
++
++ ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
++
++ if (gfp_mask & __GFP_NORETRY)
++ current->flags = (current->flags & ~PF_MEMALLOC) | noio_flag;
++
++ return ptr;
+ }
+
+ /*
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index 34ec2b5..b4aaa7b 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1117,6 +1117,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
+ if (!s->pending_pool) {
+ ti->error = "Could not allocate mempool for pending exceptions";
++ r = -ENOMEM;
+ goto bad_pending_pool;
+ }
+
+diff --git a/drivers/media/dvb/mantis/mantis_dvb.c b/drivers/media/dvb/mantis/mantis_dvb.c
+index e5180e4..5d15c6b 100644
+--- a/drivers/media/dvb/mantis/mantis_dvb.c
++++ b/drivers/media/dvb/mantis/mantis_dvb.c
+@@ -248,8 +248,10 @@ int __devinit mantis_dvb_init(struct mantis_pci *mantis)
+ err5:
+ tasklet_kill(&mantis->tasklet);
+ dvb_net_release(&mantis->dvbnet);
+- dvb_unregister_frontend(mantis->fe);
+- dvb_frontend_detach(mantis->fe);
++ if (mantis->fe) {
++ dvb_unregister_frontend(mantis->fe);
++ dvb_frontend_detach(mantis->fe);
++ }
+ err4:
+ mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_mem);
+
+diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
+index 8d816cc..105f820 100644
+--- a/drivers/mfd/adp5520.c
++++ b/drivers/mfd/adp5520.c
+@@ -36,6 +36,7 @@ struct adp5520_chip {
+ struct blocking_notifier_head notifier_list;
+ int irq;
+ unsigned long id;
++ uint8_t mode;
+ };
+
+ static int __adp5520_read(struct i2c_client *client,
+@@ -326,7 +327,10 @@ static int adp5520_suspend(struct device *dev)
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
+
+- adp5520_clr_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
++ adp5520_read(chip->dev, ADP5520_MODE_STATUS, &chip->mode);
++ /* All other bits are W1C */
++ chip->mode &= ADP5520_BL_EN | ADP5520_DIM_EN | ADP5520_nSTNBY;
++ adp5520_write(chip->dev, ADP5520_MODE_STATUS, 0);
+ return 0;
+ }
+
+@@ -335,7 +339,7 @@ static int adp5520_resume(struct device *dev)
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adp5520_chip *chip = dev_get_drvdata(&client->dev);
+
+- adp5520_set_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY);
++ adp5520_write(chip->dev, ADP5520_MODE_STATUS, chip->mode);
+ return 0;
+ }
+ #endif
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index fb7c27f..c1aec06 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -363,13 +363,13 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
+ ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
+ card->ext_csd.raw_trim_mult =
+ ext_csd[EXT_CSD_TRIM_MULT];
++ card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
+ if (card->ext_csd.rev >= 4) {
+ /*
+ * Enhanced area feature support -- check whether the eMMC
+ * card has the Enhanced area enabled. If so, export enhanced
+ * area offset and size to user by adding sysfs interface.
+ */
+- card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
+ if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
+ (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
+ hc_erase_grp_sz =
+diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
+index cf444b0..90233ad 100644
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -297,16 +297,6 @@ config MMC_ATMELMCI
+
+ endchoice
+
+-config MMC_ATMELMCI_DMA
+- bool "Atmel MCI DMA support"
+- depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE
+- help
+- Say Y here to have the Atmel MCI driver use a DMA engine to
+- do data transfers and thus increase the throughput and
+- reduce the CPU utilization.
+-
+- If unsure, say N.
+-
+ config MMC_IMX
+ tristate "Motorola i.MX Multimedia Card Interface support"
+ depends on ARCH_MX1
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index 0932024..83790f2 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -163,6 +163,7 @@ struct atmel_mci {
+ void __iomem *regs;
+
+ struct scatterlist *sg;
++ unsigned int sg_len;
+ unsigned int pio_offset;
+
+ struct atmel_mci_slot *cur_slot;
+@@ -751,6 +752,7 @@ static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
+ data->error = -EINPROGRESS;
+
+ host->sg = data->sg;
++ host->sg_len = data->sg_len;
+ host->data = data;
+ host->data_chan = NULL;
+
+@@ -1573,7 +1575,8 @@ static void atmci_read_data_pio(struct atmel_mci *host)
+ if (offset == sg->length) {
+ flush_dcache_page(sg_page(sg));
+ host->sg = sg = sg_next(sg);
+- if (!sg)
++ host->sg_len--;
++ if (!sg || !host->sg_len)
+ goto done;
+
+ offset = 0;
+@@ -1586,7 +1589,8 @@ static void atmci_read_data_pio(struct atmel_mci *host)
+
+ flush_dcache_page(sg_page(sg));
+ host->sg = sg = sg_next(sg);
+- if (!sg)
++ host->sg_len--;
++ if (!sg || !host->sg_len)
+ goto done;
+
+ offset = 4 - remaining;
+@@ -1640,7 +1644,8 @@ static void atmci_write_data_pio(struct atmel_mci *host)
+ nbytes += 4;
+ if (offset == sg->length) {
+ host->sg = sg = sg_next(sg);
+- if (!sg)
++ host->sg_len--;
++ if (!sg || !host->sg_len)
+ goto done;
+
+ offset = 0;
+@@ -1654,7 +1659,8 @@ static void atmci_write_data_pio(struct atmel_mci *host)
+ nbytes += remaining;
+
+ host->sg = sg = sg_next(sg);
+- if (!sg) {
++ host->sg_len--;
++ if (!sg || !host->sg_len) {
+ atmci_writel(host, ATMCI_TDR, value);
+ goto done;
+ }
+@@ -2167,10 +2173,8 @@ static int __exit atmci_remove(struct platform_device *pdev)
+ atmci_readl(host, ATMCI_SR);
+ clk_disable(host->mck);
+
+-#ifdef CONFIG_MMC_ATMELMCI_DMA
+ if (host->dma.chan)
+ dma_release_channel(host->dma.chan);
+-#endif
+
+ free_irq(platform_get_irq(pdev, 0), host);
+ iounmap(host->regs);
+diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
+index 92053e6..c15d6ce 100644
+--- a/drivers/net/ethernet/3com/3c509.c
++++ b/drivers/net/ethernet/3com/3c509.c
+@@ -309,6 +309,7 @@ static int __devinit el3_isa_match(struct device *pdev,
+ if (!dev)
+ return -ENOMEM;
+
++ SET_NETDEV_DEV(dev, pdev);
+ netdev_boot_setup_check(dev);
+
+ if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) {
+@@ -704,6 +705,7 @@ static int __init el3_eisa_probe (struct device *device)
+ return -ENOMEM;
+ }
+
++ SET_NETDEV_DEV(dev, device);
+ netdev_boot_setup_check(dev);
+
+ el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA);
+diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
+index e0c5529..efc9dee 100644
+--- a/drivers/net/ethernet/3com/3c59x.c
++++ b/drivers/net/ethernet/3com/3c59x.c
+@@ -632,7 +632,6 @@ struct vortex_private {
+ pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */
+ open:1,
+ medialock:1,
+- must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
+ large_frames:1, /* accept large frames */
+ handling_irq:1; /* private in_irq indicator */
+ /* {get|set}_wol operations are already serialized by rtnl.
+@@ -951,7 +950,7 @@ static int __devexit vortex_eisa_remove(struct device *device)
+
+ unregister_netdev(dev);
+ iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
+- release_region(dev->base_addr, VORTEX_TOTAL_SIZE);
++ release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
+
+ free_netdev(dev);
+ return 0;
+@@ -1012,6 +1011,12 @@ static int __devinit vortex_init_one(struct pci_dev *pdev,
+ if (rc < 0)
+ goto out;
+
++ rc = pci_request_regions(pdev, DRV_NAME);
++ if (rc < 0) {
++ pci_disable_device(pdev);
++ goto out;
++ }
++
+ unit = vortex_cards_found;
+
+ if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
+@@ -1027,6 +1032,7 @@ static int __devinit vortex_init_one(struct pci_dev *pdev,
+ if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
+ ioaddr = pci_iomap(pdev, 0, 0);
+ if (!ioaddr) {
++ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ rc = -ENOMEM;
+ goto out;
+@@ -1036,6 +1042,7 @@ static int __devinit vortex_init_one(struct pci_dev *pdev,
+ ent->driver_data, unit);
+ if (rc < 0) {
+ pci_iounmap(pdev, ioaddr);
++ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ goto out;
+ }
+@@ -1180,11 +1187,6 @@ static int __devinit vortex_probe1(struct device *gendev,
+
+ /* PCI-only startup logic */
+ if (pdev) {
+- /* EISA resources already marked, so only PCI needs to do this here */
+- /* Ignore return value, because Cardbus drivers already allocate for us */
+- if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
+- vp->must_free_region = 1;
+-
+ /* enable bus-mastering if necessary */
+ if (vci->flags & PCI_USES_MASTER)
+ pci_set_master(pdev);
+@@ -1222,7 +1224,7 @@ static int __devinit vortex_probe1(struct device *gendev,
+ &vp->rx_ring_dma);
+ retval = -ENOMEM;
+ if (!vp->rx_ring)
+- goto free_region;
++ goto free_device;
+
+ vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
+ vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
+@@ -1487,9 +1489,7 @@ free_ring:
+ + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ vp->rx_ring,
+ vp->rx_ring_dma);
+-free_region:
+- if (vp->must_free_region)
+- release_region(dev->base_addr, vci->io_size);
++free_device:
+ free_netdev(dev);
+ pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
+ out:
+@@ -3254,8 +3254,9 @@ static void __devexit vortex_remove_one(struct pci_dev *pdev)
+ + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ vp->rx_ring,
+ vp->rx_ring_dma);
+- if (vp->must_free_region)
+- release_region(dev->base_addr, vp->io_size);
++
++ pci_release_regions(pdev);
++
+ free_netdev(dev);
+ }
+
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index c6b9903..ec13a59 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -2752,6 +2752,31 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
+ static int tg3_setup_phy(struct tg3 *, int);
+ static int tg3_halt_cpu(struct tg3 *, u32);
+
++static bool tg3_phy_power_bug(struct tg3 *tp)
++{
++ switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
++ case ASIC_REV_5700:
++ case ASIC_REV_5704:
++ return true;
++ case ASIC_REV_5780:
++ if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
++ return true;
++ return false;
++ case ASIC_REV_5717:
++ if (!tp->pci_fn)
++ return true;
++ return false;
++ case ASIC_REV_5719:
++ case ASIC_REV_5720:
++ if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
++ !tp->pci_fn)
++ return true;
++ return false;
++ }
++
++ return false;
++}
++
+ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
+ {
+ u32 val;
+@@ -2808,12 +2833,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
+ /* The PHY should not be powered down on some chips because
+ * of bugs.
+ */
+- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
+- (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
+- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+- !tp->pci_fn))
++ if (tg3_phy_power_bug(tp))
+ return;
+
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index 021463b..57e2da0 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1325,7 +1325,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
+ static int __devinit ibmveth_probe(struct vio_dev *dev,
+ const struct vio_device_id *id)
+ {
+- int rc, i;
++ int rc, i, mac_len;
+ struct net_device *netdev;
+ struct ibmveth_adapter *adapter;
+ unsigned char *mac_addr_p;
+@@ -1335,11 +1335,19 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,
+ dev->unit_address);
+
+ mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
+- NULL);
++ &mac_len);
+ if (!mac_addr_p) {
+ dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
+ return -EINVAL;
+ }
++ /* Workaround for old/broken pHyp */
++ if (mac_len == 8)
++ mac_addr_p += 2;
++ else if (mac_len != 6) {
++ dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
++ mac_len);
++ return -EINVAL;
++ }
+
+ mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
+ VETH_MCAST_FILTER_SIZE, NULL);
+@@ -1364,17 +1372,6 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,
+
+ netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
+
+- /*
+- * Some older boxes running PHYP non-natively have an OF that returns
+- * a 8-byte local-mac-address field (and the first 2 bytes have to be
+- * ignored) while newer boxes' OF return a 6-byte field. Note that
+- * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
+- * The RPA doc specifies that the first byte must be 10b, so we'll
+- * just look for it to solve this 8 vs. 6 byte field issue
+- */
+- if ((*mac_addr_p & 0x3) != 0x02)
+- mac_addr_p += 2;
+-
+ adapter->mac_addr = 0;
+ memcpy(&adapter->mac_addr, mac_addr_p, 6);
+
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 544ac06..301b39e 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -204,7 +204,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
+ }
+
+ if (port->passthru)
+- vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
++ vlan = list_first_or_null_rcu(&port->vlans,
++ struct macvlan_dev, list);
+ else
+ vlan = macvlan_hash_lookup(port, eth->h_dest);
+ if (vlan == NULL)
+@@ -725,7 +726,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
+ if (err < 0)
+ goto destroy_port;
+
+- list_add_tail(&vlan->list, &port->vlans);
++ list_add_tail_rcu(&vlan->list, &port->vlans);
+ netif_stacked_transfer_operstate(lowerdev, dev);
+
+ return 0;
+@@ -751,7 +752,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head)
+ {
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+- list_del(&vlan->list);
++ list_del_rcu(&vlan->list);
+ unregister_netdevice_queue(dev, head);
+ }
+ EXPORT_SYMBOL_GPL(macvlan_dellink);
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index 95437fc..df3e27c 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -1793,6 +1793,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_node *an = (struct ath_node *) sta->drv_priv;
+ struct ieee80211_key_conf ps_key = { };
++ int key;
+
+ ath_node_attach(sc, sta);
+
+@@ -1800,7 +1801,9 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
+ vif->type != NL80211_IFTYPE_AP_VLAN)
+ return 0;
+
+- an->ps_key = ath_key_config(common, vif, sta, &ps_key);
++ key = ath_key_config(common, vif, sta, &ps_key);
++ if (key > 0)
++ an->ps_key = key;
+
+ return 0;
+ }
+@@ -1817,6 +1820,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
+ return;
+
+ ath_key_delete(common, &ps_key);
++ an->ps_key = 0;
+ }
+
+ static int ath9k_sta_remove(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
+index 12975ad..ca70267 100644
+--- a/drivers/net/wireless/b43/dma.c
++++ b/drivers/net/wireless/b43/dma.c
+@@ -1719,6 +1719,25 @@ drop_recycle_buffer:
+ sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
+ }
+
++void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
++{
++ int current_slot, previous_slot;
++
++ B43_WARN_ON(ring->tx);
++
++ /* Device has filled all buffers, drop all packets and let TCP
++ * decrease speed.
++ * Decrement RX index by one will let the device to see all slots
++ * as free again
++ */
++ /*
++ *TODO: How to increase rx_drop in mac80211?
++ */
++ current_slot = ring->ops->get_current_rxslot(ring);
++ previous_slot = prev_slot(ring, current_slot);
++ ring->ops->set_current_rxslot(ring, previous_slot);
++}
++
+ void b43_dma_rx(struct b43_dmaring *ring)
+ {
+ const struct b43_dma_ops *ops = ring->ops;
+diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
+index 9fdd198..df8c8cd 100644
+--- a/drivers/net/wireless/b43/dma.h
++++ b/drivers/net/wireless/b43/dma.h
+@@ -9,7 +9,7 @@
+ /* DMA-Interrupt reasons. */
+ #define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \
+ | (1 << 14) | (1 << 15))
+-#define B43_DMAIRQ_NONFATALMASK (1 << 13)
++#define B43_DMAIRQ_RDESC_UFLOW (1 << 13)
+ #define B43_DMAIRQ_RX_DONE (1 << 16)
+
+ /*** 32-bit DMA Engine. ***/
+@@ -295,6 +295,8 @@ int b43_dma_tx(struct b43_wldev *dev,
+ void b43_dma_handle_txstatus(struct b43_wldev *dev,
+ const struct b43_txstatus *status);
+
++void b43_dma_handle_rx_overflow(struct b43_dmaring *ring);
++
+ void b43_dma_rx(struct b43_dmaring *ring);
+
+ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
+diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
+index 680709c..c0f2041 100644
+--- a/drivers/net/wireless/b43/main.c
++++ b/drivers/net/wireless/b43/main.c
+@@ -1901,30 +1901,18 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
+ }
+ }
+
+- if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK |
+- B43_DMAIRQ_NONFATALMASK))) {
+- if (merged_dma_reason & B43_DMAIRQ_FATALMASK) {
+- b43err(dev->wl, "Fatal DMA error: "
+- "0x%08X, 0x%08X, 0x%08X, "
+- "0x%08X, 0x%08X, 0x%08X\n",
+- dma_reason[0], dma_reason[1],
+- dma_reason[2], dma_reason[3],
+- dma_reason[4], dma_reason[5]);
+- b43err(dev->wl, "This device does not support DMA "
++ if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) {
++ b43err(dev->wl,
++ "Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
++ dma_reason[0], dma_reason[1],
++ dma_reason[2], dma_reason[3],
++ dma_reason[4], dma_reason[5]);
++ b43err(dev->wl, "This device does not support DMA "
+ "on your system. It will now be switched to PIO.\n");
+- /* Fall back to PIO transfers if we get fatal DMA errors! */
+- dev->use_pio = 1;
+- b43_controller_restart(dev, "DMA error");
+- return;
+- }
+- if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
+- b43err(dev->wl, "DMA error: "
+- "0x%08X, 0x%08X, 0x%08X, "
+- "0x%08X, 0x%08X, 0x%08X\n",
+- dma_reason[0], dma_reason[1],
+- dma_reason[2], dma_reason[3],
+- dma_reason[4], dma_reason[5]);
+- }
++ /* Fall back to PIO transfers if we get fatal DMA errors! */
++ dev->use_pio = true;
++ b43_controller_restart(dev, "DMA error");
++ return;
+ }
+
+ if (unlikely(reason & B43_IRQ_UCODE_DEBUG))
+@@ -1943,6 +1931,11 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
+ handle_irq_noise(dev);
+
+ /* Check the DMA reason registers for received data. */
++ if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) {
++ if (B43_DEBUG)
++ b43warn(dev->wl, "RX descriptor underrun\n");
++ b43_dma_handle_rx_overflow(dev->dma.rx_ring);
++ }
+ if (dma_reason[0] & B43_DMAIRQ_RX_DONE) {
+ if (b43_using_pio_transfers(dev))
+ b43_pio_rx(dev->pio.rx_queue);
+@@ -2000,7 +1993,7 @@ static irqreturn_t b43_do_interrupt(struct b43_wldev *dev)
+ return IRQ_NONE;
+
+ dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON)
+- & 0x0001DC00;
++ & 0x0001FC00;
+ dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON)
+ & 0x0000DC00;
+ dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON)
+@@ -3103,7 +3096,7 @@ static int b43_chip_init(struct b43_wldev *dev)
+ b43_write32(dev, 0x018C, 0x02000000);
+ }
+ b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000);
+- b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
++ b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00);
+ b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
+ b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
+ b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
+diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
+index 727c129..45ac407 100644
+--- a/drivers/net/wireless/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/mwifiex/cfg80211.c
+@@ -1281,9 +1281,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
+ if (dev->reg_state == NETREG_REGISTERED)
+ unregister_netdevice(dev);
+
+- if (dev->reg_state == NETREG_UNREGISTERED)
+- free_netdev(dev);
+-
+ /* Clear the priv in adapter */
+ priv->netdev = NULL;
+
+diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
+index 5a25dd2..90ffc76 100644
+--- a/drivers/net/wireless/mwifiex/cmdevt.c
++++ b/drivers/net/wireless/mwifiex/cmdevt.c
+@@ -1083,6 +1083,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
+ adapter->if_ops.wakeup(adapter);
+ adapter->hs_activated = false;
+ adapter->is_hs_configured = false;
++ adapter->is_suspended = false;
+ mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY), false);
+ }
+diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
+index 67e6db7..5baa12a 100644
+--- a/drivers/net/wireless/mwifiex/main.c
++++ b/drivers/net/wireless/mwifiex/main.c
+@@ -581,6 +581,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
+ struct net_device *dev)
+ {
+ dev->netdev_ops = &mwifiex_netdev_ops;
++ dev->destructor = free_netdev;
+ /* Initialize private structure */
+ priv->current_key_index = 0;
+ priv->media_connected = false;
+diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
+index 56e1c4a..5c3c62d 100644
+--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
++++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
+@@ -105,7 +105,7 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
+ } else {
+ /* Multicast */
+ priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
+- if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) {
++ if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
+ dev_dbg(priv->adapter->dev,
+ "info: Enabling All Multicast!\n");
+ priv->curr_pkt_filter |=
+@@ -117,20 +117,11 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
+ dev_dbg(priv->adapter->dev,
+ "info: Set multicast list=%d\n",
+ mcast_list->num_multicast_addr);
+- /* Set multicast addresses to firmware */
+- if (old_pkt_filter == priv->curr_pkt_filter) {
+- /* Send request to firmware */
+- ret = mwifiex_send_cmd_async(priv,
+- HostCmd_CMD_MAC_MULTICAST_ADR,
+- HostCmd_ACT_GEN_SET, 0,
+- mcast_list);
+- } else {
+- /* Send request to firmware */
+- ret = mwifiex_send_cmd_async(priv,
+- HostCmd_CMD_MAC_MULTICAST_ADR,
+- HostCmd_ACT_GEN_SET, 0,
+- mcast_list);
+- }
++ /* Send multicast addresses to firmware */
++ ret = mwifiex_send_cmd_async(priv,
++ HostCmd_CMD_MAC_MULTICAST_ADR,
++ HostCmd_ACT_GEN_SET, 0,
++ mcast_list);
+ }
+ }
+ }
+diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
+index 22b2dfa..fdacfce 100644
+--- a/drivers/platform/x86/hp_accel.c
++++ b/drivers/platform/x86/hp_accel.c
+@@ -362,7 +362,8 @@ static int lis3lv02d_suspend(struct acpi_device *device, pm_message_t state)
+
+ static int lis3lv02d_resume(struct acpi_device *device)
+ {
+- return lis3lv02d_poweron(&lis3_dev);
++ lis3lv02d_poweron(&lis3_dev);
++ return 0;
+ }
+ #else
+ #define lis3lv02d_suspend NULL
+diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
+index 33471e1..23ef16c 100644
+--- a/drivers/rapidio/devices/tsi721.c
++++ b/drivers/rapidio/devices/tsi721.c
+@@ -475,6 +475,10 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
+ u32 intval;
+ u32 ch_inte;
+
++ /* For MSI mode disable all device-level interrupts */
++ if (priv->flags & TSI721_USING_MSI)
++ iowrite32(0, priv->regs + TSI721_DEV_INTE);
++
+ dev_int = ioread32(priv->regs + TSI721_DEV_INT);
+ if (!dev_int)
+ return IRQ_NONE;
+@@ -548,6 +552,13 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
+ tsi721_pw_handler(mport);
+ }
+
++ /* For MSI mode re-enable device-level interrupts */
++ if (priv->flags & TSI721_USING_MSI) {
++ dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
++ TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
++ iowrite32(dev_int, priv->regs + TSI721_DEV_INTE);
++ }
++
+ return IRQ_HANDLED;
+ }
+
+diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
+index 2ee3bbf..62e1b2c 100644
+--- a/drivers/rtc/rtc-pcf2123.c
++++ b/drivers/rtc/rtc-pcf2123.c
+@@ -264,6 +264,7 @@ static int __devinit pcf2123_probe(struct spi_device *spi)
+
+ if (!(rxbuf[0] & 0x20)) {
+ dev_err(&spi->dev, "chip not found\n");
++ ret = -ENODEV;
+ goto kfree_exit;
+ }
+
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index a023f52..fe4dbf3 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -143,6 +143,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
+ }
+ rc = do_devconfig_ioctl(dev,
+ (struct comedi_devconfig __user *)arg);
++ if (rc == 0)
++ /* Evade comedi_auto_unconfig(). */
++ dev_file_info->hardware_device = NULL;
+ goto done;
+ }
+
+diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
+index 51b5adf..df8ea25 100644
+--- a/drivers/staging/vt6656/hostap.c
++++ b/drivers/staging/vt6656/hostap.c
+@@ -153,7 +153,7 @@ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked)
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
+ pDevice->dev->name, pDevice->apdev->name);
+ }
+- kfree(pDevice->apdev);
++ free_netdev(pDevice->apdev);
+ pDevice->apdev = NULL;
+ pDevice->bEnable8021x = FALSE;
+ pDevice->bEnableHostWEP = FALSE;
+diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
+index 101b1be..d86791e 100644
+--- a/drivers/target/iscsi/iscsi_target_erl1.c
++++ b/drivers/target/iscsi/iscsi_target_erl1.c
+@@ -824,7 +824,7 @@ static int iscsit_attach_ooo_cmdsn(
+ /*
+ * CmdSN is greater than the tail of the list.
+ */
+- if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn)
++ if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn))
+ list_add_tail(&ooo_cmdsn->ooo_list,
+ &sess->sess_ooo_cmdsn_list);
+ else {
+@@ -834,11 +834,12 @@ static int iscsit_attach_ooo_cmdsn(
+ */
+ list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
+ ooo_list) {
+- if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
++ if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn))
+ continue;
+
++ /* Insert before this entry */
+ list_add(&ooo_cmdsn->ooo_list,
+- &ooo_tmp->ooo_list);
++ ooo_tmp->ooo_list.prev);
+ break;
+ }
+ }
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 8481aae..0f8a785 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1530,6 +1530,14 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
+ tty->real_raw = 0;
+ }
+ n_tty_set_room(tty);
++ /*
++ * Fix tty hang when I_IXON(tty) is cleared, but the tty
++ * been stopped by STOP_CHAR(tty) before it.
++ */
++ if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
++ start_tty(tty);
++ }
++
+ /* The termios change make the tty ready for I/O */
+ wake_up_interruptible(&tty->write_wait);
+ wake_up_interruptible(&tty->read_wait);
+diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
+index a845f8b..9497171 100644
+--- a/drivers/usb/atm/cxacru.c
++++ b/drivers/usb/atm/cxacru.c
+@@ -686,7 +686,8 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
+ {
+ int ret, len;
+ __le32 *buf;
+- int offb, offd;
++ int offb;
++ unsigned int offd;
+ const int stride = CMD_PACKET_SIZE / (4 * 2) - 1;
+ int buflen = ((size - 1) / stride + 1 + size * 2) * 4;
+
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 0aaa4f1..2fbcb75 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -88,6 +88,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Edirol SD-20 */
+ { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* Alcor Micro Corp. Hub */
++ { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ /* appletouch */
+ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+
+diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
+index 045cde4..850723f 100644
+--- a/drivers/usb/host/uhci-hub.c
++++ b/drivers/usb/host/uhci-hub.c
+@@ -221,7 +221,8 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
+ /* auto-stop if nothing connected for 1 second */
+ if (any_ports_active(uhci))
+ uhci->rh_state = UHCI_RH_RUNNING;
+- else if (time_after_eq(jiffies, uhci->auto_stop_time))
++ else if (time_after_eq(jiffies, uhci->auto_stop_time) &&
++ !uhci->wait_for_hp)
+ suspend_rh(uhci, UHCI_RH_AUTO_STOPPED);
+ break;
+
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index ee5ec11..430c1d5 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1353,15 +1353,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
+
+ /* Set the max packet size and max burst */
++ max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
++ max_burst = 0;
+ switch (udev->speed) {
+ case USB_SPEED_SUPER:
+- max_packet = usb_endpoint_maxp(&ep->desc);
+- ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
+ /* dig out max burst from ep companion desc */
+- max_packet = ep->ss_ep_comp.bMaxBurst;
+- ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
++ max_burst = ep->ss_ep_comp.bMaxBurst;
+ break;
+ case USB_SPEED_HIGH:
++ /* Some devices get this wrong */
++ if (usb_endpoint_xfer_bulk(&ep->desc))
++ max_packet = 512;
+ /* bits 11:12 specify the number of additional transaction
+ * opportunities per microframe (USB 2.0, section 9.6.6)
+ */
+@@ -1369,17 +1371,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ usb_endpoint_xfer_int(&ep->desc)) {
+ max_burst = (usb_endpoint_maxp(&ep->desc)
+ & 0x1800) >> 11;
+- ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
+ }
+- /* Fall through */
++ break;
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+- max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+- ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
+ break;
+ default:
+ BUG();
+ }
++ ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
++ MAX_BURST(max_burst));
+ max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
+ ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
+
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 37b2a89..d08a804 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2376,14 +2376,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ * TD list.
+ */
+ if (list_empty(&ep_ring->td_list)) {
+- xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
+- "with no TDs queued?\n",
+- TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+- ep_index);
+- xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+- (le32_to_cpu(event->flags) &
+- TRB_TYPE_BITMASK)>>10);
+- xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
++ /*
++ * A stopped endpoint may generate an extra completion
++ * event if the device was suspended. Don't print
++ * warnings.
++ */
++ if (!(trb_comp_code == COMP_STOP ||
++ trb_comp_code == COMP_STOP_INVAL)) {
++ xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
++ TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
++ ep_index);
++ xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
++ (le32_to_cpu(event->flags) &
++ TRB_TYPE_BITMASK)>>10);
++ xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
++ }
+ if (ep->skip) {
+ ep->skip = false;
+ xhci_dbg(xhci, "td_list is empty while skip "
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 51d1712..918ec98 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -197,6 +197,8 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) },
+ { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) },
++ { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_CC_PID) },
++ { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_AGP_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 2f86008..5d25e26 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -772,6 +772,8 @@
+ */
+ #define NEWPORT_VID 0x104D
+ #define NEWPORT_AGILIS_PID 0x3000
++#define NEWPORT_CONEX_CC_PID 0x3002
++#define NEWPORT_CONEX_AGP_PID 0x3006
+
+ /* Interbiometrics USB I/O Board */
+ /* Developed for Interbiometrics by Rudolf Gugler */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 8513f51..59c4997 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -196,6 +196,7 @@ static void option_instat_callback(struct urb *urb);
+
+ #define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */
+ #define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */
++#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
+
+ #define KYOCERA_VENDOR_ID 0x0c88
+ #define KYOCERA_PRODUCT_KPC650 0x17da
+@@ -341,8 +342,8 @@ static void option_instat_callback(struct urb *urb);
+ #define CINTERION_PRODUCT_EU3_E 0x0051
+ #define CINTERION_PRODUCT_EU3_P 0x0052
+ #define CINTERION_PRODUCT_PH8 0x0053
+-#define CINTERION_PRODUCT_AH6 0x0055
+-#define CINTERION_PRODUCT_PLS8 0x0060
++#define CINTERION_PRODUCT_AHXX 0x0055
++#define CINTERION_PRODUCT_PLXX 0x0060
+
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID 0x0b3c
+@@ -771,6 +772,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
+ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
+@@ -966,6 +968,8 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
+@@ -1264,8 +1268,9 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
+- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) },
+- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) },
++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
+diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
+index 450f529..2c69d12 100644
+--- a/fs/autofs4/expire.c
++++ b/fs/autofs4/expire.c
+@@ -61,15 +61,6 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
+ /* This is an autofs submount, we can't expire it */
+ if (autofs_type_indirect(sbi->type))
+ goto done;
+-
+- /*
+- * Otherwise it's an offset mount and we need to check
+- * if we can umount its mount, if there is one.
+- */
+- if (!d_mountpoint(path.dentry)) {
+- status = 0;
+- goto done;
+- }
+ }
+
+ /* Update the expiry counter if fs is busy */
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index c04f02c..618ae6f 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1571,7 +1571,11 @@ static noinline int copy_to_sk(struct btrfs_root *root,
+ item_off = btrfs_item_ptr_offset(leaf, i);
+ item_len = btrfs_item_size_nr(leaf, i);
+
+- if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
++ btrfs_item_key_to_cpu(leaf, key, i);
++ if (!key_in_sk(key, sk))
++ continue;
++
++ if (sizeof(sh) + item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
+ item_len = 0;
+
+ if (sizeof(sh) + item_len + *sk_offset >
+@@ -1580,10 +1584,6 @@ static noinline int copy_to_sk(struct btrfs_root *root,
+ goto overflow;
+ }
+
+- btrfs_item_key_to_cpu(leaf, key, i);
+- if (!key_in_sk(key, sk))
+- continue;
+-
+ sh.objectid = key->objectid;
+ sh.offset = key->offset;
+ sh.type = key->type;
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index e851d5b..20431b4 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -173,7 +173,8 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
+
+ if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL)
+ inode->i_flags |= S_AUTOMOUNT;
+- cifs_set_ops(inode);
++ if (inode->i_state & I_NEW)
++ cifs_set_ops(inode);
+ }
+
+ void
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 7b18563..9243103 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2027,7 +2027,11 @@ repeat:
+ group = ac->ac_g_ex.fe_group;
+
+ for (i = 0; i < ngroups; group++, i++) {
+- if (group == ngroups)
++ /*
++ * Artificially restricted ngroups for non-extent
++ * files makes group > ngroups possible on first loop.
++ */
++ if (group >= ngroups)
+ group = 0;
+
+ /* This now checks without needing the buddy page */
+diff --git a/fs/fat/inode.c b/fs/fat/inode.c
+index 808cac7..fc33ca1 100644
+--- a/fs/fat/inode.c
++++ b/fs/fat/inode.c
+@@ -1238,6 +1238,19 @@ static int fat_read_root(struct inode *inode)
+ return 0;
+ }
+
++static unsigned long calc_fat_clusters(struct super_block *sb)
++{
++ struct msdos_sb_info *sbi = MSDOS_SB(sb);
++
++ /* Divide first to avoid overflow */
++ if (sbi->fat_bits != 12) {
++ unsigned long ent_per_sec = sb->s_blocksize * 8 / sbi->fat_bits;
++ return ent_per_sec * sbi->fat_length;
++ }
++
++ return sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
++}
++
+ /*
+ * Read the super block of an MS-DOS FS.
+ */
+@@ -1434,7 +1447,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
+ sbi->fat_bits = (total_clusters > MAX_FAT12) ? 16 : 12;
+
+ /* check that FAT table does not overflow */
+- fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
++ fat_clusters = calc_fat_clusters(sb);
+ total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT);
+ if (total_clusters > MAX_FAT(sb)) {
+ if (!silent)
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 08921b8..e065497 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -267,6 +267,7 @@ static __be32
+ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
+ {
+ __be32 status;
++ int accmode = 0;
+
+ /* Only reclaims from previously confirmed clients are valid */
+ if ((status = nfs4_check_open_reclaim(&open->op_clientid)))
+@@ -284,9 +285,19 @@ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_
+
+ open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
+ (open->op_iattr.ia_size == 0);
++ /*
++ * In the delegation case, the client is telling us about an
++ * open that it *already* performed locally, some time ago. We
++ * should let it succeed now if possible.
++ *
++ * In the case of a CLAIM_FH open, on the other hand, the client
++ * may be counting on us to enforce permissions (the Linux 4.1
++ * client uses this for normal opens, for example).
++ */
++ if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH)
++ accmode = NFSD_MAY_OWNER_OVERRIDE;
+
+- status = do_open_permission(rqstp, current_fh, open,
+- NFSD_MAY_OWNER_OVERRIDE);
++ status = do_open_permission(rqstp, current_fh, open, accmode);
+
+ return status;
+ }
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index b50ffb7..edeb239 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -195,13 +195,32 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
+
+ static int nilfs_set_page_dirty(struct page *page)
+ {
+- int ret = __set_page_dirty_buffers(page);
++ int ret = __set_page_dirty_nobuffers(page);
+
+- if (ret) {
++ if (page_has_buffers(page)) {
+ struct inode *inode = page->mapping->host;
+- unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
++ unsigned nr_dirty = 0;
++ struct buffer_head *bh, *head;
+
+- nilfs_set_file_dirty(inode, nr_dirty);
++ /*
++ * This page is locked by callers, and no other thread
++ * concurrently marks its buffers dirty since they are
++ * only dirtied through routines in fs/buffer.c in
++ * which call sites of mark_buffer_dirty are protected
++ * by page lock.
++ */
++ bh = head = page_buffers(page);
++ do {
++ /* Do not mark hole blocks dirty */
++ if (buffer_dirty(bh) || !buffer_mapped(bh))
++ continue;
++
++ set_buffer_dirty(bh);
++ nr_dirty++;
++ } while (bh = bh->b_this_page, bh != head);
++
++ if (nr_dirty)
++ nilfs_set_file_dirty(inode, nr_dirty);
+ }
+ return ret;
+ }
+diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
+index 2f5b92e..7eb1c0c 100644
+--- a/fs/ocfs2/extent_map.c
++++ b/fs/ocfs2/extent_map.c
+@@ -791,7 +791,7 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ &hole_size, &rec, &is_last);
+ if (ret) {
+ mlog_errno(ret);
+- goto out;
++ goto out_unlock;
+ }
+
+ if (rec.e_blkno == 0ULL) {
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 8469f3f..88c953d 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -204,6 +204,7 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules,
+ unsigned long count,
+ u64 *max_size,
+ int *reset_type);
++typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size);
+
+ /*
+ * EFI Configuration Table and GUID definitions
+@@ -331,6 +332,14 @@ extern void efi_map_pal_code (void);
+ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
+ extern void efi_gettimeofday (struct timespec *ts);
+ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
++#ifdef CONFIG_X86
++extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size);
++#else
++static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
++{
++ return EFI_SUCCESS;
++}
++#endif
+ extern u64 efi_get_iobase (void);
+ extern u32 efi_mem_type (unsigned long phys_addr);
+ extern u64 efi_mem_attributes (unsigned long phys_addr);
+@@ -475,7 +484,7 @@ struct efivar_operations {
+ efi_get_variable_t *get_variable;
+ efi_get_next_variable_t *get_next_variable;
+ efi_set_variable_t *set_variable;
+- efi_query_variable_info_t *query_variable_info;
++ efi_query_variable_store_t *query_variable_store;
+ };
+
+ struct efivars {
+diff --git a/include/linux/if_cablemodem.h b/include/linux/if_cablemodem.h
+index 9ca1007..ee6b3c4 100644
+--- a/include/linux/if_cablemodem.h
++++ b/include/linux/if_cablemodem.h
+@@ -12,11 +12,11 @@
+ */
+
+ /* some useful defines for sb1000.c e cmconfig.c - fv */
+-#define SIOCGCMSTATS SIOCDEVPRIVATE+0 /* get cable modem stats */
+-#define SIOCGCMFIRMWARE SIOCDEVPRIVATE+1 /* get cm firmware version */
+-#define SIOCGCMFREQUENCY SIOCDEVPRIVATE+2 /* get cable modem frequency */
+-#define SIOCSCMFREQUENCY SIOCDEVPRIVATE+3 /* set cable modem frequency */
+-#define SIOCGCMPIDS SIOCDEVPRIVATE+4 /* get cable modem PIDs */
+-#define SIOCSCMPIDS SIOCDEVPRIVATE+5 /* set cable modem PIDs */
++#define SIOCGCMSTATS (SIOCDEVPRIVATE+0) /* get cable modem stats */
++#define SIOCGCMFIRMWARE (SIOCDEVPRIVATE+1) /* get cm firmware version */
++#define SIOCGCMFREQUENCY (SIOCDEVPRIVATE+2) /* get cable modem frequency */
++#define SIOCSCMFREQUENCY (SIOCDEVPRIVATE+3) /* set cable modem frequency */
++#define SIOCGCMPIDS (SIOCDEVPRIVATE+4) /* get cable modem PIDs */
++#define SIOCSCMPIDS (SIOCDEVPRIVATE+5) /* set cable modem PIDs */
+
+ #endif
+diff --git a/include/linux/rculist.h b/include/linux/rculist.h
+index d079290..6f95e24 100644
+--- a/include/linux/rculist.h
++++ b/include/linux/rculist.h
+@@ -242,6 +242,23 @@ static inline void list_splice_init_rcu(struct list_head *list,
+ list_entry_rcu((ptr)->next, type, member)
+
+ /**
++ * list_first_or_null_rcu - get the first element from a list
++ * @ptr: the list head to take the element from.
++ * @type: the type of the struct this is embedded in.
++ * @member: the name of the list_struct within the struct.
++ *
++ * Note that if the list is empty, it returns NULL.
++ *
++ * This primitive may safely run concurrently with the _rcu list-mutation
++ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
++ */
++#define list_first_or_null_rcu(ptr, type, member) \
++ ({struct list_head *__ptr = (ptr); \
++ struct list_head __rcu *__next = list_next_rcu(__ptr); \
++ likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
++ })
++
++/**
+ * list_for_each_entry_rcu - iterate over rcu list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
+index bdf4b00..82e12ad 100644
+--- a/include/linux/virtio_console.h
++++ b/include/linux/virtio_console.h
+@@ -39,7 +39,7 @@
+ #define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */
+ #define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */
+
+-#define VIRTIO_CONSOLE_BAD_ID (~(u32)0)
++#define VIRTIO_CONSOLE_BAD_ID (~(__u32)0)
+
+ struct virtio_console_config {
+ /* colums of the screens */
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index 3efc9f3..bea7ad5 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -233,6 +233,8 @@ do { \
+ if (!ret) \
+ break; \
+ } \
++ if (!ret && (condition)) \
++ ret = 1; \
+ finish_wait(&wq, &__wait); \
+ } while (0)
+
+@@ -249,8 +251,9 @@ do { \
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+- * The function returns 0 if the @timeout elapsed, and the remaining
+- * jiffies if the condition evaluated to true before the timeout elapsed.
++ * The function returns 0 if the @timeout elapsed, or the remaining
++ * jiffies (at least 1) if the @condition evaluated to %true before
++ * the @timeout elapsed.
+ */
+ #define wait_event_timeout(wq, condition, timeout) \
+ ({ \
+@@ -318,6 +321,8 @@ do { \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
++ if (!ret && (condition)) \
++ ret = 1; \
+ finish_wait(&wq, &__wait); \
+ } while (0)
+
+@@ -334,9 +339,10 @@ do { \
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+- * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+- * was interrupted by a signal, and the remaining jiffies otherwise
+- * if the condition evaluated to true before the timeout elapsed.
++ * Returns:
++ * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
++ * a signal, or the remaining jiffies (at least 1) if the @condition
++ * evaluated to %true before the @timeout elapsed.
+ */
+ #define wait_event_interruptible_timeout(wq, condition, timeout) \
+ ({ \
+diff --git a/include/net/sock.h b/include/net/sock.h
+index ddf523c..e6454b6 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -734,6 +734,18 @@ struct inet_hashinfo;
+ struct raw_hashinfo;
+ struct module;
+
++/*
++ * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
++ * un-modified. Special care is taken when initializing object to zero.
++ */
++static inline void sk_prot_clear_nulls(struct sock *sk, int size)
++{
++ if (offsetof(struct sock, sk_node.next) != 0)
++ memset(sk, 0, offsetof(struct sock, sk_node.next));
++ memset(&sk->sk_node.pprev, 0,
++ size - offsetof(struct sock, sk_node.pprev));
++}
++
+ /* Networking protocol blocks we attach to sockets.
+ * socket layer -> transport layer interface
+ * transport -> network interface is defined by struct inet_proto
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 0768715..fe46019 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -931,6 +931,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+ if (sysctl_tcp_low_latency || !tp->ucopy.task)
+ return 0;
+
++ skb_dst_force(skb);
+ __skb_queue_tail(&tp->ucopy.prequeue, skb);
+ tp->ucopy.memory += skb->truesize;
+ if (tp->ucopy.memory > sk->sk_rcvbuf) {
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index d6fe08a..a16dac1 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -467,6 +467,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
+ int retval = 0;
+
+ helper_lock();
++ if (!sub_info->path) {
++ retval = -EINVAL;
++ goto out;
++ }
++
+ if (sub_info->path[0] == '\0')
+ goto out;
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index d08c9f4..d93369a 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -6672,16 +6672,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
+ *tablep = NULL;
+ }
+
++static int min_load_idx = 0;
++static int max_load_idx = CPU_LOAD_IDX_MAX-1;
++
+ static void
+ set_table_entry(struct ctl_table *entry,
+ const char *procname, void *data, int maxlen,
+- mode_t mode, proc_handler *proc_handler)
++ mode_t mode, proc_handler *proc_handler,
++ bool load_idx)
+ {
+ entry->procname = procname;
+ entry->data = data;
+ entry->maxlen = maxlen;
+ entry->mode = mode;
+ entry->proc_handler = proc_handler;
++
++ if (load_idx) {
++ entry->extra1 = &min_load_idx;
++ entry->extra2 = &max_load_idx;
++ }
+ }
+
+ static struct ctl_table *
+@@ -6693,30 +6702,30 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
+ return NULL;
+
+ set_table_entry(&table[0], "min_interval", &sd->min_interval,
+- sizeof(long), 0644, proc_doulongvec_minmax);
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
+ set_table_entry(&table[1], "max_interval", &sd->max_interval,
+- sizeof(long), 0644, proc_doulongvec_minmax);
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
+ set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, true);
+ set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[9], "cache_nice_tries",
+ &sd->cache_nice_tries,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[10], "flags", &sd->flags,
+- sizeof(int), 0644, proc_dointvec_minmax);
++ sizeof(int), 0644, proc_dointvec_minmax, false);
+ set_table_entry(&table[11], "name", sd->name,
+- CORENAME_MAX_SIZE, 0444, proc_dostring);
++ CORENAME_MAX_SIZE, 0444, proc_dostring, false);
+ /* &table[12] is terminator */
+
+ return table;
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 793548c..e9a45f1 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -833,7 +833,7 @@ void tick_cancel_sched_timer(int cpu)
+ hrtimer_cancel(&ts->sched_timer);
+ # endif
+
+- ts->nohz_mode = NOHZ_MODE_INACTIVE;
++ memset(ts, 0, sizeof(*ts));
+ }
+ #endif
+
+diff --git a/kernel/timer.c b/kernel/timer.c
+index c219db6..f2f71d7 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -1630,12 +1630,12 @@ static int __cpuinit init_timers_cpu(int cpu)
+ boot_done = 1;
+ base = &boot_tvec_bases;
+ }
++ spin_lock_init(&base->lock);
+ tvec_base_done[cpu] = 1;
+ } else {
+ base = per_cpu(tvec_bases, cpu);
+ }
+
+- spin_lock_init(&base->lock);
+
+ for (j = 0; j < TVN_SIZE; j++) {
+ INIT_LIST_HEAD(base->tv5.vec + j);
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 95dc31e..b0996c1 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -769,7 +769,11 @@ static int filter_set_pred(struct event_filter *filter,
+
+ static void __free_preds(struct event_filter *filter)
+ {
++ int i;
++
+ if (filter->preds) {
++ for (i = 0; i < filter->n_preds; i++)
++ kfree(filter->preds[i].ops);
+ kfree(filter->preds);
+ filter->preds = NULL;
+ }
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 470cbb4..d80ac4b 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1937,7 +1937,12 @@ static void collapse_huge_page(struct mm_struct *mm,
+ pte_unmap(pte);
+ spin_lock(&mm->page_table_lock);
+ BUG_ON(!pmd_none(*pmd));
+- set_pmd_at(mm, address, pmd, _pmd);
++ /*
++ * We can only use set_pmd_at when establishing
++ * hugepmds and never for establishing regular pmds that
++ * points to regular pagetables. Use pmd_populate for that
++ */
++ pmd_populate(mm, pmd, pmd_pgtable(_pmd));
+ spin_unlock(&mm->page_table_lock);
+ anon_vma_unlock(vma->anon_vma);
+ goto out;
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 180d97f..e1052d1 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -147,7 +147,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+ if (PageHuge(new))
+ pte = pte_mkhuge(pte);
+ #endif
+- flush_cache_page(vma, addr, pte_pfn(pte));
++ flush_dcache_page(new);
+ set_pte_at(mm, addr, ptep, pte);
+
+ if (PageHuge(new)) {
+diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
+index 8d1ca2d..a160ec8 100644
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -37,51 +37,48 @@ static struct srcu_struct srcu;
+ void __mmu_notifier_release(struct mm_struct *mm)
+ {
+ struct mmu_notifier *mn;
++ struct hlist_node *n;
+ int id;
+
+ /*
+- * srcu_read_lock() here will block synchronize_srcu() in
+- * mmu_notifier_unregister() until all registered
+- * ->release() callouts this function makes have
+- * returned.
++ * SRCU here will block mmu_notifier_unregister until
++ * ->release returns.
+ */
+ id = srcu_read_lock(&srcu);
++ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
++ /*
++ * If ->release runs before mmu_notifier_unregister it must be
++ * handled, as it's the only way for the driver to flush all
++ * existing sptes and stop the driver from establishing any more
++ * sptes before all the pages in the mm are freed.
++ */
++ if (mn->ops->release)
++ mn->ops->release(mn, mm);
++ srcu_read_unlock(&srcu, id);
++
+ spin_lock(&mm->mmu_notifier_mm->lock);
+ while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
+ mn = hlist_entry(mm->mmu_notifier_mm->list.first,
+ struct mmu_notifier,
+ hlist);
+-
+ /*
+- * Unlink. This will prevent mmu_notifier_unregister()
+- * from also making the ->release() callout.
++ * We arrived before mmu_notifier_unregister so
++ * mmu_notifier_unregister will do nothing other than to wait
++ * for ->release to finish and for mmu_notifier_unregister to
++ * return.
+ */
+ hlist_del_init_rcu(&mn->hlist);
+- spin_unlock(&mm->mmu_notifier_mm->lock);
+-
+- /*
+- * Clear sptes. (see 'release' description in mmu_notifier.h)
+- */
+- if (mn->ops->release)
+- mn->ops->release(mn, mm);
+-
+- spin_lock(&mm->mmu_notifier_mm->lock);
+ }
+ spin_unlock(&mm->mmu_notifier_mm->lock);
+
+ /*
+- * All callouts to ->release() which we have done are complete.
+- * Allow synchronize_srcu() in mmu_notifier_unregister() to complete
+- */
+- srcu_read_unlock(&srcu, id);
+-
+- /*
+- * mmu_notifier_unregister() may have unlinked a notifier and may
+- * still be calling out to it. Additionally, other notifiers
+- * may have been active via vmtruncate() et. al. Block here
+- * to ensure that all notifier callouts for this mm have been
+- * completed and the sptes are really cleaned up before returning
+- * to exit_mmap().
++ * synchronize_srcu here prevents mmu_notifier_release from returning to
++ * exit_mmap (which would proceed with freeing all pages in the mm)
++ * until the ->release method returns, if it was invoked by
++ * mmu_notifier_unregister.
++ *
++ * The mmu_notifier_mm can't go away from under us because one mm_count
++ * is held by exit_mmap.
+ */
+ synchronize_srcu(&srcu);
+ }
+@@ -302,31 +299,34 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
+ {
+ BUG_ON(atomic_read(&mm->mm_count) <= 0);
+
+- spin_lock(&mm->mmu_notifier_mm->lock);
+ if (!hlist_unhashed(&mn->hlist)) {
++ /*
++ * SRCU here will force exit_mmap to wait for ->release to
++ * finish before freeing the pages.
++ */
+ int id;
+
++ id = srcu_read_lock(&srcu);
+ /*
+- * Ensure we synchronize up with __mmu_notifier_release().
++ * exit_mmap will block in mmu_notifier_release to guarantee
++ * that ->release is called before freeing the pages.
+ */
+- id = srcu_read_lock(&srcu);
+-
+- hlist_del_rcu(&mn->hlist);
+- spin_unlock(&mm->mmu_notifier_mm->lock);
+-
+ if (mn->ops->release)
+ mn->ops->release(mn, mm);
++ srcu_read_unlock(&srcu, id);
+
++ spin_lock(&mm->mmu_notifier_mm->lock);
+ /*
+- * Allow __mmu_notifier_release() to complete.
++ * Can not use list_del_rcu() since __mmu_notifier_release
++ * can delete it before we hold the lock.
+ */
+- srcu_read_unlock(&srcu, id);
+- } else
++ hlist_del_init_rcu(&mn->hlist);
+ spin_unlock(&mm->mmu_notifier_mm->lock);
++ }
+
+ /*
+- * Wait for any running method to finish, including ->release() if it
+- * was run by __mmu_notifier_release() instead of us.
++ * Wait for any running method to finish, of course including
++ * ->release if it was run by mmu_notifier_relase instead of us.
+ */
+ synchronize_srcu(&srcu);
+
+diff --git a/mm/pagewalk.c b/mm/pagewalk.c
+index aa9701e..1090e77 100644
+--- a/mm/pagewalk.c
++++ b/mm/pagewalk.c
+@@ -127,28 +127,7 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
+ return 0;
+ }
+
+-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
+-{
+- struct vm_area_struct *vma;
+-
+- /* We don't need vma lookup at all. */
+- if (!walk->hugetlb_entry)
+- return NULL;
+-
+- VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+- vma = find_vma(walk->mm, addr);
+- if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
+- return vma;
+-
+- return NULL;
+-}
+-
+ #else /* CONFIG_HUGETLB_PAGE */
+-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
+-{
+- return NULL;
+-}
+-
+ static int walk_hugetlb_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+@@ -199,30 +178,53 @@ int walk_page_range(unsigned long addr, unsigned long end,
+ if (!walk->mm)
+ return -EINVAL;
+
++ VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
++
+ pgd = pgd_offset(walk->mm, addr);
+ do {
+- struct vm_area_struct *vma;
++ struct vm_area_struct *vma = NULL;
+
+ next = pgd_addr_end(addr, end);
+
+ /*
+- * handle hugetlb vma individually because pagetable walk for
+- * the hugetlb page is dependent on the architecture and
+- * we can't handled it in the same manner as non-huge pages.
++ * This function was not intended to be vma based.
++ * But there are vma special cases to be handled:
++ * - hugetlb vma's
++ * - VM_PFNMAP vma's
+ */
+- vma = hugetlb_vma(addr, walk);
++ vma = find_vma(walk->mm, addr);
+ if (vma) {
+- if (vma->vm_end < next)
++ /*
++ * There are no page structures backing a VM_PFNMAP
++ * range, so do not allow split_huge_page_pmd().
++ */
++ if ((vma->vm_start <= addr) &&
++ (vma->vm_flags & VM_PFNMAP)) {
+ next = vma->vm_end;
++ pgd = pgd_offset(walk->mm, next);
++ continue;
++ }
+ /*
+- * Hugepage is very tightly coupled with vma, so
+- * walk through hugetlb entries within a given vma.
++ * Handle hugetlb vma individually because pagetable
++ * walk for the hugetlb page is dependent on the
++ * architecture and we can't handled it in the same
++ * manner as non-huge pages.
+ */
+- err = walk_hugetlb_range(vma, addr, next, walk);
+- if (err)
+- break;
+- pgd = pgd_offset(walk->mm, next);
+- continue;
++ if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
++ is_vm_hugetlb_page(vma)) {
++ if (vma->vm_end < next)
++ next = vma->vm_end;
++ /*
++ * Hugepage is very tightly coupled with vma,
++ * so walk through hugetlb entries within a
++ * given vma.
++ */
++ err = walk_hugetlb_range(vma, addr, next, walk);
++ if (err)
++ break;
++ pgd = pgd_offset(walk->mm, next);
++ continue;
++ }
+ }
+
+ if (pgd_none_or_clear_bad(pgd)) {
+diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
+index 58de2a0..c83ee79 100644
+--- a/net/bridge/br_stp_timer.c
++++ b/net/bridge/br_stp_timer.c
+@@ -107,7 +107,7 @@ static void br_tcn_timer_expired(unsigned long arg)
+
+ br_debug(br, "tcn timer expired\n");
+ spin_lock(&br->lock);
+- if (br->dev->flags & IFF_UP) {
++ if (!br_is_root_bridge(br) && (br->dev->flags & IFF_UP)) {
+ br_transmit_tcn(br);
+
+ mod_timer(&br->tcn_timer,jiffies + br->bridge_hello_time);
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 2c73adf..8a2c2dd 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1021,18 +1021,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
+ #endif
+ }
+
+-/*
+- * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
+- * un-modified. Special care is taken when initializing object to zero.
+- */
+-static inline void sk_prot_clear_nulls(struct sock *sk, int size)
+-{
+- if (offsetof(struct sock, sk_node.next) != 0)
+- memset(sk, 0, offsetof(struct sock, sk_node.next));
+- memset(&sk->sk_node.pprev, 0,
+- size - offsetof(struct sock, sk_node.pprev));
+-}
+-
+ void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
+ {
+ unsigned long nulls1, nulls2;
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index db10805..c69358c 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -2196,6 +2196,17 @@ void tcp6_proc_exit(struct net *net)
+ }
+ #endif
+
++static void tcp_v6_clear_sk(struct sock *sk, int size)
++{
++ struct inet_sock *inet = inet_sk(sk);
++
++ /* we do not want to clear pinet6 field, because of RCU lookups */
++ sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
++
++ size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
++ memset(&inet->pinet6 + 1, 0, size);
++}
++
+ struct proto tcpv6_prot = {
+ .name = "TCPv6",
+ .owner = THIS_MODULE,
+@@ -2235,6 +2246,7 @@ struct proto tcpv6_prot = {
+ .compat_setsockopt = compat_tcp_setsockopt,
+ .compat_getsockopt = compat_tcp_getsockopt,
+ #endif
++ .clear_sk = tcp_v6_clear_sk,
+ };
+
+ static const struct inet6_protocol tcpv6_protocol = {
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 8c25419..20f0812 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1453,6 +1453,17 @@ void udp6_proc_exit(struct net *net) {
+ }
+ #endif /* CONFIG_PROC_FS */
+
++void udp_v6_clear_sk(struct sock *sk, int size)
++{
++ struct inet_sock *inet = inet_sk(sk);
++
++ /* we do not want to clear pinet6 field, because of RCU lookups */
++ sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
++
++ size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
++ memset(&inet->pinet6 + 1, 0, size);
++}
++
+ /* ------------------------------------------------------------------------ */
+
+ struct proto udpv6_prot = {
+@@ -1483,7 +1494,7 @@ struct proto udpv6_prot = {
+ .compat_setsockopt = compat_udpv6_setsockopt,
+ .compat_getsockopt = compat_udpv6_getsockopt,
+ #endif
+- .clear_sk = sk_prot_clear_portaddr_nulls,
++ .clear_sk = udp_v6_clear_sk,
+ };
+
+ static struct inet_protosw udpv6_protosw = {
+diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
+index d757104..4691ed5 100644
+--- a/net/ipv6/udp_impl.h
++++ b/net/ipv6/udp_impl.h
+@@ -31,6 +31,8 @@ extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
+ extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
+ extern void udpv6_destroy_sock(struct sock *sk);
+
++extern void udp_v6_clear_sk(struct sock *sk, int size);
++
+ #ifdef CONFIG_PROC_FS
+ extern int udp6_seq_show(struct seq_file *seq, void *v);
+ #endif
+diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
+index 1d08e21..dfcc4be 100644
+--- a/net/ipv6/udplite.c
++++ b/net/ipv6/udplite.c
+@@ -56,7 +56,7 @@ struct proto udplitev6_prot = {
+ .compat_setsockopt = compat_udpv6_setsockopt,
+ .compat_getsockopt = compat_udpv6_getsockopt,
+ #endif
+- .clear_sk = sk_prot_clear_portaddr_nulls,
++ .clear_sk = udp_v6_clear_sk,
+ };
+
+ static struct inet_protosw udplite6_protosw = {
+diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
+index d879f7e..db78e7d 100644
+--- a/net/ipv6/xfrm6_policy.c
++++ b/net/ipv6/xfrm6_policy.c
+@@ -96,8 +96,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+ dev_hold(dev);
+
+ xdst->u.rt6.rt6i_idev = in6_dev_get(dev);
+- if (!xdst->u.rt6.rt6i_idev)
++ if (!xdst->u.rt6.rt6i_idev) {
++ dev_put(dev);
+ return -ENODEV;
++ }
+
+ xdst->u.rt6.rt6i_peer = rt->rt6i_peer;
+ if (rt->rt6i_peer)
+diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
+index 13d607a..87ecf75 100644
+--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
++++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
+@@ -37,14 +37,10 @@ static int get_callid(const char *dptr, unsigned int dataoff,
+ if (ret > 0)
+ break;
+ if (!ret)
+- return 0;
++ return -EINVAL;
+ dataoff += *matchoff;
+ }
+
+- /* Empty callid is useless */
+- if (!*matchlen)
+- return -EINVAL;
+-
+ /* Too large is useless */
+ if (*matchlen > IP_VS_PEDATA_MAXLEN)
+ return -EINVAL;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 835fcea..5a70215 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -813,37 +813,27 @@ static void prb_open_block(struct tpacket_kbdq_core *pkc1,
+
+ smp_rmb();
+
+- if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
+-
+- /* We could have just memset this but we will lose the
+- * flexibility of making the priv area sticky
+- */
+- BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
+- BLOCK_NUM_PKTS(pbd1) = 0;
+- BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+- getnstimeofday(&ts);
+- h1->ts_first_pkt.ts_sec = ts.tv_sec;
+- h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
+- pkc1->pkblk_start = (char *)pbd1;
+- pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
+- BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
+- BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+- BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
+- pbd1->version = pkc1->version;
+- pkc1->prev = pkc1->nxt_offset;
+- pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
+- prb_thaw_queue(pkc1);
+- _prb_refresh_rx_retire_blk_timer(pkc1);
+-
+- smp_wmb();
+-
+- return;
+- }
++ /* We could have just memset this but we will lose the
++ * flexibility of making the priv area sticky
++ */
++ BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
++ BLOCK_NUM_PKTS(pbd1) = 0;
++ BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
++ getnstimeofday(&ts);
++ h1->ts_first_pkt.ts_sec = ts.tv_sec;
++ h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
++ pkc1->pkblk_start = (char *)pbd1;
++ pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
++ BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
++ BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
++ BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
++ pbd1->version = pkc1->version;
++ pkc1->prev = pkc1->nxt_offset;
++ pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
++ prb_thaw_queue(pkc1);
++ _prb_refresh_rx_retire_blk_timer(pkc1);
+
+- WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
+- pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
+- dump_stack();
+- BUG();
++ smp_wmb();
+ }
+
+ /*
+@@ -934,10 +924,6 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
+ prb_close_block(pkc, pbd, po, status);
+ return;
+ }
+-
+- WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
+- dump_stack();
+- BUG();
+ }
+
+ static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
+diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
+index 60f8f61..57827bf 100644
+--- a/net/sched/act_ipt.c
++++ b/net/sched/act_ipt.c
+@@ -8,7 +8,7 @@
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+- * Copyright: Jamal Hadi Salim (2002-4)
++ * Copyright: Jamal Hadi Salim (2002-13)
+ */
+
+ #include <linux/types.h>
+@@ -299,17 +299,44 @@ static struct tc_action_ops act_ipt_ops = {
+ .walk = tcf_generic_walker
+ };
+
+-MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
++static struct tc_action_ops act_xt_ops = {
++ .kind = "xt",
++ .hinfo = &ipt_hash_info,
++ .type = TCA_ACT_IPT,
++ .capab = TCA_CAP_NONE,
++ .owner = THIS_MODULE,
++ .act = tcf_ipt,
++ .dump = tcf_ipt_dump,
++ .cleanup = tcf_ipt_cleanup,
++ .lookup = tcf_hash_search,
++ .init = tcf_ipt_init,
++ .walk = tcf_generic_walker
++};
++
++MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
+ MODULE_DESCRIPTION("Iptables target actions");
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS("act_xt");
+
+ static int __init ipt_init_module(void)
+ {
+- return tcf_register_action(&act_ipt_ops);
++ int ret1, ret2;
++ ret1 = tcf_register_action(&act_xt_ops);
++ if (ret1 < 0)
++ printk("Failed to load xt action\n");
++ ret2 = tcf_register_action(&act_ipt_ops);
++ if (ret2 < 0)
++ printk("Failed to load ipt action\n");
++
++ if (ret1 < 0 && ret2 < 0)
++ return ret1;
++ else
++ return 0;
+ }
+
+ static void __exit ipt_cleanup_module(void)
+ {
++ tcf_unregister_action(&act_xt_ops);
+ tcf_unregister_action(&act_ipt_ops);
+ }
+
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index dc6af27..206c61e 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -296,13 +296,20 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
+ /*
+ * Make an RPC task runnable.
+ *
+- * Note: If the task is ASYNC, this must be called with
+- * the spinlock held to protect the wait queue operation.
++ * Note: If the task is ASYNC, and is being made runnable after sitting on an
++ * rpc_wait_queue, this must be called with the queue spinlock held to protect
++ * the wait queue operation.
++ * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
++ * which is needed to ensure that __rpc_execute() doesn't loop (due to the
++ * lockless RPC_IS_QUEUED() test) before we've had a chance to test
++ * the RPC_TASK_RUNNING flag.
+ */
+ static void rpc_make_runnable(struct rpc_task *task)
+ {
++ bool need_wakeup = !rpc_test_and_set_running(task);
++
+ rpc_clear_queued(task);
+- if (rpc_test_and_set_running(task))
++ if (!need_wakeup)
+ return;
+ if (RPC_IS_ASYNC(task)) {
+ INIT_WORK(&task->u.tk_work, rpc_async_schedule);
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 4707b6c..faabaa5 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -615,6 +615,9 @@ int snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex)
+ struct hda_bus_unsolicited *unsol;
+ unsigned int wp;
+
++ if (!bus || !bus->workq)
++ return 0;
++
+ trace_hda_unsol_event(bus, res, res_ex);
+ unsol = bus->unsol;
+ if (!unsol)
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 98c5774..b73f226 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -2636,6 +2636,7 @@ static int wm8994_aif3_hw_params(struct snd_pcm_substream *substream,
+ default:
+ return 0;
+ }
++ break;
+ default:
+ return 0;
+ }
+diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py
+index a4ffc95..4c11605 100755
+--- a/tools/perf/scripts/python/net_dropmonitor.py
++++ b/tools/perf/scripts/python/net_dropmonitor.py
+@@ -40,9 +40,9 @@ def get_kallsyms_table():
+
+ def get_sym(sloc):
+ loc = int(sloc)
+- for i in kallsyms:
+- if (i['loc'] >= loc):
+- return (i['name'], i['loc']-loc)
++ for i in kallsyms[::-1]:
++ if loc >= i['loc']:
++ return (i['name'], loc - i['loc'])
+ return (None, 0)
+
+ def print_drop_table():
+@@ -64,7 +64,7 @@ def trace_end():
+
+ # called from perf, when it finds a correspoinding event
+ def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
+- skbaddr, protocol, location):
++ skbaddr, location, protocol):
+ slocation = str(location)
+ try:
+ drop_log[slocation] = drop_log[slocation] + 1
diff --git a/1046_linux-3.2.47.patch b/1046_linux-3.2.47.patch
new file mode 100644
index 00000000..b74563cf
--- /dev/null
+++ b/1046_linux-3.2.47.patch
@@ -0,0 +1,3314 @@
+diff --git a/Makefile b/Makefile
+index f600582..40e2a11 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 46
++SUBLEVEL = 47
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
+index 21f56ff..5954a1a 100644
+--- a/arch/arm/boot/compressed/Makefile
++++ b/arch/arm/boot/compressed/Makefile
+@@ -123,7 +123,6 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
+ endif
+
+ ccflags-y := -fpic -fno-builtin -I$(obj)
+-asflags-y := -Wa,-march=all
+
+ # Supply kernel BSS size to the decompressor via a linker symbol.
+ KBSS_SZ = $(shell size $(obj)/../../../../vmlinux | awk 'END{print $$3}')
+diff --git a/arch/arm/boot/compressed/head-sa1100.S b/arch/arm/boot/compressed/head-sa1100.S
+index 6179d94..3115e31 100644
+--- a/arch/arm/boot/compressed/head-sa1100.S
++++ b/arch/arm/boot/compressed/head-sa1100.S
+@@ -11,6 +11,7 @@
+ #include <asm/mach-types.h>
+
+ .section ".start", "ax"
++ .arch armv4
+
+ __SA1100_start:
+
+diff --git a/arch/arm/boot/compressed/head-shark.S b/arch/arm/boot/compressed/head-shark.S
+index 089c560..92b5689 100644
+--- a/arch/arm/boot/compressed/head-shark.S
++++ b/arch/arm/boot/compressed/head-shark.S
+@@ -18,6 +18,7 @@
+
+ .section ".start", "ax"
+
++ .arch armv4
+ b __beginning
+
+ __ofw_data: .long 0 @ the number of memory blocks
+diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
+index d63632f..8c57359 100644
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -10,6 +10,7 @@
+ */
+ #include <linux/linkage.h>
+
++ .arch armv7-a
+ /*
+ * Debugging stuff
+ *
+diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
+index 8200dea..140c817 100644
+--- a/arch/arm/kernel/topology.c
++++ b/arch/arm/kernel/topology.c
+@@ -13,6 +13,7 @@
+
+ #include <linux/cpu.h>
+ #include <linux/cpumask.h>
++#include <linux/export.h>
+ #include <linux/init.h>
+ #include <linux/percpu.h>
+ #include <linux/node.h>
+@@ -42,6 +43,7 @@
+ #define MPIDR_LEVEL2_SHIFT 16
+
+ struct cputopo_arm cpu_topology[NR_CPUS];
++EXPORT_SYMBOL_GPL(cpu_topology);
+
+ const struct cpumask *cpu_coregroup_mask(int cpu)
+ {
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index cf9c69b..8c3baa0 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -463,7 +463,7 @@ machine_check_common:
+ STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
+ STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
+ STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
+- STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
++ STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
+ STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
+ STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
+ STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 82dcd4d..9844662 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -1036,6 +1036,16 @@ void __kprobes program_check_exception(struct pt_regs *regs)
+ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+ }
+
++/*
++ * This occurs when running in hypervisor mode on POWER6 or later
++ * and an illegal instruction is encountered.
++ */
++void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
++{
++ regs->msr |= REASON_ILLEGAL;
++ program_check_exception(regs);
++}
++
+ void alignment_exception(struct pt_regs *regs)
+ {
+ int sig, code, fixed = 0;
+diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
+index 7a6f3b3..f2bb9c9 100644
+--- a/arch/x86/kernel/relocate_kernel_64.S
++++ b/arch/x86/kernel/relocate_kernel_64.S
+@@ -160,7 +160,7 @@ identity_mapped:
+ xorq %rbp, %rbp
+ xorq %r8, %r8
+ xorq %r9, %r9
+- xorq %r10, %r9
++ xorq %r10, %r10
+ xorq %r11, %r11
+ xorq %r12, %r12
+ xorq %r13, %r13
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index 0e47949..e19898d 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -447,6 +447,38 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"),
+ },
+ },
++ {
++ .callback = video_ignore_initial_backlight,
++ .ident = "HP Pavilion dm4",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"),
++ },
++ },
++ {
++ .callback = video_ignore_initial_backlight,
++ .ident = "HP Pavilion g6 Notebook PC",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
++ },
++ },
++ {
++ .callback = video_ignore_initial_backlight,
++ .ident = "HP 1000 Notebook PC",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"),
++ },
++ },
++ {
++ .callback = video_ignore_initial_backlight,
++ .ident = "HP Pavilion m4",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"),
++ },
++ },
+ {}
+ };
+
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index ddfc1c1..0e92326 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -151,6 +151,7 @@ enum piix_controller_ids {
+ piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
+ ich8_sata_snb,
+ ich8_2port_sata_snb,
++ ich8_2port_sata_byt,
+ };
+
+ struct piix_map_db {
+@@ -356,6 +357,9 @@ static const struct pci_device_id piix_pci_tbl[] = {
+ { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ /* SATA Controller IDE (BayTrail) */
++ { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
++ { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
+
+ { } /* terminate list */
+ };
+@@ -521,6 +525,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
+ [tolapai_sata] = &tolapai_map_db,
+ [ich8_sata_snb] = &ich8_map_db,
+ [ich8_2port_sata_snb] = &ich8_2port_map_db,
++ [ich8_2port_sata_byt] = &ich8_2port_map_db,
+ };
+
+ static struct ata_port_info piix_port_info[] = {
+@@ -672,6 +677,15 @@ static struct ata_port_info piix_port_info[] = {
+ .port_ops = &piix_sata_ops,
+ },
+
++ [ich8_2port_sata_byt] =
++ {
++ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
++ .pio_mask = ATA_PIO4,
++ .mwdma_mask = ATA_MWDMA2,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &piix_sata_ops,
++ },
++
+ };
+
+ static struct pci_bits piix_enable_bits[] = {
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 288b635..d54b7d6 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -1598,6 +1598,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
+ qc->tf = *tf;
+ if (cdb)
+ memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
++
++ /* some SATA bridges need us to indicate data xfer direction */
++ if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
++ dma_dir == DMA_FROM_DEVICE)
++ qc->tf.feature |= ATAPI_DMADIR;
++
+ qc->flags |= ATA_QCFLAG_RESULT_TF;
+ qc->dma_dir = dma_dir;
+ if (dma_dir != DMA_NONE) {
+diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
+index b0f553b..d3446f6 100644
+--- a/drivers/block/cciss.c
++++ b/drivers/block/cciss.c
+@@ -161,8 +161,6 @@ static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
+ static int cciss_open(struct block_device *bdev, fmode_t mode);
+ static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
+ static int cciss_release(struct gendisk *disk, fmode_t mode);
+-static int do_ioctl(struct block_device *bdev, fmode_t mode,
+- unsigned int cmd, unsigned long arg);
+ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg);
+ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
+@@ -229,7 +227,7 @@ static const struct block_device_operations cciss_fops = {
+ .owner = THIS_MODULE,
+ .open = cciss_unlocked_open,
+ .release = cciss_release,
+- .ioctl = do_ioctl,
++ .ioctl = cciss_ioctl,
+ .getgeo = cciss_getgeo,
+ #ifdef CONFIG_COMPAT
+ .compat_ioctl = cciss_compat_ioctl,
+@@ -1140,16 +1138,6 @@ static int cciss_release(struct gendisk *disk, fmode_t mode)
+ return 0;
+ }
+
+-static int do_ioctl(struct block_device *bdev, fmode_t mode,
+- unsigned cmd, unsigned long arg)
+-{
+- int ret;
+- mutex_lock(&cciss_mutex);
+- ret = cciss_ioctl(bdev, mode, cmd, arg);
+- mutex_unlock(&cciss_mutex);
+- return ret;
+-}
+-
+ #ifdef CONFIG_COMPAT
+
+ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
+@@ -1176,7 +1164,7 @@ static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ case CCISS_REGNEWD:
+ case CCISS_RESCANDISK:
+ case CCISS_GETLUNINFO:
+- return do_ioctl(bdev, mode, cmd, arg);
++ return cciss_ioctl(bdev, mode, cmd, arg);
+
+ case CCISS_PASSTHRU32:
+ return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
+@@ -1216,7 +1204,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
+ if (err)
+ return -EFAULT;
+
+- err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
++ err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
+ if (err)
+ return err;
+ err |=
+@@ -1258,7 +1246,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
+ if (err)
+ return -EFAULT;
+
+- err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
++ err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
+ if (err)
+ return err;
+ err |=
+@@ -1308,11 +1296,14 @@ static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
+ static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
+ {
+ cciss_coalint_struct intinfo;
++ unsigned long flags;
+
+ if (!argp)
+ return -EINVAL;
++ spin_lock_irqsave(&h->lock, flags);
+ intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
+ intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
++ spin_unlock_irqrestore(&h->lock, flags);
+ if (copy_to_user
+ (argp, &intinfo, sizeof(cciss_coalint_struct)))
+ return -EFAULT;
+@@ -1353,12 +1344,15 @@ static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
+ static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
+ {
+ NodeName_type NodeName;
++ unsigned long flags;
+ int i;
+
+ if (!argp)
+ return -EINVAL;
++ spin_lock_irqsave(&h->lock, flags);
+ for (i = 0; i < 16; i++)
+ NodeName[i] = readb(&h->cfgtable->ServerName[i]);
++ spin_unlock_irqrestore(&h->lock, flags);
+ if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
+ return -EFAULT;
+ return 0;
+@@ -1395,10 +1389,13 @@ static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
+ static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
+ {
+ Heartbeat_type heartbeat;
++ unsigned long flags;
+
+ if (!argp)
+ return -EINVAL;
++ spin_lock_irqsave(&h->lock, flags);
+ heartbeat = readl(&h->cfgtable->HeartBeat);
++ spin_unlock_irqrestore(&h->lock, flags);
+ if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
+ return -EFAULT;
+ return 0;
+@@ -1407,10 +1404,13 @@ static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
+ static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
+ {
+ BusTypes_type BusTypes;
++ unsigned long flags;
+
+ if (!argp)
+ return -EINVAL;
++ spin_lock_irqsave(&h->lock, flags);
+ BusTypes = readl(&h->cfgtable->BusTypes);
++ spin_unlock_irqrestore(&h->lock, flags);
+ if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
+ return -EFAULT;
+ return 0;
+diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
+index 44a5d0a..73af885 100644
+--- a/drivers/gpu/drm/drm_irq.c
++++ b/drivers/gpu/drm/drm_irq.c
+@@ -981,7 +981,7 @@ EXPORT_SYMBOL(drm_vblank_off);
+ */
+ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+ {
+- /* vblank is not initialized (IRQ not installed ?) */
++ /* vblank is not initialized (IRQ not installed ?), or has been freed */
+ if (!dev->num_crtcs)
+ return;
+ /*
+@@ -1003,6 +1003,10 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
+ {
+ unsigned long irqflags;
+
++ /* vblank is not initialized (IRQ not installed ?), or has been freed */
++ if (!dev->num_crtcs)
++ return;
++
+ if (dev->vblank_inmodeset[crtc]) {
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ dev->vblank_disable_allowed = 1;
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 876bac0..2ffa740 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -740,10 +740,10 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+- .ident = "Hewlett-Packard HP t5740e Thin Client",
++ .ident = "Hewlett-Packard HP t5740",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+- DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
++ DMI_MATCH(DMI_PRODUCT_NAME, " t5740"),
+ },
+ },
+ {
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index 9e24670..00ec0dd 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -1582,11 +1582,14 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+ * Assume that the preferred modes are
+ * arranged in priority order.
+ */
+- intel_ddc_get_modes(connector, intel_sdvo->i2c);
+- if (list_empty(&connector->probed_modes) == false)
+- goto end;
++ intel_ddc_get_modes(connector, &intel_sdvo->ddc);
+
+- /* Fetch modes from VBT */
++ /*
++ * Fetch modes from VBT. For SDVO prefer the VBT mode since some
++ * SDVO->LVDS transcoders can't cope with the EDID mode. Since
++ * drm_mode_probed_add adds the mode at the head of the list we add it
++ * last.
++ */
+ if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
+ newmode = drm_mode_duplicate(connector->dev,
+ dev_priv->sdvo_lvds_vbt_mode);
+@@ -1598,7 +1601,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+ }
+ }
+
+-end:
+ list_for_each_entry(newmode, &connector->probed_modes, head) {
+ if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ intel_sdvo->sdvo_lvds_fixed_mode =
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 0495a50..9bea4a6 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -3086,6 +3086,12 @@ static int evergreen_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+@@ -3218,10 +3224,6 @@ int evergreen_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+-
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
+
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 636255b..3f9705b 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -1389,6 +1389,12 @@ static int cayman_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+@@ -1506,10 +1512,6 @@ int cayman_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+-
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
+
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index fad7cd1..76c1290 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -3905,6 +3905,12 @@ static int r100_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ r100_irq_set(rdev);
+ rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+@@ -4050,9 +4056,6 @@ int r100_init(struct radeon_device *rdev)
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
+index c93bc64..441570b 100644
+--- a/drivers/gpu/drm/radeon/r300.c
++++ b/drivers/gpu/drm/radeon/r300.c
+@@ -1397,6 +1397,12 @@ static int r300_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+@@ -1521,9 +1527,6 @@ int r300_init(struct radeon_device *rdev)
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
+index 417fab8..5b219b8 100644
+--- a/drivers/gpu/drm/radeon/r420.c
++++ b/drivers/gpu/drm/radeon/r420.c
+@@ -255,6 +255,12 @@ static int r420_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+@@ -391,10 +397,6 @@ int r420_init(struct radeon_device *rdev)
+ if (r) {
+ return r;
+ }
+- r = radeon_irq_kms_init(rdev);
+- if (r) {
+- return r;
+- }
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r) {
+diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
+index 3081d07..f36a5c9 100644
+--- a/drivers/gpu/drm/radeon/r520.c
++++ b/drivers/gpu/drm/radeon/r520.c
+@@ -188,6 +188,12 @@ static int r520_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+@@ -281,9 +287,6 @@ int r520_init(struct radeon_device *rdev)
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index bdfa82a..3d46d7d4 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -2449,6 +2449,12 @@ int r600_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+@@ -2592,10 +2598,6 @@ int r600_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+-
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
+
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index bd959c1..cd94abb 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -359,18 +359,17 @@ bool radeon_card_posted(struct radeon_device *rdev)
+ return false;
+
+ /* first check CRTCs */
+- if (ASIC_IS_DCE41(rdev)) {
++ if (ASIC_IS_DCE4(rdev)) {
+ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+- if (reg & EVERGREEN_CRTC_MASTER_EN)
+- return true;
+- } else if (ASIC_IS_DCE4(rdev)) {
+- reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
+- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
+- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+- RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
++ if (rdev->num_crtc >= 4) {
++ reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
++ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
++ }
++ if (rdev->num_crtc >= 6) {
++ reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
++ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
++ }
+ if (reg & EVERGREEN_CRTC_MASTER_EN)
+ return true;
+ } else if (ASIC_IS_AVIVO(rdev)) {
+diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
+index 06b90c8..4dd9512 100644
+--- a/drivers/gpu/drm/radeon/rs400.c
++++ b/drivers/gpu/drm/radeon/rs400.c
+@@ -411,6 +411,12 @@ static int rs400_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+@@ -519,9 +525,6 @@ int rs400_init(struct radeon_device *rdev)
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
+index ee898e9..cea482a 100644
+--- a/drivers/gpu/drm/radeon/rs600.c
++++ b/drivers/gpu/drm/radeon/rs600.c
+@@ -848,6 +848,12 @@ static int rs600_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+@@ -963,9 +969,6 @@ int rs600_init(struct radeon_device *rdev)
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
+index a9049ed..93bce72 100644
+--- a/drivers/gpu/drm/radeon/rs690.c
++++ b/drivers/gpu/drm/radeon/rs690.c
+@@ -622,6 +622,12 @@ static int rs690_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+@@ -738,9 +744,6 @@ int rs690_init(struct radeon_device *rdev)
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
+index d5f45b4..9103638 100644
+--- a/drivers/gpu/drm/radeon/rv515.c
++++ b/drivers/gpu/drm/radeon/rv515.c
+@@ -380,6 +380,12 @@ static int rv515_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ /* 1M ring buffer */
+@@ -500,9 +506,6 @@ int rv515_init(struct radeon_device *rdev)
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
+index cc79449..63db75d 100644
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -1092,6 +1092,12 @@ static int rv770_startup(struct radeon_device *rdev)
+ return r;
+
+ /* Enable IRQ */
++ if (!rdev->irq.installed) {
++ r = radeon_irq_kms_init(rdev);
++ if (r)
++ return r;
++ }
++
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+@@ -1220,10 +1226,6 @@ int rv770_init(struct radeon_device *rdev)
+ if (r)
+ return r;
+
+- r = radeon_irq_kms_init(rdev);
+- if (r)
+- return r;
+-
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
+
+diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
+index 1ad0a88..8178927 100644
+--- a/drivers/hwmon/adm1021.c
++++ b/drivers/hwmon/adm1021.c
+@@ -311,26 +311,68 @@ static int adm1021_detect(struct i2c_client *client,
+ man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID);
+ dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID);
+
++ if (man_id < 0 || dev_id < 0)
++ return -ENODEV;
++
+ if (man_id == 0x4d && dev_id == 0x01)
+ type_name = "max1617a";
+ else if (man_id == 0x41) {
+ if ((dev_id & 0xF0) == 0x30)
+ type_name = "adm1023";
+- else
++ else if ((dev_id & 0xF0) == 0x00)
+ type_name = "adm1021";
++ else
++ return -ENODEV;
+ } else if (man_id == 0x49)
+ type_name = "thmc10";
+ else if (man_id == 0x23)
+ type_name = "gl523sm";
+ else if (man_id == 0x54)
+ type_name = "mc1066";
+- /* LM84 Mfr ID in a different place, and it has more unused bits */
+- else if (conv_rate == 0x00
+- && (config & 0x7F) == 0x00
+- && (status & 0xAB) == 0x00)
+- type_name = "lm84";
+- else
+- type_name = "max1617";
++ else {
++ int lte, rte, lhi, rhi, llo, rlo;
++
++ /* extra checks for LM84 and MAX1617 to avoid misdetections */
++
++ llo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(0));
++ rlo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(1));
++
++ /* fail if any of the additional register reads failed */
++ if (llo < 0 || rlo < 0)
++ return -ENODEV;
++
++ lte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(0));
++ rte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(1));
++ lhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(0));
++ rhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(1));
++
++ /*
++ * Fail for negative temperatures and negative high limits.
++ * This check also catches read errors on the tested registers.
++ */
++ if ((s8)lte < 0 || (s8)rte < 0 || (s8)lhi < 0 || (s8)rhi < 0)
++ return -ENODEV;
++
++ /* fail if all registers hold the same value */
++ if (lte == rte && lte == lhi && lte == rhi && lte == llo
++ && lte == rlo)
++ return -ENODEV;
++
++ /*
++ * LM84 Mfr ID is in a different place,
++ * and it has more unused bits.
++ */
++ if (conv_rate == 0x00
++ && (config & 0x7F) == 0x00
++ && (status & 0xAB) == 0x00) {
++ type_name = "lm84";
++ } else {
++ /* fail if low limits are larger than high limits */
++ if ((s8)llo > lhi || (s8)rlo > rhi)
++ return -ENODEV;
++ type_name = "max1617";
++ }
++ }
+
+ pr_debug("adm1021: Detected chip %s at adapter %d, address 0x%02x.\n",
+ type_name, i2c_adapter_id(adapter), client->addr);
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 62306e5..298e02a 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -407,7 +407,17 @@ static void raid1_end_write_request(struct bio *bio, int error)
+
+ r1_bio->bios[mirror] = NULL;
+ to_put = bio;
+- set_bit(R1BIO_Uptodate, &r1_bio->state);
++ /*
++ * Do not set R1BIO_Uptodate if the current device is
++ * rebuilding or Faulty. This is because we cannot use
++ * such device for properly reading the data back (we could
++ * potentially use it, if the current write would have felt
++ * before rdev->recovery_offset, but for simplicity we don't
++ * check this here.
++ */
++ if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
++ !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
++ set_bit(R1BIO_Uptodate, &r1_bio->state);
+
+ /* Maybe we can clear some bad blocks. */
+ if (is_badblock(conf->mirrors[mirror].rdev,
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 8f67c4d..8bba438 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -390,7 +390,17 @@ static void raid10_end_write_request(struct bio *bio, int error)
+ sector_t first_bad;
+ int bad_sectors;
+
+- set_bit(R10BIO_Uptodate, &r10_bio->state);
++ /*
++ * Do not set R10BIO_Uptodate if the current device is
++ * rebuilding or Faulty. This is because we cannot use
++ * such device for properly reading the data back (we could
++ * potentially use it, if the current write would have felt
++ * before rdev->recovery_offset, but for simplicity we don't
++ * check this here.
++ */
++ if (test_bit(In_sync, &conf->mirrors[dev].rdev->flags) &&
++ !test_bit(Faulty, &conf->mirrors[dev].rdev->flags))
++ set_bit(R10BIO_Uptodate, &r10_bio->state);
+
+ /* Maybe we can clear some bad blocks. */
+ if (is_badblock(conf->mirrors[dev].rdev,
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index ec13a59..1bc927a 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -1620,6 +1620,9 @@ static int tg3_poll_fw(struct tg3 *tp)
+ int i;
+ u32 val;
+
++ if (tg3_flag(tp, NO_FWARE_REPORTED))
++ return 0;
++
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ /* Wait up to 20ms for init done. */
+ for (i = 0; i < 200; i++) {
+@@ -8282,6 +8285,14 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
+ tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
+ }
+
++static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
++{
++ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
++ return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
++ else
++ return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
++}
++
+ /* tp->lock is held. */
+ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
+ {
+@@ -8920,6 +8931,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
+ tw32_f(RDMAC_MODE, rdmac_mode);
+ udelay(40);
+
++ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
++ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
++ for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
++ if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
++ break;
++ }
++ if (i < TG3_NUM_RDMA_CHANNELS) {
++ val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
++ val |= tg3_lso_rd_dma_workaround_bit(tp);
++ tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
++ tg3_flag_set(tp, 5719_5720_RDMA_BUG);
++ }
++ }
++
+ tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
+ if (!tg3_flag(tp, 5705_PLUS))
+ tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
+@@ -9166,6 +9191,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
+ */
+ static int tg3_init_hw(struct tg3 *tp, int reset_phy)
+ {
++ /* Chip may have been just powered on. If so, the boot code may still
++ * be running initialization. Wait for it to finish to avoid races in
++ * accessing the hardware.
++ */
++ tg3_enable_register_access(tp);
++ tg3_poll_fw(tp);
++
+ tg3_switch_clocks(tp);
+
+ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+@@ -9200,6 +9232,16 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
+ TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
+ TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
+ TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
++ if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
++ (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
++ sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
++ u32 val;
++
++ val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
++ val &= ~tg3_lso_rd_dma_workaround_bit(tp);
++ tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
++ tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
++ }
+
+ TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
+ TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
+diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
+index 94b4bd0..da90ba5 100644
+--- a/drivers/net/ethernet/broadcom/tg3.h
++++ b/drivers/net/ethernet/broadcom/tg3.h
+@@ -1368,7 +1368,12 @@
+ #define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910
+ #define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000
+ #define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000
+-/* 0x4914 --> 0x4c00 unused */
++#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5719 0x02000000
++#define TG3_LSO_RD_DMA_TX_LENGTH_WA_5720 0x00200000
++/* 0x4914 --> 0x4be0 unused */
++
++#define TG3_NUM_RDMA_CHANNELS 4
++#define TG3_RDMA_LENGTH 0x00004be0
+
+ /* Write DMA control registers */
+ #define WDMAC_MODE 0x00004c00
+@@ -2921,6 +2926,7 @@ enum TG3_FLAGS {
+ TG3_FLAG_APE_HAS_NCSI,
+ TG3_FLAG_5717_PLUS,
+ TG3_FLAG_4K_FIFO_LIMIT,
++ TG3_FLAG_5719_5720_RDMA_BUG,
+ TG3_FLAG_RESET_TASK_PENDING,
+
+ /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
+diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
+index d9c08c6..f4ed4d8 100644
+--- a/drivers/net/wireless/ath/ath9k/Kconfig
++++ b/drivers/net/wireless/ath/ath9k/Kconfig
+@@ -50,13 +50,17 @@ config ATH9K_DEBUGFS
+
+ Also required for changing debug message flags at run time.
+
+-config ATH9K_RATE_CONTROL
++config ATH9K_LEGACY_RATE_CONTROL
+ bool "Atheros ath9k rate control"
+ depends on ATH9K
+- default y
++ default n
+ ---help---
+ Say Y, if you want to use the ath9k specific rate control
+- module instead of minstrel_ht.
++ module instead of minstrel_ht. Be warned that there are various
++ issues with the ath9k RC and minstrel is a more robust algorithm.
++ Note that even if this option is selected, "ath9k_rate_control"
++ has to be passed to mac80211 using the module parameter,
++ ieee80211_default_rc_algo.
+
+ config ATH9K_HTC
+ tristate "Atheros HTC based wireless cards support"
+diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
+index 36ed3c4..1cdb246 100644
+--- a/drivers/net/wireless/ath/ath9k/Makefile
++++ b/drivers/net/wireless/ath/ath9k/Makefile
+@@ -5,7 +5,7 @@ ath9k-y += beacon.o \
+ recv.o \
+ xmit.o \
+
+-ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
++ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
+ ath9k-$(CONFIG_ATH9K_PCI) += pci.o
+ ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
+ ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
+diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
+index 57622e0..ba6a49c 100644
+--- a/drivers/net/wireless/ath/ath9k/init.c
++++ b/drivers/net/wireless/ath/ath9k/init.c
+@@ -691,8 +691,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+- if (AR_SREV_5416(sc->sc_ah))
+- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
++ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+@@ -714,10 +713,6 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+ sc->ant_rx = hw->wiphy->available_antennas_rx;
+ sc->ant_tx = hw->wiphy->available_antennas_tx;
+
+-#ifdef CONFIG_ATH9K_RATE_CONTROL
+- hw->rate_control_algorithm = "ath9k_rate_control";
+-#endif
+-
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &sc->sbands[IEEE80211_BAND_2GHZ];
+diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
+index b7a4bcd..e8e1901 100644
+--- a/drivers/net/wireless/ath/ath9k/rc.h
++++ b/drivers/net/wireless/ath/ath9k/rc.h
+@@ -221,7 +221,7 @@ struct ath_rate_priv {
+ struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
+ };
+
+-#ifdef CONFIG_ATH9K_RATE_CONTROL
++#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL
+ int ath_rate_control_register(void);
+ void ath_rate_control_unregister(void);
+ #else
+diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
+index c0f2041..b0c2801 100644
+--- a/drivers/net/wireless/b43/main.c
++++ b/drivers/net/wireless/b43/main.c
+@@ -2421,7 +2421,7 @@ static int b43_request_firmware(struct b43_wldev *dev)
+ for (i = 0; i < B43_NR_FWTYPES; i++) {
+ errmsg = ctx->errors[i];
+ if (strlen(errmsg))
+- b43err(dev->wl, errmsg);
++ b43err(dev->wl, "%s", errmsg);
+ }
+ b43_print_fw_helptext(dev->wl, 1);
+ err = -ENOENT;
+diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
+index c5ce163..71195cb 100644
+--- a/drivers/net/wireless/b43legacy/main.c
++++ b/drivers/net/wireless/b43legacy/main.c
+@@ -3837,6 +3837,8 @@ static void b43legacy_remove(struct ssb_device *dev)
+ cancel_work_sync(&wldev->restart_work);
+
+ B43legacy_WARN_ON(!wl);
++ if (!wldev->fw.ucode)
++ return; /* NULL if fw never loaded */
+ if (wl->current_dev == wldev)
+ ieee80211_unregister_hw(wl->hw);
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+index 3935994..bc30a5f 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+@@ -604,7 +604,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+ memcpy(&lq, priv->stations[i].lq,
+ sizeof(struct iwl_link_quality_cmd));
+
+- if (!memcmp(&lq, &zero_lq, sizeof(lq)))
++ if (memcmp(&lq, &zero_lq, sizeof(lq)))
+ send_lq = true;
+ }
+ spin_unlock_irqrestore(&priv->shrd->sta_lock,
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 185a0eb..fd2b92d 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -46,11 +46,33 @@
+ #include <asm/xen/hypercall.h>
+ #include <asm/xen/page.h>
+
++/*
++ * This is the maximum slots a skb can have. If a guest sends a skb
++ * which exceeds this limit it is considered malicious.
++ */
++#define FATAL_SKB_SLOTS_DEFAULT 20
++static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
++module_param(fatal_skb_slots, uint, 0444);
++
++/*
++ * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
++ * the maximum slots a valid packet can use. Now this value is defined
++ * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
++ * all backend.
++ */
++#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
++
++typedef unsigned int pending_ring_idx_t;
++#define INVALID_PENDING_RING_IDX (~0U)
++
+ struct pending_tx_info {
+- struct xen_netif_tx_request req;
++ struct xen_netif_tx_request req; /* coalesced tx request */
+ struct xenvif *vif;
++ pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
++ * if it is head of one or more tx
++ * reqs
++ */
+ };
+-typedef unsigned int pending_ring_idx_t;
+
+ struct netbk_rx_meta {
+ int id;
+@@ -101,7 +123,11 @@ struct xen_netbk {
+ atomic_t netfront_count;
+
+ struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
+- struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
++ /* Coalescing tx requests before copying makes number of grant
++ * copy ops greater or equal to number of slots required. In
++ * worst case a tx request consumes 2 gnttab_copy.
++ */
++ struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
+
+ u16 pending_ring[MAX_PENDING_REQS];
+
+@@ -117,6 +143,16 @@ struct xen_netbk {
+ static struct xen_netbk *xen_netbk;
+ static int xen_netbk_group_nr;
+
++/*
++ * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
++ * one or more merged tx requests, otherwise it is the continuation of
++ * previous tx request.
++ */
++static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
++{
++ return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
++}
++
+ void xen_netbk_add_xenvif(struct xenvif *vif)
+ {
+ int i;
+@@ -249,6 +285,7 @@ static int max_required_rx_slots(struct xenvif *vif)
+ {
+ int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+
++ /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
+ if (vif->can_sg || vif->gso || vif->gso_prefix)
+ max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
+
+@@ -627,6 +664,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
+ __skb_queue_tail(&rxq, skb);
+
+ /* Filled the batch queue? */
++ /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
+ if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
+ break;
+ }
+@@ -874,47 +912,99 @@ static int netbk_count_requests(struct xenvif *vif,
+ int work_to_do)
+ {
+ RING_IDX cons = vif->tx.req_cons;
+- int frags = 0;
++ int slots = 0;
++ int drop_err = 0;
++ int more_data;
+
+ if (!(first->flags & XEN_NETTXF_more_data))
+ return 0;
+
+ do {
+- if (frags >= work_to_do) {
+- netdev_err(vif->dev, "Need more frags\n");
++ struct xen_netif_tx_request dropped_tx = { 0 };
++
++ if (slots >= work_to_do) {
++ netdev_err(vif->dev,
++ "Asked for %d slots but exceeds this limit\n",
++ work_to_do);
+ netbk_fatal_tx_err(vif);
+ return -ENODATA;
+ }
+
+- if (unlikely(frags >= MAX_SKB_FRAGS)) {
+- netdev_err(vif->dev, "Too many frags\n");
++ /* This guest is really using too many slots and
++ * considered malicious.
++ */
++ if (unlikely(slots >= fatal_skb_slots)) {
++ netdev_err(vif->dev,
++ "Malicious frontend using %d slots, threshold %u\n",
++ slots, fatal_skb_slots);
+ netbk_fatal_tx_err(vif);
+ return -E2BIG;
+ }
+
+- memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
++ /* Xen network protocol had implicit dependency on
++ * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
++ * the historical MAX_SKB_FRAGS value 18 to honor the
++ * same behavior as before. Any packet using more than
++ * 18 slots but less than fatal_skb_slots slots is
++ * dropped
++ */
++ if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
++ if (net_ratelimit())
++ netdev_dbg(vif->dev,
++ "Too many slots (%d) exceeding limit (%d), dropping packet\n",
++ slots, XEN_NETBK_LEGACY_SLOTS_MAX);
++ drop_err = -E2BIG;
++ }
++
++ if (drop_err)
++ txp = &dropped_tx;
++
++ memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
+ sizeof(*txp));
+- if (txp->size > first->size) {
+- netdev_err(vif->dev, "Frag is bigger than frame.\n");
+- netbk_fatal_tx_err(vif);
+- return -EIO;
++
++ /* If the guest submitted a frame >= 64 KiB then
++ * first->size overflowed and following slots will
++ * appear to be larger than the frame.
++ *
++ * This cannot be fatal error as there are buggy
++ * frontends that do this.
++ *
++ * Consume all slots and drop the packet.
++ */
++ if (!drop_err && txp->size > first->size) {
++ if (net_ratelimit())
++ netdev_dbg(vif->dev,
++ "Invalid tx request, slot size %u > remaining size %u\n",
++ txp->size, first->size);
++ drop_err = -EIO;
+ }
+
+ first->size -= txp->size;
+- frags++;
++ slots++;
+
+ if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
+- netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
++ netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
+ txp->offset, txp->size);
+ netbk_fatal_tx_err(vif);
+ return -EINVAL;
+ }
+- } while ((txp++)->flags & XEN_NETTXF_more_data);
+- return frags;
++
++ more_data = txp->flags & XEN_NETTXF_more_data;
++
++ if (!drop_err)
++ txp++;
++
++ } while (more_data);
++
++ if (drop_err) {
++ netbk_tx_err(vif, first, cons + slots);
++ return drop_err;
++ }
++
++ return slots;
+ }
+
+ static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
+- struct sk_buff *skb,
+ u16 pending_idx)
+ {
+ struct page *page;
+@@ -935,50 +1025,114 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ skb_frag_t *frags = shinfo->frags;
+ u16 pending_idx = *((u16 *)skb->data);
+- int i, start;
++ u16 head_idx = 0;
++ int slot, start;
++ struct page *page;
++ pending_ring_idx_t index, start_idx = 0;
++ uint16_t dst_offset;
++ unsigned int nr_slots;
++ struct pending_tx_info *first = NULL;
++
++ /* At this point shinfo->nr_frags is in fact the number of
++ * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
++ */
++ nr_slots = shinfo->nr_frags;
+
+ /* Skip first skb fragment if it is on same page as header fragment. */
+ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+
+- for (i = start; i < shinfo->nr_frags; i++, txp++) {
+- struct page *page;
+- pending_ring_idx_t index;
++ /* Coalesce tx requests, at this point the packet passed in
++ * should be <= 64K. Any packets larger than 64K have been
++ * handled in netbk_count_requests().
++ */
++ for (shinfo->nr_frags = slot = start; slot < nr_slots;
++ shinfo->nr_frags++) {
+ struct pending_tx_info *pending_tx_info =
+ netbk->pending_tx_info;
+
+- index = pending_index(netbk->pending_cons++);
+- pending_idx = netbk->pending_ring[index];
+- page = xen_netbk_alloc_page(netbk, skb, pending_idx);
++ page = alloc_page(GFP_KERNEL|__GFP_COLD);
+ if (!page)
+ goto err;
+
+- netbk->mmap_pages[pending_idx] = page;
+-
+- gop->source.u.ref = txp->gref;
+- gop->source.domid = vif->domid;
+- gop->source.offset = txp->offset;
+-
+- gop->dest.u.gmfn = virt_to_mfn(page_address(page));
+- gop->dest.domid = DOMID_SELF;
+- gop->dest.offset = txp->offset;
+-
+- gop->len = txp->size;
+- gop->flags = GNTCOPY_source_gref;
++ dst_offset = 0;
++ first = NULL;
++ while (dst_offset < PAGE_SIZE && slot < nr_slots) {
++ gop->flags = GNTCOPY_source_gref;
++
++ gop->source.u.ref = txp->gref;
++ gop->source.domid = vif->domid;
++ gop->source.offset = txp->offset;
++
++ gop->dest.domid = DOMID_SELF;
++
++ gop->dest.offset = dst_offset;
++ gop->dest.u.gmfn = virt_to_mfn(page_address(page));
++
++ if (dst_offset + txp->size > PAGE_SIZE) {
++ /* This page can only merge a portion
++ * of tx request. Do not increment any
++ * pointer / counter here. The txp
++ * will be dealt with in future
++ * rounds, eventually hitting the
++ * `else` branch.
++ */
++ gop->len = PAGE_SIZE - dst_offset;
++ txp->offset += gop->len;
++ txp->size -= gop->len;
++ dst_offset += gop->len; /* quit loop */
++ } else {
++ /* This tx request can be merged in the page */
++ gop->len = txp->size;
++ dst_offset += gop->len;
++
++ index = pending_index(netbk->pending_cons++);
++
++ pending_idx = netbk->pending_ring[index];
++
++ memcpy(&pending_tx_info[pending_idx].req, txp,
++ sizeof(*txp));
++ xenvif_get(vif);
++
++ pending_tx_info[pending_idx].vif = vif;
++
++ /* Poison these fields, corresponding
++ * fields for head tx req will be set
++ * to correct values after the loop.
++ */
++ netbk->mmap_pages[pending_idx] = (void *)(~0UL);
++ pending_tx_info[pending_idx].head =
++ INVALID_PENDING_RING_IDX;
++
++ if (!first) {
++ first = &pending_tx_info[pending_idx];
++ start_idx = index;
++ head_idx = pending_idx;
++ }
++
++ txp++;
++ slot++;
++ }
+
+- gop++;
++ gop++;
++ }
+
+- memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
+- xenvif_get(vif);
+- pending_tx_info[pending_idx].vif = vif;
+- frag_set_pending_idx(&frags[i], pending_idx);
++ first->req.offset = 0;
++ first->req.size = dst_offset;
++ first->head = start_idx;
++ set_page_ext(page, netbk, head_idx);
++ netbk->mmap_pages[head_idx] = page;
++ frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
+ }
+
++ BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
++
+ return gop;
+ err:
+ /* Unwind, freeing all pages and sending error responses. */
+- while (i-- > start) {
+- xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
+- XEN_NETIF_RSP_ERROR);
++ while (shinfo->nr_frags-- > start) {
++ xen_netbk_idx_release(netbk,
++ frag_get_pending_idx(&frags[shinfo->nr_frags]),
++ XEN_NETIF_RSP_ERROR);
+ }
+ /* The head too, if necessary. */
+ if (start)
+@@ -994,8 +1148,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+ struct gnttab_copy *gop = *gopp;
+ u16 pending_idx = *((u16 *)skb->data);
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ struct pending_tx_info *tx_info;
+ int nr_frags = shinfo->nr_frags;
+ int i, err, start;
++ u16 peek; /* peek into next tx request */
+
+ /* Check status of header. */
+ err = gop->status;
+@@ -1007,11 +1163,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
+
+ for (i = start; i < nr_frags; i++) {
+ int j, newerr;
++ pending_ring_idx_t head;
+
+ pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
++ tx_info = &netbk->pending_tx_info[pending_idx];
++ head = tx_info->head;
+
+ /* Check error status: if okay then remember grant handle. */
+- newerr = (++gop)->status;
++ do {
++ newerr = (++gop)->status;
++ if (newerr)
++ break;
++ peek = netbk->pending_ring[pending_index(++head)];
++ } while (!pending_tx_is_head(netbk, peek));
++
+ if (likely(!newerr)) {
+ /* Had a previous error? Invalidate this fragment. */
+ if (unlikely(err))
+@@ -1236,11 +1401,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ struct sk_buff *skb;
+ int ret;
+
+- while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
++ while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
++ < MAX_PENDING_REQS) &&
+ !list_empty(&netbk->net_schedule_list)) {
+ struct xenvif *vif;
+ struct xen_netif_tx_request txreq;
+- struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
++ struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
+ struct page *page;
+ struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
+ u16 pending_idx;
+@@ -1328,7 +1494,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ pending_idx = netbk->pending_ring[index];
+
+ data_len = (txreq.size > PKT_PROT_LEN &&
+- ret < MAX_SKB_FRAGS) ?
++ ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
+ PKT_PROT_LEN : txreq.size;
+
+ skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
+@@ -1355,15 +1521,13 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ }
+
+ /* XXX could copy straight to head */
+- page = xen_netbk_alloc_page(netbk, skb, pending_idx);
++ page = xen_netbk_alloc_page(netbk, pending_idx);
+ if (!page) {
+ kfree_skb(skb);
+ netbk_tx_err(vif, &txreq, idx);
+ continue;
+ }
+
+- netbk->mmap_pages[pending_idx] = page;
+-
+ gop->source.u.ref = txreq.gref;
+ gop->source.domid = vif->domid;
+ gop->source.offset = txreq.offset;
+@@ -1380,6 +1544,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+ memcpy(&netbk->pending_tx_info[pending_idx].req,
+ &txreq, sizeof(txreq));
+ netbk->pending_tx_info[pending_idx].vif = vif;
++ netbk->pending_tx_info[pending_idx].head = index;
+ *((u16 *)skb->data) = pending_idx;
+
+ __skb_put(skb, data_len);
+@@ -1510,7 +1675,10 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+ {
+ struct xenvif *vif;
+ struct pending_tx_info *pending_tx_info;
+- pending_ring_idx_t index;
++ pending_ring_idx_t head;
++ u16 peek; /* peek into next tx request */
++
++ BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));
+
+ /* Already complete? */
+ if (netbk->mmap_pages[pending_idx] == NULL)
+@@ -1519,19 +1687,40 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+ pending_tx_info = &netbk->pending_tx_info[pending_idx];
+
+ vif = pending_tx_info->vif;
++ head = pending_tx_info->head;
++
++ BUG_ON(!pending_tx_is_head(netbk, head));
++ BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);
+
+- make_tx_response(vif, &pending_tx_info->req, status);
++ do {
++ pending_ring_idx_t index;
++ pending_ring_idx_t idx = pending_index(head);
++ u16 info_idx = netbk->pending_ring[idx];
+
+- index = pending_index(netbk->pending_prod++);
+- netbk->pending_ring[index] = pending_idx;
++ pending_tx_info = &netbk->pending_tx_info[info_idx];
++ make_tx_response(vif, &pending_tx_info->req, status);
+
+- xenvif_put(vif);
++ /* Setting any number other than
++ * INVALID_PENDING_RING_IDX indicates this slot is
++ * starting a new packet / ending a previous packet.
++ */
++ pending_tx_info->head = 0;
++
++ index = pending_index(netbk->pending_prod++);
++ netbk->pending_ring[index] = netbk->pending_ring[info_idx];
++
++ xenvif_put(vif);
++
++ peek = netbk->pending_ring[pending_index(++head)];
++
++ } while (!pending_tx_is_head(netbk, peek));
+
+ netbk->mmap_pages[pending_idx]->mapping = 0;
+ put_page(netbk->mmap_pages[pending_idx]);
+ netbk->mmap_pages[pending_idx] = NULL;
+ }
+
++
+ static void make_tx_response(struct xenvif *vif,
+ struct xen_netif_tx_request *txp,
+ s8 st)
+@@ -1584,8 +1773,9 @@ static inline int rx_work_todo(struct xen_netbk *netbk)
+ static inline int tx_work_todo(struct xen_netbk *netbk)
+ {
+
+- if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
+- !list_empty(&netbk->net_schedule_list))
++ if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
++ < MAX_PENDING_REQS) &&
++ !list_empty(&netbk->net_schedule_list))
+ return 1;
+
+ return 0;
+@@ -1668,6 +1858,13 @@ static int __init netback_init(void)
+ if (!xen_pv_domain())
+ return -ENODEV;
+
++ if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
++ printk(KERN_INFO
++ "xen-netback: fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
++ fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
++ fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
++ }
++
+ xen_netbk_group_nr = num_online_cpus();
+ xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
+ if (!xen_netbk) {
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 9b9843e..0d9914b 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -36,7 +36,7 @@
+ #include <linux/skbuff.h>
+ #include <linux/ethtool.h>
+ #include <linux/if_ether.h>
+-#include <linux/tcp.h>
++#include <net/tcp.h>
+ #include <linux/udp.h>
+ #include <linux/moduleparam.h>
+ #include <linux/mm.h>
+@@ -490,6 +490,16 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ unsigned int offset = offset_in_page(data);
+ unsigned int len = skb_headlen(skb);
+
++ /* If skb->len is too big for wire format, drop skb and alert
++ * user about misconfiguration.
++ */
++ if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
++ net_alert_ratelimited(
++ "xennet: skb->len = %u, too big for wire format\n",
++ skb->len);
++ goto drop;
++ }
++
+ frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
+ if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
+ printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
+@@ -1043,7 +1053,8 @@ err:
+
+ static int xennet_change_mtu(struct net_device *dev, int mtu)
+ {
+- int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
++ int max = xennet_can_sg(dev) ?
++ XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
+
+ if (mtu > max)
+ return -EINVAL;
+@@ -1318,6 +1329,8 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
+ SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
+ SET_NETDEV_DEV(netdev, &dev->dev);
+
++ netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
++
+ np->netdev = netdev;
+
+ netif_carrier_off(netdev);
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 7b82868..8e6c4fa 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -8665,6 +8665,13 @@ static int __must_check __init get_thinkpad_model_data(
+ tp->model_str = kstrdup(s, GFP_KERNEL);
+ if (!tp->model_str)
+ return -ENOMEM;
++ } else {
++ s = dmi_get_system_info(DMI_BIOS_VENDOR);
++ if (s && !(strnicmp(s, "Lenovo", 6))) {
++ tp->model_str = kstrdup(s, GFP_KERNEL);
++ if (!tp->model_str)
++ return -ENOMEM;
++ }
+ }
+
+ s = dmi_get_system_info(DMI_PRODUCT_NAME);
+diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
+index 23ef16c..84eab3f 100644
+--- a/drivers/rapidio/devices/tsi721.c
++++ b/drivers/rapidio/devices/tsi721.c
+@@ -555,7 +555,7 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
+ /* For MSI mode re-enable device-level interrupts */
+ if (priv->flags & TSI721_USING_MSI) {
+ dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
+- TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
++ TSI721_DEV_INT_SMSG_CH;
+ iowrite32(dev_int, priv->regs + TSI721_DEV_INTE);
+ }
+
+diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
+index a3e98f1..b37c8b0 100644
+--- a/drivers/rtc/rtc-twl.c
++++ b/drivers/rtc/rtc-twl.c
+@@ -490,6 +490,7 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
+ }
+
+ platform_set_drvdata(pdev, rtc);
++ device_init_wakeup(&pdev->dev, 1);
+ return 0;
+
+ out2:
+diff --git a/drivers/staging/gma500/cdv_intel_display.c b/drivers/staging/gma500/cdv_intel_display.c
+index 7b97c60..626ae47 100644
+--- a/drivers/staging/gma500/cdv_intel_display.c
++++ b/drivers/staging/gma500/cdv_intel_display.c
+@@ -1457,6 +1457,19 @@ static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
+ kfree(psb_intel_crtc);
+ }
+
++static void cdv_intel_crtc_disable(struct drm_crtc *crtc)
++{
++ struct gtt_range *gt;
++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++
++ if (crtc->fb) {
++ gt = to_psb_fb(crtc->fb)->gtt;
++ psb_gtt_unpin(gt);
++ }
++}
++
+ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
+ .dpms = cdv_intel_crtc_dpms,
+ .mode_fixup = cdv_intel_crtc_mode_fixup,
+@@ -1464,6 +1477,7 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
+ .mode_set_base = cdv_intel_pipe_set_base,
+ .prepare = cdv_intel_crtc_prepare,
+ .commit = cdv_intel_crtc_commit,
++ .disable = cdv_intel_crtc_disable,
+ };
+
+ const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
+diff --git a/drivers/staging/gma500/framebuffer.c b/drivers/staging/gma500/framebuffer.c
+index 3f39a37..d28fdc2 100644
+--- a/drivers/staging/gma500/framebuffer.c
++++ b/drivers/staging/gma500/framebuffer.c
+@@ -831,8 +831,8 @@ void psb_modeset_init(struct drm_device *dev)
+ for (i = 0; i < dev_priv->num_pipe; i++)
+ psb_intel_crtc_init(dev, i, mode_dev);
+
+- dev->mode_config.max_width = 2048;
+- dev->mode_config.max_height = 2048;
++ dev->mode_config.max_width = 4096;
++ dev->mode_config.max_height = 4096;
+
+ psb_setup_outputs(dev);
+ }
+diff --git a/drivers/staging/gma500/psb_intel_display.c b/drivers/staging/gma500/psb_intel_display.c
+index caa9d86..0d872e9 100644
+--- a/drivers/staging/gma500/psb_intel_display.c
++++ b/drivers/staging/gma500/psb_intel_display.c
+@@ -1255,6 +1255,19 @@ void psb_intel_crtc_destroy(struct drm_crtc *crtc)
+ kfree(psb_intel_crtc);
+ }
+
++static void psb_intel_crtc_disable(struct drm_crtc *crtc)
++{
++ struct gtt_range *gt;
++ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++
++ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++
++ if (crtc->fb) {
++ gt = to_psb_fb(crtc->fb)->gtt;
++ psb_gtt_unpin(gt);
++ }
++}
++
+ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
+ .dpms = psb_intel_crtc_dpms,
+ .mode_fixup = psb_intel_crtc_mode_fixup,
+@@ -1262,6 +1275,7 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
+ .mode_set_base = psb_intel_pipe_set_base,
+ .prepare = psb_intel_crtc_prepare,
+ .commit = psb_intel_crtc_commit,
++ .disable = psb_intel_crtc_disable,
+ };
+
+ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
+diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
+index 5b77316..db313ba 100644
+--- a/drivers/target/iscsi/iscsi_target_parameters.c
++++ b/drivers/target/iscsi/iscsi_target_parameters.c
+@@ -713,9 +713,9 @@ static int iscsi_add_notunderstood_response(
+ }
+ INIT_LIST_HEAD(&extra_response->er_list);
+
+- strncpy(extra_response->key, key, strlen(key) + 1);
+- strncpy(extra_response->value, NOTUNDERSTOOD,
+- strlen(NOTUNDERSTOOD) + 1);
++ strlcpy(extra_response->key, key, sizeof(extra_response->key));
++ strlcpy(extra_response->value, NOTUNDERSTOOD,
++ sizeof(extra_response->value));
+
+ list_add_tail(&extra_response->er_list,
+ &param_list->extra_response_list);
+@@ -1572,8 +1572,6 @@ int iscsi_decode_text_input(
+
+ if (phase & PHASE_SECURITY) {
+ if (iscsi_check_for_auth_key(key) > 0) {
+- char *tmpptr = key + strlen(key);
+- *tmpptr = '=';
+ kfree(tmpbuf);
+ return 1;
+ }
+diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
+index 6a37fd6..83eed65 100644
+--- a/drivers/target/iscsi/iscsi_target_parameters.h
++++ b/drivers/target/iscsi/iscsi_target_parameters.h
+@@ -1,8 +1,10 @@
+ #ifndef ISCSI_PARAMETERS_H
+ #define ISCSI_PARAMETERS_H
+
++#include <scsi/iscsi_proto.h>
++
+ struct iscsi_extra_response {
+- char key[64];
++ char key[KEY_MAXLEN];
+ char value[32];
+ struct list_head er_list;
+ } ____cacheline_aligned;
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index e9637f9..b368b83 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1310,10 +1310,19 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
+
+ for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+ dep = dwc->eps[epnum];
+- dwc3_free_trb_pool(dep);
+-
+- if (epnum != 0 && epnum != 1)
++ /*
++ * Physical endpoints 0 and 1 are special; they form the
++ * bi-directional USB endpoint 0.
++ *
++ * For those two physical endpoints, we don't allocate a TRB
++ * pool nor do we add them the endpoints list. Due to that, we
++ * shouldn't do these two operations otherwise we would end up
++ * with all sorts of bugs when removing dwc3.ko.
++ */
++ if (epnum != 0 && epnum != 1) {
++ dwc3_free_trb_pool(dep);
+ list_del(&dep->endpoint.ep_list);
++ }
+
+ kfree(dep);
+ }
+diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
+index 08e470f..34655d0 100644
+--- a/drivers/usb/host/ehci-sched.c
++++ b/drivers/usb/host/ehci-sched.c
+@@ -236,7 +236,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
+ }
+
+ static const unsigned char
+-max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };
++max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
+
+ /* carryover low/fullspeed bandwidth that crosses uframe boundries */
+ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 430c1d5..5018e33 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1755,6 +1755,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ }
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
++ if (!xhci->rh_bw)
++ goto no_bw;
++
+ num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ for (i = 0; i < num_ports; i++) {
+ struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+@@ -1773,6 +1776,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ }
+ }
+
++no_bw:
+ xhci->num_usb2_ports = 0;
+ xhci->num_usb3_ports = 0;
+ xhci->num_active_eps = 0;
+@@ -2184,6 +2188,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ u32 page_size;
+ int i;
+
++ INIT_LIST_HEAD(&xhci->lpm_failed_devs);
++ INIT_LIST_HEAD(&xhci->cancel_cmd_list);
++
+ page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
+ xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
+ for (i = 0; i < 16; i++) {
+@@ -2262,7 +2269,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
+ if (!xhci->cmd_ring)
+ goto fail;
+- INIT_LIST_HEAD(&xhci->cancel_cmd_list);
+ xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
+ xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+ (unsigned long long)xhci->cmd_ring->first_seg->dma);
+@@ -2363,8 +2369,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ if (xhci_setup_port_arrays(xhci, flags))
+ goto fail;
+
+- INIT_LIST_HEAD(&xhci->lpm_failed_devs);
+-
+ return 0;
+
+ fail:
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 2c0350f..136c357 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -938,6 +938,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct usb_hcd *secondary_hcd;
+ int retval = 0;
++ bool comp_timer_running = false;
+
+ /* Wait a bit if either of the roothubs need to settle from the
+ * transition into bus suspend.
+@@ -975,6 +976,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+
+ /* If restore operation fails, re-initialize the HC during resume */
+ if ((temp & STS_SRE) || hibernated) {
++
++ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
++ !(xhci_all_ports_seen_u0(xhci))) {
++ del_timer_sync(&xhci->comp_mode_recovery_timer);
++ xhci_dbg(xhci, "Compliance Mode Recovery Timer deleted!\n");
++ }
++
+ /* Let the USB core know _both_ roothubs lost power. */
+ usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
+ usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
+@@ -1017,6 +1025,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ retval = xhci_init(hcd->primary_hcd);
+ if (retval)
+ return retval;
++ comp_timer_running = true;
++
+ xhci_dbg(xhci, "Start the primary HCD\n");
+ retval = xhci_run(hcd->primary_hcd);
+ if (!retval) {
+@@ -1058,7 +1068,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ * to suffer the Compliance Mode issue again. It doesn't matter if
+ * ports have entered previously to U0 before system's suspension.
+ */
+- if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
++ if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
+ compliance_mode_recovery_timer_init(xhci);
+
+ /* Re-enable port polling. */
+diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
+index 3ca6c0d..1a715f6 100644
+--- a/drivers/usb/serial/ark3116.c
++++ b/drivers/usb/serial/ark3116.c
+@@ -49,7 +49,7 @@ static int debug;
+ #define DRIVER_NAME "ark3116"
+
+ /* usb timeout of 1 second */
+-#define ARK_TIMEOUT (1*HZ)
++#define ARK_TIMEOUT 1000
+
+ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x6547, 0x0232) },
+diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
+index 01a44d3..10c30ad 100644
+--- a/drivers/usb/serial/cypress_m8.c
++++ b/drivers/usb/serial/cypress_m8.c
+@@ -96,6 +96,7 @@ static const struct usb_device_id id_table_earthmate[] = {
+ static const struct usb_device_id id_table_cyphidcomrs232[] = {
+ { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
+ { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
++ { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
+ { } /* Terminating entry */
+ };
+
+@@ -109,6 +110,7 @@ static const struct usb_device_id id_table_combined[] = {
+ { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
+ { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
+ { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
++ { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
+ { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
+ { } /* Terminating entry */
+ };
+@@ -267,6 +269,12 @@ static struct usb_serial_driver cypress_ca42v2_device = {
+ * Cypress serial helper functions
+ *****************************************************************************/
+
++/* FRWD Dongle hidcom needs to skip reset and speed checks */
++static inline bool is_frwd(struct usb_device *dev)
++{
++ return ((le16_to_cpu(dev->descriptor.idVendor) == VENDOR_ID_FRWD) &&
++ (le16_to_cpu(dev->descriptor.idProduct) == PRODUCT_ID_CYPHIDCOM_FRWD));
++}
+
+ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
+ {
+@@ -276,6 +284,10 @@ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
+ if (unstable_bauds)
+ return new_rate;
+
++ /* FRWD Dongle uses 115200 bps */
++ if (is_frwd(port->serial->dev))
++ return new_rate;
++
+ /*
+ * The general purpose firmware for the Cypress M8 allows for
+ * a maximum speed of 57600bps (I have no idea whether DeLorme
+@@ -488,7 +500,11 @@ static int generic_startup(struct usb_serial *serial)
+ return -ENOMEM;
+ }
+
+- usb_reset_configuration(serial->dev);
++ /* Skip reset for FRWD device. It is a workaound:
++ device hangs if it receives SET_CONFIGURE in Configured
++ state. */
++ if (!is_frwd(serial->dev))
++ usb_reset_configuration(serial->dev);
+
+ priv->cmd_ctrl = 0;
+ priv->line_control = 0;
+diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h
+index 67cf608..b461311 100644
+--- a/drivers/usb/serial/cypress_m8.h
++++ b/drivers/usb/serial/cypress_m8.h
+@@ -24,6 +24,10 @@
+ #define VENDOR_ID_CYPRESS 0x04b4
+ #define PRODUCT_ID_CYPHIDCOM 0x5500
+
++/* FRWD Dongle - a GPS sports watch */
++#define VENDOR_ID_FRWD 0x6737
++#define PRODUCT_ID_CYPHIDCOM_FRWD 0x0001
++
+ /* Powercom UPS, chip CY7C63723 */
+ #define VENDOR_ID_POWERCOM 0x0d9f
+ #define PRODUCT_ID_UPS 0x0002
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 918ec98..ce9f87f 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -2169,6 +2169,9 @@ static void ftdi_set_termios(struct tty_struct *tty,
+
+ cflag = termios->c_cflag;
+
++ if (!old_termios)
++ goto no_skip;
++
+ if (old_termios->c_cflag == termios->c_cflag
+ && old_termios->c_ispeed == termios->c_ispeed
+ && old_termios->c_ospeed == termios->c_ospeed)
+@@ -2182,6 +2185,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
+ (termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)))
+ goto no_data_parity_stop_changes;
+
++no_skip:
+ /* Set number of data bits, parity, stop bits */
+
+ urb_value = 0;
+diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
+index 6aca631..cf2668e 100644
+--- a/drivers/usb/serial/iuu_phoenix.c
++++ b/drivers/usb/serial/iuu_phoenix.c
+@@ -327,7 +327,7 @@ static int bulk_immediate(struct usb_serial_port *port, u8 *buf, u8 count)
+ usb_bulk_msg(serial->dev,
+ usb_sndbulkpipe(serial->dev,
+ port->bulk_out_endpointAddress), buf,
+- count, &actual, HZ * 1);
++ count, &actual, 1000);
+
+ if (status != IUU_OPERATION_OK)
+ dbg("%s - error = %2x", __func__, status);
+@@ -350,7 +350,7 @@ static int read_immediate(struct usb_serial_port *port, u8 *buf, u8 count)
+ usb_bulk_msg(serial->dev,
+ usb_rcvbulkpipe(serial->dev,
+ port->bulk_in_endpointAddress), buf,
+- count, &actual, HZ * 1);
++ count, &actual, 1000);
+
+ if (status != IUU_OPERATION_OK)
+ dbg("%s - error = %2x", __func__, status);
+diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
+index a442352..4f415e28 100644
+--- a/drivers/usb/serial/keyspan.c
++++ b/drivers/usb/serial/keyspan.c
+@@ -1833,7 +1833,7 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
+ d_details = s_priv->device_details;
+ device_port = port->number - port->serial->minor;
+
+- outcont_urb = d_details->outcont_endpoints[port->number];
++ outcont_urb = d_details->outcont_endpoints[device_port];
+ this_urb = p_priv->outcont_urb;
+
+ dbg("%s - endpoint %d", __func__, usb_pipeendpoint(this_urb->pipe));
+diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
+index 3524a10..9580679 100644
+--- a/drivers/usb/serial/mos7720.c
++++ b/drivers/usb/serial/mos7720.c
+@@ -44,7 +44,7 @@
+ #define DRIVER_DESC "Moschip USB Serial Driver"
+
+ /* default urb timeout */
+-#define MOS_WDR_TIMEOUT (HZ * 5)
++#define MOS_WDR_TIMEOUT 5000
+
+ #define MOS_MAX_PORT 0x02
+ #define MOS_WRITE 0x0E
+@@ -234,11 +234,22 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
+ __u8 requesttype = (__u8)0xc0;
+ __u16 index = get_reg_index(reg);
+ __u16 value = get_reg_value(reg, serial_portnum);
+- int status = usb_control_msg(usbdev, pipe, request, requesttype, value,
+- index, data, 1, MOS_WDR_TIMEOUT);
+- if (status < 0)
++ u8 *buf;
++ int status;
++
++ buf = kmalloc(1, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ status = usb_control_msg(usbdev, pipe, request, requesttype, value,
++ index, buf, 1, MOS_WDR_TIMEOUT);
++ if (status == 1)
++ *data = *buf;
++ else if (status < 0)
+ dev_err(&usbdev->dev,
+ "mos7720: usb_control_msg() failed: %d", status);
++ kfree(buf);
++
+ return status;
+ }
+
+@@ -1700,7 +1711,7 @@ static void change_port_settings(struct tty_struct *tty,
+ mos7720_port->shadowMCR |= (UART_MCR_XONANY);
+ /* To set hardware flow control to the specified *
+ * serial port, in SP1/2_CONTROL_REG */
+- if (port->number)
++ if (port_number)
+ write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01);
+ else
+ write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02);
+@@ -2112,7 +2123,7 @@ static int mos7720_startup(struct usb_serial *serial)
+
+ /* setting configuration feature to one */
+ usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
+- (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5*HZ);
++ (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
+
+ /* start the interrupt urb */
+ ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
+@@ -2157,7 +2168,7 @@ static void mos7720_release(struct usb_serial *serial)
+ /* wait for synchronous usb calls to return */
+ if (mos_parport->msg_pending)
+ wait_for_completion_timeout(&mos_parport->syncmsg_compl,
+- MOS_WDR_TIMEOUT);
++ msecs_to_jiffies(MOS_WDR_TIMEOUT));
+
+ parport_remove_port(mos_parport->pp);
+ usb_set_serial_data(serial, NULL);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 59c4997..8ea37bc 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -593,6 +593,8 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff), /* Huawei E1820 */
++ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index fd86e0e..317e503 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -270,7 +270,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
+ serial settings even to the same values as before. Thus
+ we actually need to filter in this specific case */
+
+- if (!tty_termios_hw_change(tty->termios, old_termios))
++ if (old_termios && !tty_termios_hw_change(tty->termios, old_termios))
+ return;
+
+ cflag = tty->termios->c_cflag;
+@@ -279,7 +279,8 @@ static void pl2303_set_termios(struct tty_struct *tty,
+ if (!buf) {
+ dev_err(&port->dev, "%s - out of memory.\n", __func__);
+ /* Report back no change occurred */
+- *tty->termios = *old_termios;
++ if (old_termios)
++ *tty->termios = *old_termios;
+ return;
+ }
+
+@@ -419,7 +420,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
+ control = priv->line_control;
+ if ((cflag & CBAUD) == B0)
+ priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
+- else if ((old_termios->c_cflag & CBAUD) == B0)
++ else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
+ priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
+ if (control != priv->line_control) {
+ control = priv->line_control;
+@@ -480,7 +481,6 @@ static void pl2303_close(struct usb_serial_port *port)
+
+ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
+ {
+- struct ktermios tmp_termios;
+ struct usb_serial *serial = port->serial;
+ struct pl2303_private *priv = usb_get_serial_port_data(port);
+ int result;
+@@ -498,7 +498,7 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
+
+ /* Setup termios */
+ if (tty)
+- pl2303_set_termios(tty, port, &tmp_termios);
++ pl2303_set_termios(tty, port, NULL);
+
+ dbg("%s - submitting read urb", __func__);
+ result = usb_serial_generic_submit_read_urb(port, GFP_KERNEL);
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 14c4a82..5535c3a 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -115,6 +115,7 @@ static const struct usb_device_id id_table[] = {
+ {USB_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
+ {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */
+ {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */
++ {USB_DEVICE(0x0AF0, 0x8120)}, /* Option GTM681W */
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
+index ba6b438..f3179b0 100644
+--- a/drivers/usb/serial/spcp8x5.c
++++ b/drivers/usb/serial/spcp8x5.c
+@@ -338,7 +338,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
+ struct spcp8x5_private *priv = usb_get_serial_port_data(port);
+ unsigned long flags;
+ unsigned int cflag = tty->termios->c_cflag;
+- unsigned int old_cflag = old_termios->c_cflag;
+ unsigned short uartdata;
+ unsigned char buf[2] = {0, 0};
+ int baud;
+@@ -347,15 +346,15 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
+
+
+ /* check that they really want us to change something */
+- if (!tty_termios_hw_change(tty->termios, old_termios))
++ if (old_termios && !tty_termios_hw_change(tty->termios, old_termios))
+ return;
+
+ /* set DTR/RTS active */
+ spin_lock_irqsave(&priv->lock, flags);
+ control = priv->line_control;
+- if ((old_cflag & CBAUD) == B0) {
++ if (old_termios && (old_termios->c_cflag & CBAUD) == B0) {
+ priv->line_control |= MCR_DTR;
+- if (!(old_cflag & CRTSCTS))
++ if (!(old_termios->c_cflag & CRTSCTS))
+ priv->line_control |= MCR_RTS;
+ }
+ if (control != priv->line_control) {
+@@ -445,7 +444,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
+ * status of the device. */
+ static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
+ {
+- struct ktermios tmp_termios;
+ struct usb_serial *serial = port->serial;
+ struct spcp8x5_private *priv = usb_get_serial_port_data(port);
+ int ret;
+@@ -468,7 +466,7 @@ static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
+
+ /* Setup termios */
+ if (tty)
+- spcp8x5_set_termios(tty, port, &tmp_termios);
++ spcp8x5_set_termios(tty, port, NULL);
+
+ spcp8x5_get_msr(serial->dev, &status, priv->type);
+
+diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
+index 1c11959..80a6ff6 100644
+--- a/drivers/usb/serial/visor.c
++++ b/drivers/usb/serial/visor.c
+@@ -599,7 +599,9 @@ static int treo_attach(struct usb_serial *serial)
+ dest->read_urb = src->read_urb; \
+ dest->bulk_in_endpointAddress = src->bulk_in_endpointAddress;\
+ dest->bulk_in_buffer = src->bulk_in_buffer; \
++ dest->bulk_in_size = src->bulk_in_size; \
+ dest->interrupt_in_urb = src->interrupt_in_urb; \
++ dest->interrupt_in_urb->context = dest; \
+ dest->interrupt_in_endpointAddress = \
+ src->interrupt_in_endpointAddress;\
+ dest->interrupt_in_buffer = src->interrupt_in_buffer; \
+diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
+index 59d646d..0ec60cd 100644
+--- a/drivers/usb/serial/whiteheat.c
++++ b/drivers/usb/serial/whiteheat.c
+@@ -1209,7 +1209,7 @@ static void firm_setup_port(struct tty_struct *tty)
+ struct whiteheat_port_settings port_settings;
+ unsigned int cflag = tty->termios->c_cflag;
+
+- port_settings.port = port->number + 1;
++ port_settings.port = port->number - port->serial->minor + 1;
+
+ /* get the byte size */
+ switch (cflag & CSIZE) {
+diff --git a/drivers/xen/events.c b/drivers/xen/events.c
+index fec1204..11d7b64 100644
+--- a/drivers/xen/events.c
++++ b/drivers/xen/events.c
+@@ -1176,7 +1176,7 @@ static void __xen_evtchn_do_upcall(void)
+ {
+ int start_word_idx, start_bit_idx;
+ int word_idx, bit_idx;
+- int i;
++ int i, irq;
+ int cpu = get_cpu();
+ struct shared_info *s = HYPERVISOR_shared_info;
+ struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
+@@ -1184,6 +1184,8 @@ static void __xen_evtchn_do_upcall(void)
+
+ do {
+ unsigned long pending_words;
++ unsigned long pending_bits;
++ struct irq_desc *desc;
+
+ vcpu_info->evtchn_upcall_pending = 0;
+
+@@ -1194,6 +1196,17 @@ static void __xen_evtchn_do_upcall(void)
+ /* Clear master flag /before/ clearing selector flag. */
+ wmb();
+ #endif
++ if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
++ int evtchn = evtchn_from_irq(irq);
++ word_idx = evtchn / BITS_PER_LONG;
++ pending_bits = evtchn % BITS_PER_LONG;
++ if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) {
++ desc = irq_to_desc(irq);
++ if (desc)
++ generic_handle_irq_desc(irq, desc);
++ }
++ }
++
+ pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
+
+ start_word_idx = __this_cpu_read(current_word_idx);
+@@ -1202,7 +1215,6 @@ static void __xen_evtchn_do_upcall(void)
+ word_idx = start_word_idx;
+
+ for (i = 0; pending_words != 0; i++) {
+- unsigned long pending_bits;
+ unsigned long words;
+
+ words = MASK_LSBS(pending_words, word_idx);
+@@ -1231,8 +1243,7 @@ static void __xen_evtchn_do_upcall(void)
+
+ do {
+ unsigned long bits;
+- int port, irq;
+- struct irq_desc *desc;
++ int port;
+
+ bits = MASK_LSBS(pending_bits, bit_idx);
+
+diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
+index 2263144..d0e5fc5 100644
+--- a/fs/cifs/cifs_dfs_ref.c
++++ b/fs/cifs/cifs_dfs_ref.c
+@@ -18,6 +18,7 @@
+ #include <linux/slab.h>
+ #include <linux/vfs.h>
+ #include <linux/fs.h>
++#include <linux/inet.h>
+ #include "cifsglob.h"
+ #include "cifsproto.h"
+ #include "cifsfs.h"
+@@ -150,7 +151,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
+ * assuming that we have 'unc=' and 'ip=' in
+ * the original sb_mountdata
+ */
+- md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12;
++ md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12 +
++ INET6_ADDRSTRLEN;
+ mountdata = kzalloc(md_len+1, GFP_KERNEL);
+ if (mountdata == NULL) {
+ rc = -ENOMEM;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index cc386b2..259e950 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2260,7 +2260,9 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+ __func__, inode->i_ino, inode->i_size);
+ jbd_debug(2, "truncating inode %lu to %lld bytes\n",
+ inode->i_ino, inode->i_size);
++ mutex_lock(&inode->i_mutex);
+ ext4_truncate(inode);
++ mutex_unlock(&inode->i_mutex);
+ nr_truncates++;
+ } else {
+ ext4_msg(sb, KERN_DEBUG,
+diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
+index 77b69b2..13fc885 100644
+--- a/fs/jfs/inode.c
++++ b/fs/jfs/inode.c
+@@ -125,7 +125,7 @@ int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ {
+ int wait = wbc->sync_mode == WB_SYNC_ALL;
+
+- if (test_cflag(COMMIT_Nolink, inode))
++ if (inode->i_nlink == 0)
+ return 0;
+ /*
+ * If COMMIT_DIRTY is not set, the inode isn't really dirty.
+diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
+index cc5f811..bfb2a91 100644
+--- a/fs/jfs/jfs_logmgr.c
++++ b/fs/jfs/jfs_logmgr.c
+@@ -1058,7 +1058,8 @@ static int lmLogSync(struct jfs_log * log, int hard_sync)
+ */
+ void jfs_syncpt(struct jfs_log *log, int hard_sync)
+ { LOG_LOCK(log);
+- lmLogSync(log, hard_sync);
++ if (!test_bit(log_QUIESCE, &log->flag))
++ lmLogSync(log, hard_sync);
+ LOG_UNLOCK(log);
+ }
+
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index 23ce927..bd2fb43 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -507,6 +507,28 @@ xfs_vn_getattr(
+ return 0;
+ }
+
++static void
++xfs_setattr_mode(
++ struct xfs_trans *tp,
++ struct xfs_inode *ip,
++ struct iattr *iattr)
++{
++ struct inode *inode = VFS_I(ip);
++ umode_t mode = iattr->ia_mode;
++
++ ASSERT(tp);
++ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
++
++ if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
++ mode &= ~S_ISGID;
++
++ ip->i_d.di_mode &= S_IFMT;
++ ip->i_d.di_mode |= mode & ~S_IFMT;
++
++ inode->i_mode &= S_IFMT;
++ inode->i_mode |= mode & ~S_IFMT;
++}
++
+ int
+ xfs_setattr_nonsize(
+ struct xfs_inode *ip,
+@@ -658,18 +680,8 @@ xfs_setattr_nonsize(
+ /*
+ * Change file access modes.
+ */
+- if (mask & ATTR_MODE) {
+- umode_t mode = iattr->ia_mode;
+-
+- if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
+- mode &= ~S_ISGID;
+-
+- ip->i_d.di_mode &= S_IFMT;
+- ip->i_d.di_mode |= mode & ~S_IFMT;
+-
+- inode->i_mode &= S_IFMT;
+- inode->i_mode |= mode & ~S_IFMT;
+- }
++ if (mask & ATTR_MODE)
++ xfs_setattr_mode(tp, ip, iattr);
+
+ /*
+ * Change file access or modified times.
+@@ -768,9 +780,8 @@ xfs_setattr_size(
+ return XFS_ERROR(error);
+
+ ASSERT(S_ISREG(ip->i_d.di_mode));
+- ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
+- ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID|
+- ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
++ ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
++ ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
+
+ lock_flags = XFS_ILOCK_EXCL;
+ if (!(flags & XFS_ATTR_NOLOCK))
+@@ -902,6 +913,12 @@ xfs_setattr_size(
+ xfs_iflags_set(ip, XFS_ITRUNCATED);
+ }
+
++ /*
++ * Change file access modes.
++ */
++ if (mask & ATTR_MODE)
++ xfs_setattr_mode(tp, ip, iattr);
++
+ if (mask & ATTR_CTIME) {
+ inode->i_ctime = iattr->ia_ctime;
+ ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index c692acc..9c3e071 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -168,6 +168,8 @@ extern struct sysdev_class cpu_sysdev_class;
+
+ extern void get_online_cpus(void);
+ extern void put_online_cpus(void);
++extern void cpu_hotplug_disable(void);
++extern void cpu_hotplug_enable(void);
+ #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
+ #define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
+ #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
+@@ -190,6 +192,8 @@ static inline void cpu_hotplug_driver_unlock(void)
+
+ #define get_online_cpus() do { } while (0)
+ #define put_online_cpus() do { } while (0)
++#define cpu_hotplug_disable() do { } while (0)
++#define cpu_hotplug_enable() do { } while (0)
+ #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
+ /* These aren't inline functions due to a GCC bug. */
+ #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
+diff --git a/include/linux/net.h b/include/linux/net.h
+index b299230..b7ca08e 100644
+--- a/include/linux/net.h
++++ b/include/linux/net.h
+@@ -249,6 +249,29 @@ extern struct socket *sockfd_lookup(int fd, int *err);
+ #define sockfd_put(sock) fput(sock->file)
+ extern int net_ratelimit(void);
+
++#define net_ratelimited_function(function, ...) \
++do { \
++ if (net_ratelimit()) \
++ function(__VA_ARGS__); \
++} while (0)
++
++#define net_emerg_ratelimited(fmt, ...) \
++ net_ratelimited_function(pr_emerg, fmt, ##__VA_ARGS__)
++#define net_alert_ratelimited(fmt, ...) \
++ net_ratelimited_function(pr_alert, fmt, ##__VA_ARGS__)
++#define net_crit_ratelimited(fmt, ...) \
++ net_ratelimited_function(pr_crit, fmt, ##__VA_ARGS__)
++#define net_err_ratelimited(fmt, ...) \
++ net_ratelimited_function(pr_err, fmt, ##__VA_ARGS__)
++#define net_notice_ratelimited(fmt, ...) \
++ net_ratelimited_function(pr_notice, fmt, ##__VA_ARGS__)
++#define net_warn_ratelimited(fmt, ...) \
++ net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
++#define net_info_ratelimited(fmt, ...) \
++ net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
++#define net_dbg_ratelimited(fmt, ...) \
++ net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
++
+ #define net_random() random32()
+ #define net_srandom(seed) srandom32((__force u32)seed)
+
+diff --git a/include/linux/swapops.h b/include/linux/swapops.h
+index d6955607..7f62faf 100644
+--- a/include/linux/swapops.h
++++ b/include/linux/swapops.h
+@@ -136,6 +136,7 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
+
+ extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address);
++extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte);
+ #else
+
+ #define make_migration_entry(page, write) swp_entry(0, 0)
+@@ -147,6 +148,8 @@ static inline int is_migration_entry(swp_entry_t swp)
+ static inline void make_migration_entry_read(swp_entry_t *entryp) { }
+ static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address) { }
++static inline void migration_entry_wait_huge(struct mm_struct *mm,
++ pte_t *pte) { }
+ static inline int is_write_migration_entry(swp_entry_t entry)
+ {
+ return 0;
+diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
+index cb94668..d4635cd 100644
+--- a/include/xen/interface/io/netif.h
++++ b/include/xen/interface/io/netif.h
+@@ -13,6 +13,24 @@
+ #include "../grant_table.h"
+
+ /*
++ * Older implementation of Xen network frontend / backend has an
++ * implicit dependency on the MAX_SKB_FRAGS as the maximum number of
++ * ring slots a skb can use. Netfront / netback may not work as
++ * expected when frontend and backend have different MAX_SKB_FRAGS.
++ *
++ * A better approach is to add mechanism for netfront / netback to
++ * negotiate this value. However we cannot fix all possible
++ * frontends, so we need to define a value which states the minimum
++ * slots backend must support.
++ *
++ * The minimum value derives from older Linux kernel's MAX_SKB_FRAGS
++ * (18), which is proved to work with most frontends. Any new backend
++ * which doesn't negotiate with frontend should expect frontend to
++ * send a valid packet using slots up to this value.
++ */
++#define XEN_NETIF_NR_SLOTS_MIN 18
++
++/*
+ * Notifications after enqueuing any type of message should be conditional on
+ * the appropriate req_event or rsp_event field in the shared ring.
+ * If the client sends notification for rx requests then it should specify
+@@ -47,6 +65,7 @@
+ #define _XEN_NETTXF_extra_info (3)
+ #define XEN_NETTXF_extra_info (1U<<_XEN_NETTXF_extra_info)
+
++#define XEN_NETIF_MAX_TX_SIZE 0xFFFF
+ struct xen_netif_tx_request {
+ grant_ref_t gref; /* Reference to buffer page */
+ uint16_t offset; /* Offset within buffer page */
+diff --git a/kernel/audit.c b/kernel/audit.c
+index 09fae26..d4bc594 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -1167,7 +1167,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
+
+ /* Wait for auditd to drain the queue a little */
+ DECLARE_WAITQUEUE(wait, current);
+- set_current_state(TASK_INTERRUPTIBLE);
++ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&audit_backlog_wait, &wait);
+
+ if (audit_backlog_limit &&
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 563f136..82c91f1 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -124,6 +124,27 @@ static void cpu_hotplug_done(void)
+ mutex_unlock(&cpu_hotplug.lock);
+ }
+
++/*
++ * Wait for currently running CPU hotplug operations to complete (if any) and
++ * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
++ * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
++ * hotplug path before performing hotplug operations. So acquiring that lock
++ * guarantees mutual exclusion from any currently running hotplug operations.
++ */
++void cpu_hotplug_disable(void)
++{
++ cpu_maps_update_begin();
++ cpu_hotplug_disabled = 1;
++ cpu_maps_update_done();
++}
++
++void cpu_hotplug_enable(void)
++{
++ cpu_maps_update_begin();
++ cpu_hotplug_disabled = 0;
++ cpu_maps_update_done();
++}
++
+ #else /* #if CONFIG_HOTPLUG_CPU */
+ static void cpu_hotplug_begin(void) {}
+ static void cpu_hotplug_done(void) {}
+@@ -479,36 +500,6 @@ static int alloc_frozen_cpus(void)
+ core_initcall(alloc_frozen_cpus);
+
+ /*
+- * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
+- * hotplug when tasks are about to be frozen. Also, don't allow the freezer
+- * to continue until any currently running CPU hotplug operation gets
+- * completed.
+- * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
+- * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
+- * CPU hotplug path and released only after it is complete. Thus, we
+- * (and hence the freezer) will block here until any currently running CPU
+- * hotplug operation gets completed.
+- */
+-void cpu_hotplug_disable_before_freeze(void)
+-{
+- cpu_maps_update_begin();
+- cpu_hotplug_disabled = 1;
+- cpu_maps_update_done();
+-}
+-
+-
+-/*
+- * When tasks have been thawed, re-enable regular CPU hotplug (which had been
+- * disabled while beginning to freeze tasks).
+- */
+-void cpu_hotplug_enable_after_thaw(void)
+-{
+- cpu_maps_update_begin();
+- cpu_hotplug_disabled = 0;
+- cpu_maps_update_done();
+-}
+-
+-/*
+ * When callbacks for CPU hotplug notifications are being executed, we must
+ * ensure that the state of the system with respect to the tasks being frozen
+ * or not, as reported by the notification, remains unchanged *throughout the
+@@ -527,12 +518,12 @@ cpu_hotplug_pm_callback(struct notifier_block *nb,
+
+ case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+- cpu_hotplug_disable_before_freeze();
++ cpu_hotplug_disable();
+ break;
+
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+- cpu_hotplug_enable_after_thaw();
++ cpu_hotplug_enable();
+ break;
+
+ default:
+diff --git a/kernel/sys.c b/kernel/sys.c
+index be5fa8b..9d557df 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -353,6 +353,29 @@ int unregister_reboot_notifier(struct notifier_block *nb)
+ }
+ EXPORT_SYMBOL(unregister_reboot_notifier);
+
++/* Add backwards compatibility for stable trees. */
++#ifndef PF_NO_SETAFFINITY
++#define PF_NO_SETAFFINITY PF_THREAD_BOUND
++#endif
++
++static void migrate_to_reboot_cpu(void)
++{
++ /* The boot cpu is always logical cpu 0 */
++ int cpu = 0;
++
++ cpu_hotplug_disable();
++
++ /* Make certain the cpu I'm about to reboot on is online */
++ if (!cpu_online(cpu))
++ cpu = cpumask_first(cpu_online_mask);
++
++ /* Prevent races with other tasks migrating this task */
++ current->flags |= PF_NO_SETAFFINITY;
++
++ /* Make certain I only run on the appropriate processor */
++ set_cpus_allowed_ptr(current, cpumask_of(cpu));
++}
++
+ /**
+ * kernel_restart - reboot the system
+ * @cmd: pointer to buffer containing command to execute for restart
+@@ -364,7 +387,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
+ void kernel_restart(char *cmd)
+ {
+ kernel_restart_prepare(cmd);
+- disable_nonboot_cpus();
++ migrate_to_reboot_cpu();
+ syscore_shutdown();
+ if (!cmd)
+ printk(KERN_EMERG "Restarting system.\n");
+@@ -391,7 +414,7 @@ static void kernel_shutdown_prepare(enum system_states state)
+ void kernel_halt(void)
+ {
+ kernel_shutdown_prepare(SYSTEM_HALT);
+- disable_nonboot_cpus();
++ migrate_to_reboot_cpu();
+ syscore_shutdown();
+ printk(KERN_EMERG "System halted.\n");
+ kmsg_dump(KMSG_DUMP_HALT);
+@@ -410,7 +433,7 @@ void kernel_power_off(void)
+ kernel_shutdown_prepare(SYSTEM_POWER_OFF);
+ if (pm_power_off_prepare)
+ pm_power_off_prepare();
+- disable_nonboot_cpus();
++ migrate_to_reboot_cpu();
+ syscore_shutdown();
+ printk(KERN_EMERG "Power down.\n");
+ kmsg_dump(KMSG_DUMP_POWEROFF);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 24b3759..226776b 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -929,6 +929,19 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
+
+ static struct pid * const ftrace_swapper_pid = &init_struct_pid;
+
++static loff_t
++ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
++{
++ loff_t ret;
++
++ if (file->f_mode & FMODE_READ)
++ ret = seq_lseek(file, offset, whence);
++ else
++ file->f_pos = ret = 1;
++
++ return ret;
++}
++
+ #ifdef CONFIG_DYNAMIC_FTRACE
+
+ #ifndef CONFIG_FTRACE_MCOUNT_RECORD
+@@ -2315,19 +2328,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
+ inode, file);
+ }
+
+-static loff_t
+-ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
+-{
+- loff_t ret;
+-
+- if (file->f_mode & FMODE_READ)
+- ret = seq_lseek(file, offset, origin);
+- else
+- file->f_pos = ret = 1;
+-
+- return ret;
+-}
+-
+ static int ftrace_match(char *str, char *regex, int len, int type)
+ {
+ int matched = 0;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 70b4733..2dcd716 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2751,7 +2751,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ if (ptep) {
+ entry = huge_ptep_get(ptep);
+ if (unlikely(is_hugetlb_entry_migration(entry))) {
+- migration_entry_wait(mm, (pmd_t *)ptep, address);
++ migration_entry_wait_huge(mm, ptep);
+ return 0;
+ } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
+ return VM_FAULT_HWPOISON_LARGE |
+diff --git a/mm/migrate.c b/mm/migrate.c
+index e1052d1..09d6a9d 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -184,15 +184,14 @@ static void remove_migration_ptes(struct page *old, struct page *new)
+ *
+ * This function is called from do_swap_page().
+ */
+-void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+- unsigned long address)
++static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
++ spinlock_t *ptl)
+ {
+- pte_t *ptep, pte;
+- spinlock_t *ptl;
++ pte_t pte;
+ swp_entry_t entry;
+ struct page *page;
+
+- ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
++ spin_lock(ptl);
+ pte = *ptep;
+ if (!is_swap_pte(pte))
+ goto out;
+@@ -220,6 +219,20 @@ out:
+ pte_unmap_unlock(ptep, ptl);
+ }
+
++void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
++ unsigned long address)
++{
++ spinlock_t *ptl = pte_lockptr(mm, pmd);
++ pte_t *ptep = pte_offset_map(pmd, address);
++ __migration_entry_wait(mm, ptep, ptl);
++}
++
++void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
++{
++ spinlock_t *ptl = &(mm)->page_table_lock;
++ __migration_entry_wait(mm, pte, ptl);
++}
++
+ #ifdef CONFIG_BLOCK
+ /* Returns true if all buffers are successfully locked */
+ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
+diff --git a/mm/swap_state.c b/mm/swap_state.c
+index 7704d9c..7b3dadd 100644
+--- a/mm/swap_state.c
++++ b/mm/swap_state.c
+@@ -314,8 +314,24 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ * Swap entry may have been freed since our caller observed it.
+ */
+ err = swapcache_prepare(entry);
+- if (err == -EEXIST) { /* seems racy */
++ if (err == -EEXIST) {
+ radix_tree_preload_end();
++ /*
++ * We might race against get_swap_page() and stumble
++ * across a SWAP_HAS_CACHE swap_map entry whose page
++ * has not been brought into the swapcache yet, while
++ * the other end is scheduled away waiting on discard
++ * I/O completion at scan_swap_map().
++ *
++ * In order to avoid turning this transitory state
++ * into a permanent loop around this -EEXIST case
++ * if !CONFIG_PREEMPT and the I/O completion happens
++ * to be waiting on the CPU waitqueue where we are now
++ * busy looping, we just conditionally invoke the
++ * scheduler here, if there are some more important
++ * tasks to run.
++ */
++ cond_resched();
+ continue;
+ }
+ if (err) { /* swp entry is obsolete ? */
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 04175d9..a0b6c50 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -2297,10 +2297,15 @@ done:
+ }
+ }
+
+-static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
++static inline int l2cap_command_rej(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
++ u8 *data)
+ {
+ struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
+
++ if (cmd_len < sizeof(*rej))
++ return -EPROTO;
++
+ if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
+ return 0;
+
+@@ -2317,7 +2322,8 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd
+ return 0;
+ }
+
+-static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
++static int l2cap_connect_req(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
+ {
+ struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
+ struct l2cap_conn_rsp rsp;
+@@ -2325,8 +2331,14 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
+ struct sock *parent, *sk = NULL;
+ int result, status = L2CAP_CS_NO_INFO;
+
+- u16 dcid = 0, scid = __le16_to_cpu(req->scid);
+- __le16 psm = req->psm;
++ u16 dcid = 0, scid;
++ __le16 psm;
++
++ if (cmd_len < sizeof(struct l2cap_conn_req))
++ return -EPROTO;
++
++ scid = __le16_to_cpu(req->scid);
++ psm = req->psm;
+
+ BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
+
+@@ -2451,7 +2463,9 @@ sendresp:
+ return 0;
+ }
+
+-static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
++static int l2cap_connect_rsp(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
++ u8 *data)
+ {
+ struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
+ u16 scid, dcid, result, status;
+@@ -2459,6 +2473,9 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
+ struct sock *sk;
+ u8 req[128];
+
++ if (cmd_len < sizeof(*rsp))
++ return -EPROTO;
++
+ scid = __le16_to_cpu(rsp->scid);
+ dcid = __le16_to_cpu(rsp->dcid);
+ result = __le16_to_cpu(rsp->result);
+@@ -2534,6 +2551,9 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
+ struct sock *sk;
+ int len;
+
++ if (cmd_len < sizeof(*req))
++ return -EPROTO;
++
+ dcid = __le16_to_cpu(req->dcid);
+ flags = __le16_to_cpu(req->flags);
+
+@@ -2559,7 +2579,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
+
+ /* Reject if config buffer is too small. */
+ len = cmd_len - sizeof(*req);
+- if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
++ if (chan->conf_len + len > sizeof(chan->conf_req)) {
+ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
+ l2cap_build_conf_rsp(chan, rsp,
+ L2CAP_CONF_REJECT, flags), rsp);
+@@ -2621,13 +2641,18 @@ unlock:
+ return 0;
+ }
+
+-static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
++static inline int l2cap_config_rsp(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
++ u8 *data)
+ {
+ struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
+ u16 scid, flags, result;
+ struct l2cap_chan *chan;
+ struct sock *sk;
+- int len = cmd->len - sizeof(*rsp);
++ int len = cmd_len - sizeof(*rsp);
++
++ if (cmd_len < sizeof(*rsp))
++ return -EPROTO;
+
+ scid = __le16_to_cpu(rsp->scid);
+ flags = __le16_to_cpu(rsp->flags);
+@@ -2703,7 +2728,9 @@ done:
+ return 0;
+ }
+
+-static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
++static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
++ u8 *data)
+ {
+ struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
+ struct l2cap_disconn_rsp rsp;
+@@ -2711,6 +2738,9 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
+ struct l2cap_chan *chan;
+ struct sock *sk;
+
++ if (cmd_len != sizeof(*req))
++ return -EPROTO;
++
+ scid = __le16_to_cpu(req->scid);
+ dcid = __le16_to_cpu(req->dcid);
+
+@@ -2744,13 +2774,18 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
+ return 0;
+ }
+
+-static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
++static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
++ u8 *data)
+ {
+ struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
+ u16 dcid, scid;
+ struct l2cap_chan *chan;
+ struct sock *sk;
+
++ if (cmd_len != sizeof(*rsp))
++ return -EPROTO;
++
+ scid = __le16_to_cpu(rsp->scid);
+ dcid = __le16_to_cpu(rsp->dcid);
+
+@@ -2778,11 +2813,16 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
+ return 0;
+ }
+
+-static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
++static inline int l2cap_information_req(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
++ u8 *data)
+ {
+ struct l2cap_info_req *req = (struct l2cap_info_req *) data;
+ u16 type;
+
++ if (cmd_len != sizeof(*req))
++ return -EPROTO;
++
+ type = __le16_to_cpu(req->type);
+
+ BT_DBG("type 0x%4.4x", type);
+@@ -2818,11 +2858,16 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
+ return 0;
+ }
+
+-static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
++static inline int l2cap_information_rsp(struct l2cap_conn *conn,
++ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
++ u8 *data)
+ {
+ struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
+ u16 type, result;
+
++ if (cmd_len != sizeof(*rsp))
++ return -EPROTO;
++
+ type = __le16_to_cpu(rsp->type);
+ result = __le16_to_cpu(rsp->result);
+
+@@ -2941,15 +2986,15 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
+
+ switch (cmd->code) {
+ case L2CAP_COMMAND_REJ:
+- l2cap_command_rej(conn, cmd, data);
++ l2cap_command_rej(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CONN_REQ:
+- err = l2cap_connect_req(conn, cmd, data);
++ err = l2cap_connect_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CONN_RSP:
+- err = l2cap_connect_rsp(conn, cmd, data);
++ err = l2cap_connect_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_CONF_REQ:
+@@ -2957,15 +3002,15 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
+ break;
+
+ case L2CAP_CONF_RSP:
+- err = l2cap_config_rsp(conn, cmd, data);
++ err = l2cap_config_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_DISCONN_REQ:
+- err = l2cap_disconnect_req(conn, cmd, data);
++ err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_DISCONN_RSP:
+- err = l2cap_disconnect_rsp(conn, cmd, data);
++ err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_ECHO_REQ:
+@@ -2976,11 +3021,11 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
+ break;
+
+ case L2CAP_INFO_REQ:
+- err = l2cap_information_req(conn, cmd, data);
++ err = l2cap_information_req(conn, cmd, cmd_len, data);
+ break;
+
+ case L2CAP_INFO_RSP:
+- err = l2cap_information_rsp(conn, cmd, data);
++ err = l2cap_information_rsp(conn, cmd, cmd_len, data);
+ break;
+
+ default:
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index f4ddf34..8260cd5 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -1242,6 +1242,15 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
+
+ ASSERT_RTNL();
+
++ /*
++ * Close all AP_VLAN interfaces first, as otherwise they
++ * might be closed while the AP interface they belong to
++ * is closed, causing unregister_netdevice_many() to crash.
++ */
++ list_for_each_entry(sdata, &local->interfaces, list)
++ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
++ dev_close(sdata->dev);
++
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
+ list_del(&sdata->list);
+diff --git a/net/wireless/sme.c b/net/wireless/sme.c
+index 0acfdc9..c1c6e6d 100644
+--- a/net/wireless/sme.c
++++ b/net/wireless/sme.c
+@@ -220,6 +220,9 @@ void cfg80211_conn_work(struct work_struct *work)
+ mutex_lock(&rdev->devlist_mtx);
+
+ list_for_each_entry(wdev, &rdev->netdev_list, list) {
++ if (!wdev->netdev)
++ continue;
++
+ wdev_lock(wdev);
+ if (!netif_running(wdev->netdev)) {
+ wdev_unlock(wdev);
+diff --git a/sound/usb/card.h b/sound/usb/card.h
+index 2b7559c..0a7ca6c 100644
+--- a/sound/usb/card.h
++++ b/sound/usb/card.h
+@@ -1,6 +1,7 @@
+ #ifndef __USBAUDIO_CARD_H
+ #define __USBAUDIO_CARD_H
+
++#define MAX_NR_RATES 1024
+ #define MAX_PACKS 20
+ #define MAX_PACKS_HS (MAX_PACKS * 8) /* in high speed mode */
+ #define MAX_URBS 8
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index 89421d1..ddfef57 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -226,7 +226,7 @@ static int parse_uac2_sample_rate_range(struct audioformat *fp, int nr_triplets,
+ int min = combine_quad(&data[2 + 12 * i]);
+ int max = combine_quad(&data[6 + 12 * i]);
+ int res = combine_quad(&data[10 + 12 * i]);
+- int rate;
++ unsigned int rate;
+
+ if ((max < 0) || (min < 0) || (res < 0) || (max < min))
+ continue;
+@@ -253,6 +253,10 @@ static int parse_uac2_sample_rate_range(struct audioformat *fp, int nr_triplets,
+ fp->rates |= snd_pcm_rate_to_rate_bit(rate);
+
+ nr_rates++;
++ if (nr_rates >= MAX_NR_RATES) {
++ snd_printk(KERN_ERR "invalid uac2 rates\n");
++ break;
++ }
+
+ /* avoid endless loop */
+ if (res == 0)
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index f4540bf..97ec155 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -822,6 +822,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ case USB_ID(0x046d, 0x0808):
+ case USB_ID(0x046d, 0x0809):
+ case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
++ case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
+ case USB_ID(0x046d, 0x0991):
+ /* Most audio usb devices lie about volume resolution.
+ * Most Logitech webcams have res = 384.
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 4e25148..e467a58 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -157,7 +157,13 @@
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL
+ },
+ {
+- USB_DEVICE(0x046d, 0x0990),
++ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
++ USB_DEVICE_ID_MATCH_INT_CLASS |
++ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
++ .idVendor = 0x046d,
++ .idProduct = 0x0990,
++ .bInterfaceClass = USB_CLASS_AUDIO,
++ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ .vendor_name = "Logitech, Inc.",
+ .product_name = "QuickCam Pro 9000",
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 42eeee8..9c82f8b 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -132,10 +132,14 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
+ unsigned *rate_table = NULL;
+
+ fp = kmemdup(quirk->data, sizeof(*fp), GFP_KERNEL);
+- if (! fp) {
++ if (!fp) {
+ snd_printk(KERN_ERR "cannot memdup\n");
+ return -ENOMEM;
+ }
++ if (fp->nr_rates > MAX_NR_RATES) {
++ kfree(fp);
++ return -EINVAL;
++ }
+ if (fp->nr_rates > 0) {
+ rate_table = kmemdup(fp->rate_table,
+ sizeof(int) * fp->nr_rates, GFP_KERNEL);
diff --git a/1047_linux-3.2.48.patch b/1047_linux-3.2.48.patch
new file mode 100644
index 00000000..6d55b1fa
--- /dev/null
+++ b/1047_linux-3.2.48.patch
@@ -0,0 +1,952 @@
+diff --git a/Makefile b/Makefile
+index 40e2a11..299e2eb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 47
++SUBLEVEL = 48
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
+index 1252a26..1397408 100644
+--- a/arch/arm/include/asm/cacheflush.h
++++ b/arch/arm/include/asm/cacheflush.h
+@@ -301,9 +301,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
+ }
+
+ #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+-static inline void flush_kernel_dcache_page(struct page *page)
+-{
+-}
++extern void flush_kernel_dcache_page(struct page *);
+
+ #define flush_dcache_mmap_lock(mapping) \
+ spin_lock_irq(&(mapping)->tree_lock)
+diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
+index 8fda9f7..fe61cab 100644
+--- a/arch/arm/mm/flush.c
++++ b/arch/arm/mm/flush.c
+@@ -304,6 +304,39 @@ void flush_dcache_page(struct page *page)
+ EXPORT_SYMBOL(flush_dcache_page);
+
+ /*
++ * Ensure cache coherency for the kernel mapping of this page. We can
++ * assume that the page is pinned via kmap.
++ *
++ * If the page only exists in the page cache and there are no user
++ * space mappings, this is a no-op since the page was already marked
++ * dirty at creation. Otherwise, we need to flush the dirty kernel
++ * cache lines directly.
++ */
++void flush_kernel_dcache_page(struct page *page)
++{
++ if (cache_is_vivt() || cache_is_vipt_aliasing()) {
++ struct address_space *mapping;
++
++ mapping = page_mapping(page);
++
++ if (!mapping || mapping_mapped(mapping)) {
++ void *addr;
++
++ addr = page_address(page);
++ /*
++ * kmap_atomic() doesn't set the page virtual
++ * address for highmem pages, and
++ * kunmap_atomic() takes care of cache
++ * flushing already.
++ */
++ if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
++ __cpuc_flush_dcache_area(addr, PAGE_SIZE);
++ }
++ }
++}
++EXPORT_SYMBOL(flush_kernel_dcache_page);
++
++/*
+ * Flush an anonymous page so that users of get_user_pages()
+ * can safely access the data. The expected sequence is:
+ *
+diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
+index 941a98c..a5018fb 100644
+--- a/arch/arm/mm/nommu.c
++++ b/arch/arm/mm/nommu.c
+@@ -53,6 +53,12 @@ void flush_dcache_page(struct page *page)
+ }
+ EXPORT_SYMBOL(flush_dcache_page);
+
++void flush_kernel_dcache_page(struct page *page)
++{
++ __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
++}
++EXPORT_SYMBOL(flush_kernel_dcache_page);
++
+ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long uaddr, void *dst, const void *src,
+ unsigned long len)
+diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
+index 2a81d32..e51e5cd 100644
+--- a/arch/tile/lib/exports.c
++++ b/arch/tile/lib/exports.c
+@@ -90,4 +90,6 @@ uint64_t __ashrdi3(uint64_t, unsigned int);
+ EXPORT_SYMBOL(__ashrdi3);
+ uint64_t __ashldi3(uint64_t, unsigned int);
+ EXPORT_SYMBOL(__ashldi3);
++int __ffsdi2(uint64_t);
++EXPORT_SYMBOL(__ffsdi2);
+ #endif
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 9a42703..fb2e69d 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2120,6 +2120,7 @@ source "fs/Kconfig.binfmt"
+ config IA32_EMULATION
+ bool "IA32 Emulation"
+ depends on X86_64
++ select BINFMT_ELF
+ select COMPAT_BINFMT_ELF
+ ---help---
+ Include code to run 32-bit programs under a 64-bit kernel. You should
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index e82a53a..57867e4 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -551,8 +551,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+ if (index != XCR_XFEATURE_ENABLED_MASK)
+ return 1;
+ xcr0 = xcr;
+- if (kvm_x86_ops->get_cpl(vcpu) != 0)
+- return 1;
+ if (!(xcr0 & XSTATE_FP))
+ return 1;
+ if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
+@@ -566,7 +564,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+
+ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+ {
+- if (__kvm_set_xcr(vcpu, index, xcr)) {
++ if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
++ __kvm_set_xcr(vcpu, index, xcr)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 07ef7e8..f9537e3 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -49,6 +49,13 @@
+ #define EFI_DEBUG 1
+ #define PFX "EFI: "
+
++#define EFI_MIN_RESERVE 5120
++
++#define EFI_DUMMY_GUID \
++ EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9)
++
++static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 };
++
+ struct efi __read_mostly efi = {
+ .mps = EFI_INVALID_TABLE_ADDR,
+ .acpi = EFI_INVALID_TABLE_ADDR,
+@@ -787,6 +794,13 @@ void __init efi_enter_virtual_mode(void)
+ early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
+ memmap.map = NULL;
+ kfree(new_memmap);
++
++ /* clean DUMMY object */
++ efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
++ EFI_VARIABLE_NON_VOLATILE |
++ EFI_VARIABLE_BOOTSERVICE_ACCESS |
++ EFI_VARIABLE_RUNTIME_ACCESS,
++ 0, NULL);
+ }
+
+ /*
+@@ -838,22 +852,70 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
+ efi_status_t status;
+ u64 storage_size, remaining_size, max_size;
+
++ if (!(attributes & EFI_VARIABLE_NON_VOLATILE))
++ return 0;
++
+ status = efi.query_variable_info(attributes, &storage_size,
+ &remaining_size, &max_size);
+ if (status != EFI_SUCCESS)
+ return status;
+
+- if (!max_size && remaining_size > size)
+- printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
+- " is returning MaxVariableSize=0\n");
++ /*
++ * Some firmware implementations refuse to boot if there's insufficient
++ * space in the variable store. We account for that by refusing the
++ * write if permitting it would reduce the available space to under
++ * 5KB. This figure was provided by Samsung, so should be safe.
++ */
++ if ((remaining_size - size < EFI_MIN_RESERVE) &&
++ !efi_no_storage_paranoia) {
++
++ /*
++ * Triggering garbage collection may require that the firmware
++ * generate a real EFI_OUT_OF_RESOURCES error. We can force
++ * that by attempting to use more space than is available.
++ */
++ unsigned long dummy_size = remaining_size + 1024;
++ void *dummy = kzalloc(dummy_size, GFP_ATOMIC);
++
++ if (!dummy)
++ return EFI_OUT_OF_RESOURCES;
+
+- if (!storage_size || size > remaining_size ||
+- (max_size && size > max_size))
+- return EFI_OUT_OF_RESOURCES;
++ status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
++ EFI_VARIABLE_NON_VOLATILE |
++ EFI_VARIABLE_BOOTSERVICE_ACCESS |
++ EFI_VARIABLE_RUNTIME_ACCESS,
++ dummy_size, dummy);
+
+- if (!efi_no_storage_paranoia &&
+- (remaining_size - size) < (storage_size / 2))
+- return EFI_OUT_OF_RESOURCES;
++ if (status == EFI_SUCCESS) {
++ /*
++ * This should have failed, so if it didn't make sure
++ * that we delete it...
++ */
++ efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
++ EFI_VARIABLE_NON_VOLATILE |
++ EFI_VARIABLE_BOOTSERVICE_ACCESS |
++ EFI_VARIABLE_RUNTIME_ACCESS,
++ 0, dummy);
++ }
++
++ kfree(dummy);
++
++ /*
++ * The runtime code may now have triggered a garbage collection
++ * run, so check the variable info again
++ */
++ status = efi.query_variable_info(attributes, &storage_size,
++ &remaining_size, &max_size);
++
++ if (status != EFI_SUCCESS)
++ return status;
++
++ /*
++ * There still isn't enough room, so return an error
++ */
++ if (remaining_size - size < EFI_MIN_RESERVE)
++ return EFI_OUT_OF_RESOURCES;
++ }
+
+ return EFI_SUCCESS;
+ }
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 166cb36..c5f7b2c 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -343,6 +343,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
+ cap_str_10, cap_str_2);
+
+ set_capacity(vblk->disk, capacity);
++ revalidate_disk(vblk->disk);
+ done:
+ mutex_unlock(&vblk->config_lock);
+ }
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 4fddd21..38a7793 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -408,11 +408,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
+ if (INTEL_INFO(dev)->gen >= 6)
+ I915_WRITE(MI_MODE, GFX_MODE_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+- /* Required for the hardware to program scanline values for waiting */
+- if (INTEL_INFO(dev)->gen == 6)
+- I915_WRITE(GFX_MODE,
+- GFX_MODE_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+-
+ if (IS_GEN7(dev))
+ I915_WRITE(GFX_MODE_GEN7,
+ GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
+index 69c3adf..c2ab21c 100644
+--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
++++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
+@@ -520,6 +520,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
+ return 0;
+
+ no_clock:
++ iounmap(etsects->regs);
+ no_ioremap:
+ release_resource(etsects->rsrc);
+ no_resource:
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index f698183..ed7a5a6 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5524,7 +5524,20 @@ err_out:
+ return -EIO;
+ }
+
+-static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
++static bool rtl_skb_pad(struct sk_buff *skb)
++{
++ if (skb_padto(skb, ETH_ZLEN))
++ return false;
++ skb_put(skb, ETH_ZLEN - skb->len);
++ return true;
++}
++
++static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
++{
++ return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
++}
++
++static inline bool rtl8169_tso_csum(struct rtl8169_private *tp,
+ struct sk_buff *skb, u32 *opts)
+ {
+ const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
+@@ -5537,13 +5550,20 @@ static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const struct iphdr *ip = ip_hdr(skb);
+
++ if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
++ return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb);
++
+ if (ip->protocol == IPPROTO_TCP)
+ opts[offset] |= info->checksum.tcp;
+ else if (ip->protocol == IPPROTO_UDP)
+ opts[offset] |= info->checksum.udp;
+ else
+ WARN_ON_ONCE(1);
++ } else {
++ if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
++ return rtl_skb_pad(skb);
+ }
++ return true;
+ }
+
+ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+@@ -5575,6 +5595,12 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
+ goto err_stop_0;
+
++ opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
++ opts[0] = DescOwn;
++
++ if (!rtl8169_tso_csum(tp, skb, opts))
++ goto err_update_stats;
++
+ len = skb_headlen(skb);
+ mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(d, mapping))) {
+@@ -5586,11 +5612,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ tp->tx_skb[entry].len = len;
+ txd->addr = cpu_to_le64(mapping);
+
+- opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
+- opts[0] = DescOwn;
+-
+- rtl8169_tso_csum(tp, skb, opts);
+-
+ frags = rtl8169_xmit_frags(tp, skb, opts);
+ if (frags < 0)
+ goto err_dma_1;
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index 4b805be..9d3b39e 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -178,7 +178,8 @@ static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = {
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
+ { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
+- { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
++ { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
++ { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
+ { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
+ };
+
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
+index b353e7e..4a2423e 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.h
++++ b/drivers/usb/serial/ti_usb_3410_5052.h
+@@ -52,7 +52,9 @@
+
+ /* Abbott Diabetics vendor and product ids */
+ #define ABBOTT_VENDOR_ID 0x1a61
+-#define ABBOTT_PRODUCT_ID 0x3410
++#define ABBOTT_STEREO_PLUG_ID 0x3410
++#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID
++#define ABBOTT_STRIP_PORT_ID 0x3420
+
+ /* Commands */
+ #define TI_GET_VERSION 0x01
+diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
+index 9c51f62..844bd64 100644
+--- a/fs/ncpfs/dir.c
++++ b/fs/ncpfs/dir.c
+@@ -1033,15 +1033,6 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
+ DPRINTK("ncp_rmdir: removing %s/%s\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name);
+
+- /*
+- * fail with EBUSY if there are still references to this
+- * directory.
+- */
+- dentry_unhash(dentry);
+- error = -EBUSY;
+- if (!d_unhashed(dentry))
+- goto out;
+-
+ len = sizeof(__name);
+ error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
+ dentry->d_name.len, !ncp_preserve_case(dir));
+diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
+index 2ae1371..1c33dd7 100644
+--- a/include/linux/rculist_nulls.h
++++ b/include/linux/rculist_nulls.h
+@@ -105,9 +105,14 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
+ * @head: the head for your list.
+ * @member: the name of the hlist_nulls_node within the struct.
+ *
++ * The barrier() is needed to make sure compiler doesn't cache first element [1],
++ * as this loop can be restarted [2]
++ * [1] Documentation/atomic_ops.txt around line 114
++ * [2] Documentation/RCU/rculist_nulls.txt around line 146
+ */
+ #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
+- for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
++ for (({barrier();}), \
++ pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
+ (!is_a_nulls(pos)) && \
+ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
+ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index 2acd2e2..7e9f2d3 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -336,6 +336,9 @@ extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
+
+ struct timespec;
+
++/* The __sys_...msg variants allow MSG_CMSG_COMPAT */
++extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags);
++extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
+ extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
+ unsigned int flags, struct timespec *timeout);
+ extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
+diff --git a/net/compat.c b/net/compat.c
+index 6def90e..8c979cc 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -733,19 +733,25 @@ static unsigned char nas[21] = {
+
+ asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags)
+ {
+- return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++ return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ }
+
+ asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+ unsigned vlen, unsigned int flags)
+ {
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
+ return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ flags | MSG_CMSG_COMPAT);
+ }
+
+ asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
+ {
+- return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++ return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ }
+
+ asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned flags)
+@@ -767,6 +773,9 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+ int datagrams;
+ struct timespec ktspec;
+
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++
+ if (timeout == NULL)
+ return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ flags | MSG_CMSG_COMPAT, NULL);
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index d55110e..5f28fab 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -716,6 +716,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
+ tiph = &tunnel->parms.iph;
+ }
+
++ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ if ((dst = tiph->daddr) == 0) {
+ /* NBMA tunnel */
+
+@@ -851,7 +852,6 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
+ skb_reset_transport_header(skb);
+ skb_push(skb, gre_hlen);
+ skb_reset_network_header(skb);
+- memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+ IPSKB_REROUTED);
+ skb_dst_drop(skb);
+diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
+index 17ad951..5dc5137 100644
+--- a/net/ipv4/ipip.c
++++ b/net/ipv4/ipip.c
+@@ -448,6 +448,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+ if (tos & 1)
+ tos = old_iph->tos;
+
++ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ if (!dst) {
+ /* NBMA tunnel */
+ if ((rt = skb_rtable(skb)) == NULL) {
+@@ -531,7 +532,6 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+ skb->transport_header = skb->network_header;
+ skb_push(skb, sizeof(struct iphdr));
+ skb_reset_network_header(skb);
+- memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+ IPSKB_REROUTED);
+ skb_dst_drop(skb);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index fe381c2..ec8b4b7e 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3037,8 +3037,11 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
+
+ for (i = 0; i < shi->nr_frags; ++i) {
+ const struct skb_frag_struct *f = &shi->frags[i];
+- struct page *page = skb_frag_page(f);
+- sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
++ unsigned int offset = f->page_offset;
++ struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
++
++ sg_set_page(&sg, page, skb_frag_size(f),
++ offset_in_page(offset));
+ if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
+ return 1;
+ }
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 5c1807c..3add486 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -835,11 +835,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+ &md5);
+ tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
+
+- if (tcp_packets_in_flight(tp) == 0) {
++ if (tcp_packets_in_flight(tp) == 0)
+ tcp_ca_event(sk, CA_EVENT_TX_START);
+- skb->ooo_okay = 1;
+- } else
+- skb->ooo_okay = 0;
++
++ /* if no packet is in qdisc/device queue, then allow XPS to select
++ * another queue.
++ */
++ skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;
+
+ skb_push(skb, tcp_header_size);
+ skb_reset_transport_header(skb);
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index d84033b..d603caa 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2437,8 +2437,10 @@ static void init_loopback(struct net_device *dev)
+ sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
+
+ /* Failure cases are ignored */
+- if (!IS_ERR(sp_rt))
++ if (!IS_ERR(sp_rt)) {
++ sp_ifa->rt = sp_rt;
+ ip6_ins_rt(sp_rt);
++ }
+ }
+ read_unlock_bh(&idev->lock);
+ }
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 3ccd9b2..6aadaa8 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1233,7 +1233,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ if (WARN_ON(np->cork.opt))
+ return -EINVAL;
+
+- np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
++ np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
+ if (unlikely(np->cork.opt == NULL))
+ return -ENOBUFS;
+
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 6f60175..74410e6 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -350,19 +350,19 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
+ skb_put(skb, 2);
+
+ /* Copy user data into skb */
+- error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
++ error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov,
++ total_len);
+ if (error < 0) {
+ kfree_skb(skb);
+ goto error_put_sess_tun;
+ }
+- skb_put(skb, total_len);
+
+ l2tp_xmit_skb(session, skb, session->hdr_len);
+
+ sock_put(ps->tunnel_sock);
+ sock_put(sk);
+
+- return error;
++ return total_len;
+
+ error_put_sess_tun:
+ sock_put(ps->tunnel_sock);
+diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
+index e5330ed..bf99567 100644
+--- a/net/netlabel/netlabel_domainhash.c
++++ b/net/netlabel/netlabel_domainhash.c
+@@ -245,6 +245,71 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry,
+ }
+ }
+
++/**
++ * netlbl_domhsh_validate - Validate a new domain mapping entry
++ * @entry: the entry to validate
++ *
++ * This function validates the new domain mapping entry to ensure that it is
++ * a valid entry. Returns zero on success, negative values on failure.
++ *
++ */
++static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
++{
++ struct netlbl_af4list *iter4;
++ struct netlbl_domaddr4_map *map4;
++#if IS_ENABLED(CONFIG_IPV6)
++ struct netlbl_af6list *iter6;
++ struct netlbl_domaddr6_map *map6;
++#endif /* IPv6 */
++
++ if (entry == NULL)
++ return -EINVAL;
++
++ switch (entry->type) {
++ case NETLBL_NLTYPE_UNLABELED:
++ if (entry->type_def.cipsov4 != NULL ||
++ entry->type_def.addrsel != NULL)
++ return -EINVAL;
++ break;
++ case NETLBL_NLTYPE_CIPSOV4:
++ if (entry->type_def.cipsov4 == NULL)
++ return -EINVAL;
++ break;
++ case NETLBL_NLTYPE_ADDRSELECT:
++ netlbl_af4list_foreach(iter4, &entry->type_def.addrsel->list4) {
++ map4 = netlbl_domhsh_addr4_entry(iter4);
++ switch (map4->type) {
++ case NETLBL_NLTYPE_UNLABELED:
++ if (map4->type_def.cipsov4 != NULL)
++ return -EINVAL;
++ break;
++ case NETLBL_NLTYPE_CIPSOV4:
++ if (map4->type_def.cipsov4 == NULL)
++ return -EINVAL;
++ break;
++ default:
++ return -EINVAL;
++ }
++ }
++#if IS_ENABLED(CONFIG_IPV6)
++ netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) {
++ map6 = netlbl_domhsh_addr6_entry(iter6);
++ switch (map6->type) {
++ case NETLBL_NLTYPE_UNLABELED:
++ break;
++ default:
++ return -EINVAL;
++ }
++ }
++#endif /* IPv6 */
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ /*
+ * Domain Hash Table Functions
+ */
+@@ -311,6 +376,10 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
+ struct netlbl_af6list *tmp6;
+ #endif /* IPv6 */
+
++ ret_val = netlbl_domhsh_validate(entry);
++ if (ret_val != 0)
++ return ret_val;
++
+ /* XXX - we can remove this RCU read lock as the spinlock protects the
+ * entire function, but before we do we need to fixup the
+ * netlbl_af[4,6]list RCU functions to do "the right thing" with
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 5a70215..a2ac2c3 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2820,12 +2820,11 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
+ return -EOPNOTSUPP;
+
+ uaddr->sa_family = AF_PACKET;
++ memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
+ if (dev)
+- strncpy(uaddr->sa_data, dev->name, 14);
+- else
+- memset(uaddr->sa_data, 0, 14);
++ strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
+ rcu_read_unlock();
+ *uaddr_len = sizeof(*uaddr);
+
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 5e0d86e..ba0108f 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -3929,6 +3929,12 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
+
+ /* Release our hold on the endpoint. */
+ sp = sctp_sk(sk);
++ /* This could happen during socket init, thus we bail out
++ * early, since the rest of the below is not setup either.
++ */
++ if (sp->ep == NULL)
++ return;
++
+ if (sp->do_auto_asconf) {
+ sp->do_auto_asconf = 0;
+ list_del(&sp->auto_asconf_list);
+diff --git a/net/socket.c b/net/socket.c
+index 68879db..cf546a3 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1876,9 +1876,9 @@ struct used_address {
+ unsigned int name_len;
+ };
+
+-static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+- struct msghdr *msg_sys, unsigned flags,
+- struct used_address *used_address)
++static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
++ struct msghdr *msg_sys, unsigned flags,
++ struct used_address *used_address)
+ {
+ struct compat_msghdr __user *msg_compat =
+ (struct compat_msghdr __user *)msg;
+@@ -1998,22 +1998,30 @@ out:
+ * BSD sendmsg interface
+ */
+
+-SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
++long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
+ {
+ int fput_needed, err;
+ struct msghdr msg_sys;
+- struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
++ struct socket *sock;
+
++ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+- err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
++ err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
+
+ fput_light(sock->file, fput_needed);
+ out:
+ return err;
+ }
+
++SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
++{
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++ return __sys_sendmsg(fd, msg, flags);
++}
++
+ /*
+ * Linux sendmmsg interface
+ */
+@@ -2044,15 +2052,16 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
+
+ while (datagrams < vlen) {
+ if (MSG_CMSG_COMPAT & flags) {
+- err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
+- &msg_sys, flags, &used_address);
++ err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
++ &msg_sys, flags, &used_address);
+ if (err < 0)
+ break;
+ err = __put_user(err, &compat_entry->msg_len);
+ ++compat_entry;
+ } else {
+- err = __sys_sendmsg(sock, (struct msghdr __user *)entry,
+- &msg_sys, flags, &used_address);
++ err = ___sys_sendmsg(sock,
++ (struct msghdr __user *)entry,
++ &msg_sys, flags, &used_address);
+ if (err < 0)
+ break;
+ err = put_user(err, &entry->msg_len);
+@@ -2076,11 +2085,13 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
+ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
+ unsigned int, vlen, unsigned int, flags)
+ {
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
+ return __sys_sendmmsg(fd, mmsg, vlen, flags);
+ }
+
+-static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+- struct msghdr *msg_sys, unsigned flags, int nosec)
++static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
++ struct msghdr *msg_sys, unsigned flags, int nosec)
+ {
+ struct compat_msghdr __user *msg_compat =
+ (struct compat_msghdr __user *)msg;
+@@ -2177,23 +2188,31 @@ out:
+ * BSD recvmsg interface
+ */
+
+-SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
+- unsigned int, flags)
++long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags)
+ {
+ int fput_needed, err;
+ struct msghdr msg_sys;
+- struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
++ struct socket *sock;
+
++ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+- err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0);
++ err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0);
+
+ fput_light(sock->file, fput_needed);
+ out:
+ return err;
+ }
+
++SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
++ unsigned int, flags)
++{
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++ return __sys_recvmsg(fd, msg, flags);
++}
++
+ /*
+ * Linux recvmmsg interface
+ */
+@@ -2231,17 +2250,18 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
+ * No need to ask LSM for more than the first datagram.
+ */
+ if (MSG_CMSG_COMPAT & flags) {
+- err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
+- &msg_sys, flags & ~MSG_WAITFORONE,
+- datagrams);
++ err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
++ &msg_sys, flags & ~MSG_WAITFORONE,
++ datagrams);
+ if (err < 0)
+ break;
+ err = __put_user(err, &compat_entry->msg_len);
+ ++compat_entry;
+ } else {
+- err = __sys_recvmsg(sock, (struct msghdr __user *)entry,
+- &msg_sys, flags & ~MSG_WAITFORONE,
+- datagrams);
++ err = ___sys_recvmsg(sock,
++ (struct msghdr __user *)entry,
++ &msg_sys, flags & ~MSG_WAITFORONE,
++ datagrams);
+ if (err < 0)
+ break;
+ err = put_user(err, &entry->msg_len);
+@@ -2308,6 +2328,9 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
+ int datagrams;
+ struct timespec timeout_sys;
+
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++
+ if (!timeout)
+ return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL);
+
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index acb7fac..3b79a4a 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -149,14 +149,32 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
+ return -EINVAL;
+ }
+
++ alts = &iface->altsetting[0];
++ altsd = get_iface_desc(alts);
++
++ /*
++ * Android with both accessory and audio interfaces enabled gets the
++ * interface numbers wrong.
++ */
++ if ((chip->usb_id == USB_ID(0x18d1, 0x2d04) ||
++ chip->usb_id == USB_ID(0x18d1, 0x2d05)) &&
++ interface == 0 &&
++ altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
++ altsd->bInterfaceSubClass == USB_SUBCLASS_VENDOR_SPEC) {
++ interface = 2;
++ iface = usb_ifnum_to_if(dev, interface);
++ if (!iface)
++ return -EINVAL;
++ alts = &iface->altsetting[0];
++ altsd = get_iface_desc(alts);
++ }
++
+ if (usb_interface_claimed(iface)) {
+ snd_printdd(KERN_INFO "%d:%d:%d: skipping, already claimed\n",
+ dev->devnum, ctrlif, interface);
+ return -EINVAL;
+ }
+
+- alts = &iface->altsetting[0];
+- altsd = get_iface_desc(alts);
+ if ((altsd->bInterfaceClass == USB_CLASS_AUDIO ||
+ altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC) &&
+ altsd->bInterfaceSubClass == USB_SUBCLASS_MIDISTREAMING) {
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 97ec155..aeb26eb 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -821,6 +821,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+
+ case USB_ID(0x046d, 0x0808):
+ case USB_ID(0x046d, 0x0809):
++ case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
+ case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
+ case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
+ case USB_ID(0x046d, 0x0991):
diff --git a/1048_linux-3.2.49.patch b/1048_linux-3.2.49.patch
new file mode 100644
index 00000000..2dab0cf6
--- /dev/null
+++ b/1048_linux-3.2.49.patch
@@ -0,0 +1,2970 @@
+diff --git a/Documentation/i2c/busses/i2c-piix4 b/Documentation/i2c/busses/i2c-piix4
+index 475bb4a..65da157 100644
+--- a/Documentation/i2c/busses/i2c-piix4
++++ b/Documentation/i2c/busses/i2c-piix4
+@@ -8,7 +8,7 @@ Supported adapters:
+ Datasheet: Only available via NDA from ServerWorks
+ * ATI IXP200, IXP300, IXP400, SB600, SB700 and SB800 southbridges
+ Datasheet: Not publicly available
+- * AMD Hudson-2
++ * AMD Hudson-2, CZ
+ Datasheet: Not publicly available
+ * Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge
+ Datasheet: Publicly available at the SMSC website http://www.smsc.com
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 83f156e..8659eba 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -159,7 +159,7 @@ S: Maintained
+ F: drivers/net/ethernet/realtek/r8169.c
+
+ 8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ L: linux-serial@vger.kernel.org
+ W: http://serial.sourceforge.net
+ S: Maintained
+@@ -1781,9 +1781,9 @@ X: net/wireless/wext*
+
+ CHAR and MISC DRIVERS
+ M: Arnd Bergmann <arnd@arndb.de>
+-M: Greg Kroah-Hartman <greg@kroah.com>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
+-S: Maintained
++S: Supported
+ F: drivers/char/*
+ F: drivers/misc/*
+
+@@ -2315,7 +2315,7 @@ F: lib/lru_cache.c
+ F: Documentation/blockdev/drbd/
+
+ DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6.git
+ S: Supported
+ F: Documentation/kobject.txt
+@@ -6257,15 +6257,16 @@ S: Maintained
+ F: arch/alpha/kernel/srm_env.c
+
+ STABLE BRANCH
+-M: Greg Kroah-Hartman <greg@kroah.com>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ L: stable@vger.kernel.org
+-S: Maintained
++S: Supported
++F: Documentation/stable_kernel_rules.txt
+
+ STAGING SUBSYSTEM
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
+ L: devel@driverdev.osuosl.org
+-S: Maintained
++S: Supported
+ F: drivers/staging/
+
+ STAGING - AGERE HERMES II and II.5 WIRELESS DRIVERS
+@@ -6654,8 +6655,8 @@ S: Maintained
+ K: ^Subject:.*(?i)trivial
+
+ TTY LAYER
+-M: Greg Kroah-Hartman <gregkh@suse.de>
+-S: Maintained
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++S: Supported
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
+ F: drivers/tty/*
+ F: drivers/tty/serial/serial_core.c
+@@ -6943,7 +6944,7 @@ S: Maintained
+ F: drivers/usb/serial/digi_acceleport.c
+
+ USB SERIAL DRIVER
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ L: linux-usb@vger.kernel.org
+ S: Supported
+ F: Documentation/usb/usb-serial.txt
+@@ -6958,9 +6959,8 @@ S: Maintained
+ F: drivers/usb/serial/empeg.c
+
+ USB SERIAL KEYSPAN DRIVER
+-M: Greg Kroah-Hartman <greg@kroah.com>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ L: linux-usb@vger.kernel.org
+-W: http://www.kroah.com/linux/
+ S: Maintained
+ F: drivers/usb/serial/*keyspan*
+
+@@ -6988,7 +6988,7 @@ F: Documentation/video4linux/sn9c102.txt
+ F: drivers/media/video/sn9c102/
+
+ USB SUBSYSTEM
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ L: linux-usb@vger.kernel.org
+ W: http://www.linux-usb.org
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git
+@@ -7075,7 +7075,7 @@ F: fs/hppfs/
+
+ USERSPACE I/O (UIO)
+ M: "Hans J. Koch" <hjk@hansjkoch.de>
+-M: Greg Kroah-Hartman <gregkh@suse.de>
++M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ S: Maintained
+ F: Documentation/DocBook/uio-howto.tmpl
+ F: drivers/uio/
+diff --git a/Makefile b/Makefile
+index 299e2eb..2e3d791 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 48
++SUBLEVEL = 49
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
+index a559ee7..778d248 100644
+--- a/arch/arm/kernel/perf_event.c
++++ b/arch/arm/kernel/perf_event.c
+@@ -795,6 +795,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+ struct frame_tail __user *tail;
+
+
++ perf_callchain_store(entry, regs->ARM_pc);
+ tail = (struct frame_tail __user *)regs->ARM_fp - 1;
+
+ while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index fb9bb46..2c8890a 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -74,7 +74,7 @@
+ #endif
+
+ int boot_cpuid = 0;
+-int __initdata spinning_secondaries;
++int spinning_secondaries;
+ u64 ppc64_pft_size;
+
+ /* Pick defaults since we might want to patch instructions
+diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
+index 054cc01..d50a821 100644
+--- a/arch/x86/xen/time.c
++++ b/arch/x86/xen/time.c
+@@ -36,9 +36,8 @@ static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
+ /* snapshots of runstate info */
+ static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
+
+-/* unused ns of stolen and blocked time */
++/* unused ns of stolen time */
+ static DEFINE_PER_CPU(u64, xen_residual_stolen);
+-static DEFINE_PER_CPU(u64, xen_residual_blocked);
+
+ /* return an consistent snapshot of 64-bit time/counter value */
+ static u64 get64(const u64 *p)
+@@ -115,7 +114,7 @@ static void do_stolen_accounting(void)
+ {
+ struct vcpu_runstate_info state;
+ struct vcpu_runstate_info *snap;
+- s64 blocked, runnable, offline, stolen;
++ s64 runnable, offline, stolen;
+ cputime_t ticks;
+
+ get_runstate_snapshot(&state);
+@@ -125,7 +124,6 @@ static void do_stolen_accounting(void)
+ snap = &__get_cpu_var(xen_runstate_snapshot);
+
+ /* work out how much time the VCPU has not been runn*ing* */
+- blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
+ runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
+ offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];
+
+@@ -141,17 +139,6 @@ static void do_stolen_accounting(void)
+ ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
+ __this_cpu_write(xen_residual_stolen, stolen);
+ account_steal_ticks(ticks);
+-
+- /* Add the appropriate number of ticks of blocked time,
+- including any left-overs from last time. */
+- blocked += __this_cpu_read(xen_residual_blocked);
+-
+- if (blocked < 0)
+- blocked = 0;
+-
+- ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
+- __this_cpu_write(xen_residual_blocked, blocked);
+- account_idle_ticks(ticks);
+ }
+
+ /* Get the TSC speed from Xen */
+diff --git a/block/genhd.c b/block/genhd.c
+index 6edf228..8bd4ef2 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -519,7 +519,7 @@ void register_disk(struct gendisk *disk)
+
+ ddev->parent = disk->driverfs_dev;
+
+- dev_set_name(ddev, disk->disk_name);
++ dev_set_name(ddev, "%s", disk->disk_name);
+
+ /* delay uevents, until we scanned partition table */
+ dev_set_uevent_suppress(ddev, 1);
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 54dd4e3..dc9991f 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -477,7 +477,8 @@ static struct crypto_template *__crypto_lookup_template(const char *name)
+
+ struct crypto_template *crypto_lookup_template(const char *name)
+ {
+- return try_then_request_module(__crypto_lookup_template(name), name);
++ return try_then_request_module(__crypto_lookup_template(name), "%s",
++ name);
+ }
+ EXPORT_SYMBOL_GPL(crypto_lookup_template);
+
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 87acc23..0445f52 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -302,6 +302,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
++ { PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
+
+ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+@@ -318,6 +319,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+
+ /* AMD */
+ { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
++ { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
+ /* AMD is using RAID class only for ahci controllers */
+ { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index 0e92326..7a949af 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -360,6 +360,8 @@ static const struct pci_device_id piix_pci_tbl[] = {
+ /* SATA Controller IDE (BayTrail) */
+ { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
+ { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
++ /* SATA Controller IDE (Coleto Creek) */
++ { 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+
+ { } /* terminate list */
+ };
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 3c92dbd..60def03 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -1541,8 +1541,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
+ u32 fbs = readl(port_mmio + PORT_FBS);
+ int pmp = fbs >> PORT_FBS_DWE_OFFSET;
+
+- if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
+- ata_link_online(&ap->pmp_link[pmp])) {
++ if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links)) {
+ link = &ap->pmp_link[pmp];
+ fbs_need_dec = true;
+ }
+diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
+index 21b80c5..f63a588 100644
+--- a/drivers/ata/libata-pmp.c
++++ b/drivers/ata/libata-pmp.c
+@@ -389,9 +389,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
+ /* link reports offline after LPM */
+ link->flags |= ATA_LFLAG_NO_LPM;
+
+- /* Class code report is unreliable. */
++ /*
++ * Class code report is unreliable and SRST times
++ * out under certain configurations.
++ */
+ if (link->pmp < 5)
+- link->flags |= ATA_LFLAG_ASSUME_ATA;
++ link->flags |= ATA_LFLAG_NO_SRST |
++ ATA_LFLAG_ASSUME_ATA;
+
+ /* port 5 is for SEMB device and it doesn't like SRST */
+ if (link->pmp == 5)
+@@ -399,20 +403,17 @@ static void sata_pmp_quirks(struct ata_port *ap)
+ ATA_LFLAG_ASSUME_SEMB;
+ }
+ } else if (vendor == 0x1095 && devid == 0x4723) {
+- /* sil4723 quirks */
+- ata_for_each_link(link, ap, EDGE) {
+- /* link reports offline after LPM */
+- link->flags |= ATA_LFLAG_NO_LPM;
+-
+- /* class code report is unreliable */
+- if (link->pmp < 2)
+- link->flags |= ATA_LFLAG_ASSUME_ATA;
+-
+- /* the config device at port 2 locks up on SRST */
+- if (link->pmp == 2)
+- link->flags |= ATA_LFLAG_NO_SRST |
+- ATA_LFLAG_ASSUME_ATA;
+- }
++ /*
++ * sil4723 quirks
++ *
++ * Link reports offline after LPM. Class code report is
++ * unreliable. SIMG PMPs never got SRST reliable and the
++ * config device at port 2 locks up on SRST.
++ */
++ ata_for_each_link(link, ap, EDGE)
++ link->flags |= ATA_LFLAG_NO_LPM |
++ ATA_LFLAG_NO_SRST |
++ ATA_LFLAG_ASSUME_ATA;
+ } else if (vendor == 0x1095 && devid == 0x4726) {
+ /* sil4726 quirks */
+ ata_for_each_link(link, ap, EDGE) {
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 40a0fcb..5fb6885 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -598,8 +598,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ if (!lo->sock)
+ return -EINVAL;
+
++ lo->disconnect = 1;
++
+ nbd_send_req(lo, &sreq);
+- return 0;
++ return 0;
+ }
+
+ case NBD_CLEAR_SOCK: {
+@@ -629,6 +631,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ lo->sock = SOCKET_I(inode);
+ if (max_part > 0)
+ bdev->bd_invalidated = 1;
++ lo->disconnect = 0; /* we're connected now */
+ return 0;
+ } else {
+ fput(file);
+@@ -675,7 +678,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+
+ mutex_unlock(&lo->tx_lock);
+
+- thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
++ thread = kthread_create(nbd_thread, lo, "%s",
++ lo->disk->disk_name);
+ if (IS_ERR(thread)) {
+ mutex_lock(&lo->tx_lock);
+ return PTR_ERR(thread);
+@@ -700,6 +704,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+ set_capacity(lo->disk, 0);
+ if (max_part > 0)
+ ioctl_by_bdev(bdev, BLKRRPART, 0);
++ if (lo->disconnect) /* user requested, ignore socket errors */
++ return 0;
+ return lo->harderror;
+ }
+
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index 2678b6f..1331740 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2885,7 +2885,7 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
+ if (lba < 0)
+ return -EINVAL;
+
+- cgc->buffer = kmalloc(blocksize, GFP_KERNEL);
++ cgc->buffer = kzalloc(blocksize, GFP_KERNEL);
+ if (cgc->buffer == NULL)
+ return -ENOMEM;
+
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index e8eedb7..720cace 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -349,10 +349,10 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ unsigned long flags;
+
+- spin_lock_irqsave(&pch->lock, flags);
+-
+ tasklet_kill(&pch->task);
+
++ spin_lock_irqsave(&pch->lock, flags);
++
+ pl330_release_channel(pch->pl330_chid);
+ pch->pl330_chid = NULL;
+
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index 8af25a0..810658e 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -383,7 +383,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
+ sizeof(u64));
+
+ /* Make sure we flush all writes before updating the writeIndex */
+- smp_wmb();
++ wmb();
+
+ /* Now, update the write location */
+ hv_set_next_write_location(outring_info, next_write_location);
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index d2d0a2a..44442d5 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -466,7 +466,7 @@ static void vmbus_on_msg_dpc(unsigned long data)
+ * will not deliver any more messages since there is
+ * no empty slot
+ */
+- smp_mb();
++ mb();
+
+ if (msg->header.message_flags.msg_pending) {
+ /*
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 60f593c..dbd4fa5 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -137,6 +137,7 @@ config I2C_PIIX4
+ ATI SB700
+ ATI SB800
+ AMD Hudson-2
++ AMD CZ
+ Serverworks OSB4
+ Serverworks CSB5
+ Serverworks CSB6
+diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
+index 6d14ac2..14b588c 100644
+--- a/drivers/i2c/busses/i2c-piix4.c
++++ b/drivers/i2c/busses/i2c-piix4.c
+@@ -22,7 +22,7 @@
+ Intel PIIX4, 440MX
+ Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
+ ATI IXP200, IXP300, IXP400, SB600, SB700, SB800
+- AMD Hudson-2
++ AMD Hudson-2, CZ
+ SMSC Victory66
+
+ Note: we assume there can only be one device, with one SMBus interface.
+@@ -481,6 +481,7 @@ static const struct pci_device_id piix4_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x790b) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
+ PCI_DEVICE_ID_SERVERWORKS_OSB4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 07cb1a6..6cc8e67 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1076,6 +1076,10 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
+
+ /* Large PTE found which maps this address */
+ unmap_size = PTE_PAGE_SIZE(*pte);
++
++ /* Only unmap from the first pte in the page */
++ if ((unmap_size - 1) & bus_addr)
++ break;
+ count = PAGE_SIZE_PTE_COUNT(unmap_size);
+ for (i = 0; i < count; i++)
+ pte[i] = 0ULL;
+@@ -1085,7 +1089,7 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
+ unmapped += unmap_size;
+ }
+
+- BUG_ON(!is_power_of_2(unmapped));
++ BUG_ON(unmapped && !is_power_of_2(unmapped));
+
+ return unmapped;
+ }
+diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
+index e4b5c03..4f8c3f7 100644
+--- a/drivers/media/dvb/dvb-core/dmxdev.c
++++ b/drivers/media/dvb/dvb-core/dmxdev.c
+@@ -380,10 +380,8 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
+ ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2,
+ buffer2_len);
+ }
+- if (ret < 0) {
+- dvb_ringbuffer_flush(&dmxdevfilter->buffer);
++ if (ret < 0)
+ dmxdevfilter->buffer.error = ret;
+- }
+ if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
+ dmxdevfilter->state = DMXDEV_STATE_DONE;
+ spin_unlock(&dmxdevfilter->dev->lock);
+@@ -419,10 +417,8 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
+ ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len);
+ if (ret == buffer1_len)
+ ret = dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len);
+- if (ret < 0) {
+- dvb_ringbuffer_flush(buffer);
++ if (ret < 0)
+ buffer->error = ret;
+- }
+ spin_unlock(&dmxdevfilter->dev->lock);
+ wake_up(&buffer->queue);
+ return 0;
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index ed7a5a6..a3bd0ba 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5584,14 +5584,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+ goto err_stop_0;
+ }
+
+- /* 8168evl does not automatically pad to minimum length. */
+- if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
+- skb->len < ETH_ZLEN)) {
+- if (skb_padto(skb, ETH_ZLEN))
+- goto err_update_stats;
+- skb_put(skb, ETH_ZLEN - skb->len);
+- }
+-
+ if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
+ goto err_stop_0;
+
+diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
+index 48ab38a..02c939e 100644
+--- a/drivers/net/wan/dlci.c
++++ b/drivers/net/wan/dlci.c
+@@ -385,21 +385,37 @@ static int dlci_del(struct dlci_add *dlci)
+ struct frad_local *flp;
+ struct net_device *master, *slave;
+ int err;
++ bool found = false;
++
++ rtnl_lock();
+
+ /* validate slave device */
+ master = __dev_get_by_name(&init_net, dlci->devname);
+- if (!master)
+- return -ENODEV;
++ if (!master) {
++ err = -ENODEV;
++ goto out;
++ }
++
++ list_for_each_entry(dlp, &dlci_devs, list) {
++ if (dlp->master == master) {
++ found = true;
++ break;
++ }
++ }
++ if (!found) {
++ err = -ENODEV;
++ goto out;
++ }
+
+ if (netif_running(master)) {
+- return -EBUSY;
++ err = -EBUSY;
++ goto out;
+ }
+
+ dlp = netdev_priv(master);
+ slave = dlp->slave;
+ flp = netdev_priv(slave);
+
+- rtnl_lock();
+ err = (*flp->deassoc)(slave, master);
+ if (!err) {
+ list_del(&dlp->list);
+@@ -408,8 +424,8 @@ static int dlci_del(struct dlci_add *dlci)
+
+ dev_put(slave);
+ }
++out:
+ rtnl_unlock();
+-
+ return err;
+ }
+
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+index 3b262ba..c41eb9d 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+@@ -3625,7 +3625,7 @@ static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah,
+ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
+ {
+ int chain;
+- u32 regval;
++ u32 regval, value;
+ u32 ant_div_ctl1;
+ static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
+ AR_PHY_SWITCH_CHAIN_0,
+@@ -3633,7 +3633,11 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
+ AR_PHY_SWITCH_CHAIN_2,
+ };
+
+- u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
++ if (AR_SREV_9485(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0))
++ ath9k_hw_cfg_output(ah, AR9300_EXT_LNA_CTL_GPIO_AR9485,
++ AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
++
++ value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
+
+ if (AR_SREV_9462(ah)) {
+ if (AR_SREV_9462_10(ah)) {
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+index 4114fe7..4e9b71b 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+@@ -334,6 +334,8 @@
+
+ #define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118
+
++#define AR9300_EXT_LNA_CTL_GPIO_AR9485 9
++
+ /*
+ * AGC Field Definitions
+ */
+diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
+index bcabfbf..5e522e4 100644
+--- a/drivers/net/wireless/ath/ath9k/calib.c
++++ b/drivers/net/wireless/ath/ath9k/calib.c
+@@ -391,7 +391,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
+
+ if (!caldata) {
+ chan->noisefloor = nf;
+- ah->noise = ath9k_hw_getchan_noise(ah, chan);
+ return false;
+ }
+
+@@ -413,6 +412,7 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
+
+ ah->caldata->channel = chan->channel;
+ ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
++ ah->caldata->chanmode = chan->chanmode;
+ h = ah->caldata->nfCalHist;
+ default_nf = ath9k_hw_get_default_nf(ah, chan);
+ for (i = 0; i < NUM_NF_READINGS; i++) {
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index 2b8e957..c623527 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -1540,7 +1540,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
+ if (caldata &&
+ (chan->channel != caldata->channel ||
+ (chan->channelFlags & ~CHANNEL_CW_INT) !=
+- (caldata->channelFlags & ~CHANNEL_CW_INT))) {
++ (caldata->channelFlags & ~CHANNEL_CW_INT) ||
++ chan->chanmode != caldata->chanmode)) {
+ /* Operating channel changed, reset channel calibration data */
+ memset(caldata, 0, sizeof(*caldata));
+ ath9k_init_nfcal_hist_buffer(ah, chan);
+diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
+index 0c65a09..dc774cd 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.h
++++ b/drivers/net/wireless/ath/ath9k/hw.h
+@@ -352,6 +352,7 @@ struct ath9k_rtt_hist {
+ struct ath9k_hw_cal_data {
+ u16 channel;
+ u32 channelFlags;
++ u32 chanmode;
+ int32_t CalValid;
+ int8_t iCoff;
+ int8_t qCoff;
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index df3e27c..a59267a 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -1688,13 +1688,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
+ ath_update_survey_stats(sc);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
+
+- /*
+- * Preserve the current channel values, before updating
+- * the same channel
+- */
+- if (ah->curchan && (old_pos == pos))
+- ath9k_hw_getnf(ah, ah->curchan);
+-
+ ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
+ curchan, conf->channel_type);
+
+diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
+index b97a40e..36a7ce3 100644
+--- a/drivers/net/wireless/b43/Kconfig
++++ b/drivers/net/wireless/b43/Kconfig
+@@ -28,12 +28,12 @@ config B43
+
+ config B43_BCMA
+ bool "Support for BCMA bus"
+- depends on B43 && BCMA
++ depends on B43 && (BCMA = y || BCMA = B43)
+ default y
+
+ config B43_SSB
+ bool
+- depends on B43 && SSB
++ depends on B43 && (SSB = y || SSB = B43)
+ default y
+
+ # Auto-select SSB PCI-HOST support, if possible
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+index 1e851aa..17a8e96 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+@@ -104,7 +104,7 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ tx_agc[RF90_PATH_A] = 0x10101010;
+ tx_agc[RF90_PATH_B] = 0x10101010;
+ } else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+- TXHIGHPWRLEVEL_LEVEL1) {
++ TXHIGHPWRLEVEL_LEVEL2) {
+ tx_agc[RF90_PATH_A] = 0x00000000;
+ tx_agc[RF90_PATH_B] = 0x00000000;
+ } else{
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index 0984dcf..016ef86 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -367,6 +367,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+ {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
++ {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
+ {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
+ {}
+ };
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index cab24f7..f0c8c5d 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1123,6 +1123,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk
+ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
+ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
++DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
+
+ /*
+ * Serverworks CSB5 IDE does not fully support native mode
+diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
+index ea09ff2..5317d94 100644
+--- a/drivers/rtc/rtc-rv3029c2.c
++++ b/drivers/rtc/rtc-rv3029c2.c
+@@ -310,7 +310,7 @@ static int rv3029c2_rtc_i2c_set_alarm(struct i2c_client *client,
+ dev_dbg(&client->dev, "alarm IRQ armed\n");
+ } else {
+ /* disable AIE irq */
+- ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 1);
++ ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 0);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
+index 4f1b10b..3743ac9 100644
+--- a/drivers/s390/scsi/zfcp_aux.c
++++ b/drivers/s390/scsi/zfcp_aux.c
+@@ -3,7 +3,7 @@
+ *
+ * Module interface and handling of zfcp data structures.
+ *
+- * Copyright IBM Corporation 2002, 2010
++ * Copyright IBM Corp. 2002, 2013
+ */
+
+ /*
+@@ -23,6 +23,7 @@
+ * Christof Schmitt
+ * Martin Petermann
+ * Sven Schuetz
++ * Steffen Maier
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -415,6 +416,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
+ adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
+ adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
+
++ adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
++
+ if (!zfcp_scsi_adapter_register(adapter))
+ return adapter;
+
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index 8c849f0..8bfd579 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -3,7 +3,7 @@
+ *
+ * Implementation of FSF commands.
+ *
+- * Copyright IBM Corporation 2002, 2010
++ * Copyright IBM Corp. 2002, 2013
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -455,11 +455,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
+
+ fc_host_port_name(shost) = nsp->fl_wwpn;
+ fc_host_node_name(shost) = nsp->fl_wwnn;
+- fc_host_port_id(shost) = ntoh24(bottom->s_id);
+- fc_host_speed(shost) = bottom->fc_link_speed;
+ fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
+
+- adapter->hydra_version = bottom->adapter_type;
+ adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
+ adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
+ (u16)FSF_STATUS_READS_RECOM);
+@@ -467,6 +464,18 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
+ if (fc_host_permanent_port_name(shost) == -1)
+ fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
+
++ zfcp_scsi_set_prot(adapter);
++
++ /* no error return above here, otherwise must fix call chains */
++ /* do not evaluate invalid fields */
++ if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
++ return 0;
++
++ fc_host_port_id(shost) = ntoh24(bottom->s_id);
++ fc_host_speed(shost) = bottom->fc_link_speed;
++
++ adapter->hydra_version = bottom->adapter_type;
++
+ switch (bottom->fc_topology) {
+ case FSF_TOPO_P2P:
+ adapter->peer_d_id = ntoh24(bottom->peer_d_id);
+@@ -488,8 +497,6 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
+ return -EIO;
+ }
+
+- zfcp_scsi_set_prot(adapter);
+-
+ return 0;
+ }
+
+@@ -534,8 +541,14 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
+ fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
+ adapter->hydra_version = 0;
+
++ /* avoids adapter shutdown to be able to recognize
++ * events such as LINK UP */
++ atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
++ &adapter->status);
+ zfcp_fsf_link_down_info_eval(req,
+ &qtcb->header.fsf_status_qual.link_down_info);
++ if (zfcp_fsf_exchange_config_evaluate(req))
++ return;
+ break;
+ default:
+ zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index b79576b..7b35364 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -3,7 +3,7 @@
+ *
+ * Interface to Linux SCSI midlayer.
+ *
+- * Copyright IBM Corporation 2002, 2010
++ * Copyright IBM Corp. 2002, 2013
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -311,8 +311,12 @@ static struct scsi_host_template zfcp_scsi_host_template = {
+ .proc_name = "zfcp",
+ .can_queue = 4096,
+ .this_id = -1,
+- .sg_tablesize = 1, /* adjusted later */
+- .max_sectors = 8, /* adjusted later */
++ .sg_tablesize = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
++ * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
++ /* GCD, adjusted later */
++ .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
++ * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
++ /* GCD, adjusted later */
+ .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
+ .cmd_per_lun = 1,
+ .use_clustering = 1,
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 7c471eb..fc5a2ef 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -4886,10 +4886,12 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
+ sense, sense_handle);
+ }
+
+- for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) {
+- dma_free_coherent(&instance->pdev->dev,
+- kern_sge32[i].length,
+- kbuff_arr[i], kern_sge32[i].phys_addr);
++ for (i = 0; i < ioc->sge_count; i++) {
++ if (kbuff_arr[i])
++ dma_free_coherent(&instance->pdev->dev,
++ kern_sge32[i].length,
++ kbuff_arr[i],
++ kern_sge32[i].phys_addr);
+ }
+
+ megasas_return_cmd(instance, cmd);
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
+index 17de348..a11a909 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
+@@ -79,10 +79,6 @@ static int msix_disable = -1;
+ module_param(msix_disable, int, 0);
+ MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
+
+-static int missing_delay[2] = {-1, -1};
+-module_param_array(missing_delay, int, NULL, 0);
+-MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
+-
+ static int mpt2sas_fwfault_debug;
+ MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
+ "and halt firmware - (default=0)");
+@@ -2104,7 +2100,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
+ }
+
+ /**
+- * _base_update_missing_delay - change the missing delay timers
++ * mpt2sas_base_update_missing_delay - change the missing delay timers
+ * @ioc: per adapter object
+ * @device_missing_delay: amount of time till device is reported missing
+ * @io_missing_delay: interval IO is returned when there is a missing device
+@@ -2115,8 +2111,8 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
+ * delay, as well as the io missing delay. This should be called at driver
+ * load time.
+ */
+-static void
+-_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
++void
++mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
+ u16 device_missing_delay, u8 io_missing_delay)
+ {
+ u16 dmd, dmd_new, dmd_orignal;
+@@ -4302,9 +4298,6 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
+ if (r)
+ goto out_free_resources;
+
+- if (missing_delay[0] != -1 && missing_delay[1] != -1)
+- _base_update_missing_delay(ioc, missing_delay[0],
+- missing_delay[1]);
+
+ return 0;
+
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
+index 3c3babc..aa4daf6 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
++++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
+@@ -1029,6 +1029,9 @@ void mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_ty
+
+ void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
+
++void mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
++ u16 device_missing_delay, u8 io_missing_delay);
++
+ int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc);
+
+ /* scsih shared API */
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index 2824a90..987c6d6 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -101,6 +101,10 @@ static ushort max_sectors = 0xFFFF;
+ module_param(max_sectors, ushort, 0);
+ MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 8192 default=8192");
+
++static int missing_delay[2] = {-1, -1};
++module_param_array(missing_delay, int, NULL, 0);
++MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
++
+ /* scsi-mid layer global parmeter is max_report_luns, which is 511 */
+ #define MPT2SAS_MAX_LUN (16895)
+ static int max_lun = MPT2SAS_MAX_LUN;
+@@ -3930,11 +3934,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+ else
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+ } else
+-/* MPI Revision I (UNIT = 0xA) - removed MPI2_SCSIIO_CONTROL_UNTAGGED */
+-/* mpi_control |= MPI2_SCSIIO_CONTROL_UNTAGGED;
+- */
+- mpi_control |= (0x500);
+-
++ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+ } else
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+ /* Make sure Device is not raid volume.
+@@ -7006,11 +7006,14 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ struct _sas_device *sas_device;
+ struct _sas_node *expander_device;
+ static struct _raid_device *raid_device;
++ u8 retry_count;
+
+ printk(MPT2SAS_INFO_FMT "scan devices: start\n", ioc->name);
+
+ _scsih_sas_host_refresh(ioc);
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: expanders start\n",
++ ioc->name);
+ /* expanders */
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+@@ -7019,19 +7022,39 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from expander scan: "
++ "ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ handle = le16_to_cpu(expander_pg0.DevHandle);
+ expander_device = mpt2sas_scsih_expander_find_by_sas_address(
+ ioc, le64_to_cpu(expander_pg0.SASAddress));
+ if (expander_device)
+ _scsih_refresh_expander_links(ioc, expander_device,
+ handle);
+- else
++ else {
++ printk(MPT2SAS_INFO_FMT "\tBEFORE adding expander: "
++ "handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(expander_pg0.SASAddress));
+ _scsih_expander_add(ioc, handle);
++ printk(MPT2SAS_INFO_FMT "\tAFTER adding expander: "
++ "handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(expander_pg0.SASAddress));
++ }
+ }
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: expanders complete\n",
++ ioc->name);
++
+ if (!ioc->ir_firmware)
+ goto skip_to_sas;
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices phys disk start\n", ioc->name);
+ /* phys disk */
+ phys_disk_num = 0xFF;
+ while (!(mpt2sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+@@ -7041,6 +7064,13 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan:"
++ "ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ phys_disk_num = pd_pg0.PhysDiskNum;
+ handle = le16_to_cpu(pd_pg0.DevHandle);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+@@ -7050,17 +7080,46 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle) != 0)
+ continue;
++ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
++ MPI2_IOCSTATUS_MASK;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan "
++ "ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle,
+ &sas_address)) {
++ printk(MPT2SAS_INFO_FMT "\tBEFORE adding phys disk: "
++ " handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(sas_device_pg0.SASAddress));
+ mpt2sas_transport_update_links(ioc, sas_address,
+ handle, sas_device_pg0.PhyNum,
+ MPI2_SAS_NEG_LINK_RATE_1_5);
+ set_bit(handle, ioc->pd_handles);
+- _scsih_add_device(ioc, handle, 0, 1);
++ retry_count = 0;
++ /* This will retry adding the end device.
++ * _scsih_add_device() will decide on retries and
++ * return "1" when it should be retried
++ */
++ while (_scsih_add_device(ioc, handle, retry_count++,
++ 1)) {
++ ssleep(1);
++ }
++ printk(MPT2SAS_INFO_FMT "\tAFTER adding phys disk: "
++ " handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(sas_device_pg0.SASAddress));
+ }
+ }
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: phys disk complete\n",
++ ioc->name);
++
++ printk(MPT2SAS_INFO_FMT "\tscan devices: volumes start\n", ioc->name);
+ /* volumes */
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+@@ -7069,6 +7128,13 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: "
++ "ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ handle = le16_to_cpu(volume_pg1.DevHandle);
+ raid_device = _scsih_raid_device_find_by_wwid(ioc,
+ le64_to_cpu(volume_pg1.WWID));
+@@ -7078,18 +7144,38 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t)))
+ continue;
++ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
++ MPI2_IOCSTATUS_MASK;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: "
++ "ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
+ memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
+ element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
+ element.VolDevHandle = volume_pg1.DevHandle;
++ printk(MPT2SAS_INFO_FMT "\tBEFORE adding volume: "
++ " handle (0x%04x)\n", ioc->name,
++ volume_pg1.DevHandle);
+ _scsih_sas_volume_add(ioc, &element);
++ printk(MPT2SAS_INFO_FMT "\tAFTER adding volume: "
++ " handle (0x%04x)\n", ioc->name,
++ volume_pg1.DevHandle);
+ }
+ }
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: volumes complete\n",
++ ioc->name);
++
+ skip_to_sas:
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: end devices start\n",
++ ioc->name);
+ /* sas devices */
+ handle = 0xFFFF;
+ while (!(mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+@@ -7099,6 +7185,13 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
++ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
++ printk(MPT2SAS_INFO_FMT "\tbreak from end device scan:"
++ " ioc_status(0x%04x), loginfo(0x%08x)\n",
++ ioc->name, ioc_status,
++ le32_to_cpu(mpi_reply.IOCLogInfo));
++ break;
++ }
+ handle = le16_to_cpu(sas_device_pg0.DevHandle);
+ if (!(_scsih_is_end_device(
+ le32_to_cpu(sas_device_pg0.DeviceInfo))))
+@@ -7109,12 +7202,31 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
+ continue;
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
++ printk(MPT2SAS_INFO_FMT "\tBEFORE adding end device: "
++ "handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(sas_device_pg0.SASAddress));
+ mpt2sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+- _scsih_add_device(ioc, handle, 0, 0);
++ retry_count = 0;
++ /* This will retry adding the end device.
++ * _scsih_add_device() will decide on retries and
++ * return "1" when it should be retried
++ */
++ while (_scsih_add_device(ioc, handle, retry_count++,
++ 0)) {
++ ssleep(1);
++ }
++ printk(MPT2SAS_INFO_FMT "\tAFTER adding end device: "
++ "handle (0x%04x), sas_addr(0x%016llx)\n",
++ ioc->name, handle, (unsigned long long)
++ le64_to_cpu(sas_device_pg0.SASAddress));
+ }
+ }
+
++ printk(MPT2SAS_INFO_FMT "\tscan devices: end devices complete\n",
++ ioc->name);
++
+ printk(MPT2SAS_INFO_FMT "scan devices: complete\n", ioc->name);
+ }
+
+@@ -7206,7 +7318,9 @@ _firmware_event_work(struct work_struct *work)
+ case MPT2SAS_PORT_ENABLE_COMPLETE:
+ ioc->start_scan = 0;
+
+-
++ if (missing_delay[0] != -1 && missing_delay[1] != -1)
++ mpt2sas_base_update_missing_delay(ioc, missing_delay[0],
++ missing_delay[1]);
+
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete "
+ "from worker thread\n", ioc->name));
+diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
+index d4ed9eb..caac1b2 100644
+--- a/drivers/scsi/osd/osd_uld.c
++++ b/drivers/scsi/osd/osd_uld.c
+@@ -465,7 +465,7 @@ static int osd_probe(struct device *dev)
+ oud->class_dev.class = &osd_uld_class;
+ oud->class_dev.parent = dev;
+ oud->class_dev.release = __remove;
+- error = dev_set_name(&oud->class_dev, disk->disk_name);
++ error = dev_set_name(&oud->class_dev, "%s", disk->disk_name);
+ if (error) {
+ OSD_ERR("dev_set_name failed => %d\n", error);
+ goto err_put_cdev;
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index f44d633..6dace1a 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -138,6 +138,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
+ char *buffer_data;
+ struct scsi_mode_data data;
+ struct scsi_sense_hdr sshdr;
++ static const char temp[] = "temporary ";
+ int len;
+
+ if (sdp->type != TYPE_DISK)
+@@ -146,6 +147,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
+ * it's not worth the risk */
+ return -EINVAL;
+
++ if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
++ buf += sizeof(temp) - 1;
++ sdkp->cache_override = 1;
++ } else {
++ sdkp->cache_override = 0;
++ }
++
+ for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
+ len = strlen(sd_cache_types[i]);
+ if (strncmp(sd_cache_types[i], buf, len) == 0 &&
+@@ -158,6 +166,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
+ return -EINVAL;
+ rcd = ct & 0x01 ? 1 : 0;
+ wce = ct & 0x02 ? 1 : 0;
++
++ if (sdkp->cache_override) {
++ sdkp->WCE = wce;
++ sdkp->RCD = rcd;
++ return count;
++ }
++
+ if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
+ SD_MAX_RETRIES, &data, NULL))
+ return -EINVAL;
+@@ -2037,6 +2052,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
+ int old_rcd = sdkp->RCD;
+ int old_dpofua = sdkp->DPOFUA;
+
++
++ if (sdkp->cache_override)
++ return;
++
+ first_len = 4;
+ if (sdp->skip_ms_page_8) {
+ if (sdp->type == TYPE_RBC)
+@@ -2518,6 +2537,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
+ sdkp->capacity = 0;
+ sdkp->media_present = 1;
+ sdkp->write_prot = 0;
++ sdkp->cache_override = 0;
+ sdkp->WCE = 0;
+ sdkp->RCD = 0;
+ sdkp->ATO = 0;
+diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
+index 4163f29..e3e3cd2 100644
+--- a/drivers/scsi/sd.h
++++ b/drivers/scsi/sd.h
+@@ -64,6 +64,7 @@ struct scsi_disk {
+ u8 protection_type;/* Data Integrity Field */
+ u8 provisioning_mode;
+ unsigned ATO : 1; /* state of disk ATO bit */
++ unsigned cache_override : 1; /* temp override of WCE,RCD */
+ unsigned WCE : 1; /* state of disk WCE bit */
+ unsigned RCD : 1; /* state of disk RCD bit, unused */
+ unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
+diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
+index 2594a31..926d483 100644
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -541,13 +541,20 @@ out:
+ */
+ static inline int valid_io_request(struct zram *zram, struct bio *bio)
+ {
+- if (unlikely(
+- (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
+- (bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
+- (bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {
++ u64 start, end, bound;
++
++ /* unaligned request */
++ if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
++ return 0;
++ if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
++ return 0;
+
++ start = bio->bi_sector;
++ end = start + (bio->bi_size >> SECTOR_SHIFT);
++ bound = zram->disksize >> SECTOR_SHIFT;
++ /* out of range range */
++ if (unlikely(start >= bound || end >= bound || start > end))
+ return 0;
+- }
+
+ /* I/O request is valid */
+ return 1;
+@@ -702,7 +709,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
+ struct zram *zram;
+
+ zram = bdev->bd_disk->private_data;
++ down_write(&zram->lock);
+ zram_free_page(zram, index);
++ up_write(&zram->lock);
+ zram_stat64_inc(zram, &zram->stats.notify_free);
+ }
+
+@@ -713,7 +722,7 @@ static const struct block_device_operations zram_devops = {
+
+ static int create_device(struct zram *zram, int device_id)
+ {
+- int ret = 0;
++ int ret = -ENOMEM;
+
+ init_rwsem(&zram->lock);
+ init_rwsem(&zram->init_lock);
+@@ -723,7 +732,6 @@ static int create_device(struct zram *zram, int device_id)
+ if (!zram->queue) {
+ pr_err("Error allocating disk queue for device %d\n",
+ device_id);
+- ret = -ENOMEM;
+ goto out;
+ }
+
+@@ -733,11 +741,9 @@ static int create_device(struct zram *zram, int device_id)
+ /* gendisk structure */
+ zram->disk = alloc_disk(1);
+ if (!zram->disk) {
+- blk_cleanup_queue(zram->queue);
+ pr_warning("Error allocating disk structure for device %d\n",
+ device_id);
+- ret = -ENOMEM;
+- goto out;
++ goto out_free_queue;
+ }
+
+ zram->disk->major = zram_major;
+@@ -766,11 +772,17 @@ static int create_device(struct zram *zram, int device_id)
+ &zram_disk_attr_group);
+ if (ret < 0) {
+ pr_warning("Error creating sysfs group");
+- goto out;
++ goto out_free_disk;
+ }
+
+ zram->init_done = 0;
++ return 0;
+
++out_free_disk:
++ del_gendisk(zram->disk);
++ put_disk(zram->disk);
++out_free_queue:
++ blk_cleanup_queue(zram->queue);
+ out:
+ return ret;
+ }
+@@ -846,9 +858,11 @@ static void __exit zram_exit(void)
+ for (i = 0; i < zram_num_devices; i++) {
+ zram = &zram_devices[i];
+
++ get_disk(zram->disk);
+ destroy_device(zram);
+ if (zram->init_done)
+ zram_reset_device(zram);
++ put_disk(zram->disk);
+ }
+
+ unregister_blkdev(zram_major, "zram");
+diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
+index e5cd246..87f2fec 100644
+--- a/drivers/staging/zram/zram_drv.h
++++ b/drivers/staging/zram/zram_drv.h
+@@ -107,8 +107,9 @@ struct zram {
+ void *compress_buffer;
+ struct table *table;
+ spinlock_t stat64_lock; /* protect 64-bit stats */
+- struct rw_semaphore lock; /* protect compression buffers and table
+- * against concurrent read and writes */
++ struct rw_semaphore lock; /* protect compression buffers, table,
++ * 32bit stat counters against concurrent
++ * notifications, reads and writes */
+ struct request_queue *queue;
+ struct gendisk *disk;
+ int init_done;
+diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
+index 0ea8ed2..1fae1e9 100644
+--- a/drivers/staging/zram/zram_sysfs.c
++++ b/drivers/staging/zram/zram_sysfs.c
+@@ -186,10 +186,12 @@ static ssize_t mem_used_total_show(struct device *dev,
+ u64 val = 0;
+ struct zram *zram = dev_to_zram(dev);
+
++ down_read(&zram->init_lock);
+ if (zram->init_done) {
+ val = xv_get_total_size_bytes(zram->mem_pool) +
+ ((u64)(zram->stats.pages_expand) << PAGE_SHIFT);
+ }
++ up_read(&zram->init_lock);
+
+ return sprintf(buf, "%llu\n", val);
+ }
+diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
+index 83dcf49..3b80285 100644
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -419,7 +419,7 @@ static ssize_t __iscsi_##prefix##_store_##name( \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+- snprintf(auth->name, PAGE_SIZE, "%s", page); \
++ snprintf(auth->name, sizeof(auth->name), "%s", page); \
+ if (!strncmp("NULL", auth->name, 4)) \
+ auth->naf_flags &= ~flags; \
+ else \
+diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
+index 6986256..6c9bcdf 100644
+--- a/drivers/tty/serial/8250_pci.c
++++ b/drivers/tty/serial/8250_pci.c
+@@ -4083,10 +4083,6 @@ static struct pci_device_id serial_pci_tbl[] = {
+ PCI_VENDOR_ID_IBM, 0x0299,
+ 0, 0, pbn_b0_bt_2_115200 },
+
+- { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835,
+- 0x1000, 0x0012,
+- 0, 0, pbn_b0_bt_2_115200 },
+-
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index 8d70fbc..c0b4872 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -940,22 +940,37 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
+ static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
+ {
+ u8 fcr = ioread8(priv->membase + UART_FCR);
++ struct uart_port *port = &priv->port;
++ struct tty_struct *tty = tty_port_tty_get(&port->state->port);
++ char *error_msg[5] = {};
++ int i = 0;
+
+ /* Reset FIFO */
+ fcr |= UART_FCR_CLEAR_RCVR;
+ iowrite8(fcr, priv->membase + UART_FCR);
+
+ if (lsr & PCH_UART_LSR_ERR)
+- dev_err(&priv->pdev->dev, "Error data in FIFO\n");
++ error_msg[i++] = "Error data in FIFO\n";
+
+- if (lsr & UART_LSR_FE)
+- dev_err(&priv->pdev->dev, "Framing Error\n");
++ if (lsr & UART_LSR_FE) {
++ port->icount.frame++;
++ error_msg[i++] = " Framing Error\n";
++ }
+
+- if (lsr & UART_LSR_PE)
+- dev_err(&priv->pdev->dev, "Parity Error\n");
++ if (lsr & UART_LSR_PE) {
++ port->icount.parity++;
++ error_msg[i++] = " Parity Error\n";
++ }
+
+- if (lsr & UART_LSR_OE)
+- dev_err(&priv->pdev->dev, "Overrun Error\n");
++ if (lsr & UART_LSR_OE) {
++ port->icount.overrun++;
++ error_msg[i++] = " Overrun Error\n";
++ }
++
++ if (tty == NULL) {
++ for (i = 0; error_msg[i] != NULL; i++)
++ dev_err(&priv->pdev->dev, error_msg[i]);
++ }
+ }
+
+ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
+diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
+index a5570b6..8d7fb6b 100644
+--- a/drivers/usb/gadget/f_mass_storage.c
++++ b/drivers/usb/gadget/f_mass_storage.c
+@@ -512,6 +512,7 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
+ /* Caller must hold fsg->lock */
+ static void wakeup_thread(struct fsg_common *common)
+ {
++ smp_wmb(); /* ensure the write of bh->state is complete */
+ /* Tell the main thread that something has happened */
+ common->thread_wakeup_needed = 1;
+ if (common->thread_task)
+@@ -731,6 +732,7 @@ static int sleep_thread(struct fsg_common *common)
+ }
+ __set_current_state(TASK_RUNNING);
+ common->thread_wakeup_needed = 0;
++ smp_rmb(); /* ensure the latest bh->state is visible */
+ return rc;
+ }
+
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 5018e33..ec73541 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -271,6 +271,10 @@ static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci
+ ctx->size += CTX_SIZE(xhci->hcc_params);
+
+ ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
++ if (!ctx->bytes) {
++ kfree(ctx);
++ return NULL;
++ }
+ memset(ctx->bytes, 0, ctx->size);
+ return ctx;
+ }
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 8ea37bc..b8365a7 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -159,8 +159,6 @@ static void option_instat_callback(struct urb *urb);
+ #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000
+ #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
+ #define NOVATELWIRELESS_PRODUCT_E362 0x9010
+-#define NOVATELWIRELESS_PRODUCT_G1 0xA001
+-#define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
+ #define NOVATELWIRELESS_PRODUCT_G2 0xA010
+ #define NOVATELWIRELESS_PRODUCT_MC551 0xB001
+
+@@ -744,8 +742,6 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) },
+- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
+- { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
+ /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
+ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 5535c3a..e8c4f0c 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -37,7 +37,13 @@ static const struct usb_device_id id_table[] = {
+ {DEVICE_G1K(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */
+ {DEVICE_G1K(0x413c, 0x8172)}, /* Dell Gobi Modem device */
+ {DEVICE_G1K(0x413c, 0x8171)}, /* Dell Gobi QDL device */
+- {DEVICE_G1K(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa001)}, /* Novatel/Verizon USB-1000 */
++ {DEVICE_G1K(0x1410, 0xa002)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa003)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa004)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa005)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa006)}, /* Novatel Gobi Modem device */
++ {DEVICE_G1K(0x1410, 0xa007)}, /* Novatel Gobi Modem device */
+ {DEVICE_G1K(0x1410, 0xa008)}, /* Novatel Gobi QDL device */
+ {DEVICE_G1K(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
+ {DEVICE_G1K(0x0b05, 0x1774)}, /* Asus Gobi QDL device */
+diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
+index 5855d17..9d8feac 100644
+--- a/drivers/video/console/vgacon.c
++++ b/drivers/video/console/vgacon.c
+@@ -42,6 +42,7 @@
+ #include <linux/kd.h>
+ #include <linux/slab.h>
+ #include <linux/vt_kern.h>
++#include <linux/sched.h>
+ #include <linux/selection.h>
+ #include <linux/spinlock.h>
+ #include <linux/ioport.h>
+@@ -1124,11 +1125,15 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+
+ if (arg) {
+ if (set)
+- for (i = 0; i < cmapsz; i++)
++ for (i = 0; i < cmapsz; i++) {
+ vga_writeb(arg[i], charmap + i);
++ cond_resched();
++ }
+ else
+- for (i = 0; i < cmapsz; i++)
++ for (i = 0; i < cmapsz; i++) {
+ arg[i] = vga_readb(charmap + i);
++ cond_resched();
++ }
+
+ /*
+ * In 512-character mode, the character map is not contiguous if
+@@ -1139,11 +1144,15 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+ charmap += 2 * cmapsz;
+ arg += cmapsz;
+ if (set)
+- for (i = 0; i < cmapsz; i++)
++ for (i = 0; i < cmapsz; i++) {
+ vga_writeb(arg[i], charmap + i);
++ cond_resched();
++ }
+ else
+- for (i = 0; i < cmapsz; i++)
++ for (i = 0; i < cmapsz; i++) {
+ arg[i] = vga_readb(charmap + i);
++ cond_resched();
++ }
+ }
+ }
+
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 53ab273..c103267 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -55,17 +55,24 @@ static void bdev_inode_switch_bdi(struct inode *inode,
+ struct backing_dev_info *dst)
+ {
+ struct backing_dev_info *old = inode->i_data.backing_dev_info;
++ bool wakeup_bdi = false;
+
+ if (unlikely(dst == old)) /* deadlock avoidance */
+ return;
+ bdi_lock_two(&old->wb, &dst->wb);
+ spin_lock(&inode->i_lock);
+ inode->i_data.backing_dev_info = dst;
+- if (inode->i_state & I_DIRTY)
++ if (inode->i_state & I_DIRTY) {
++ if (bdi_cap_writeback_dirty(dst) && !wb_has_dirty_io(&dst->wb))
++ wakeup_bdi = true;
+ list_move(&inode->i_wb_list, &dst->wb.b_dirty);
++ }
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&old->wb.list_lock);
+ spin_unlock(&dst->wb.list_lock);
++
++ if (wakeup_bdi)
++ bdi_wakeup_thread_delayed(dst);
+ }
+
+ sector_t blkdev_max_block(struct block_device *bdev)
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index b48f15f..de268a8 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -70,8 +70,14 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
+ /*
+ * express utilization in terms of large blocks to avoid
+ * overflow on 32-bit machines.
++ *
++ * NOTE: for the time being, we make bsize == frsize to humor
++ * not-yet-ancient versions of glibc that are broken.
++ * Someday, we will probably want to report a real block
++ * size... whatever that may mean for a network file system!
+ */
+ buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
++ buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
+ buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
+ buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
+ buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
+@@ -79,7 +85,6 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
+ buf->f_files = le64_to_cpu(st.num_objects);
+ buf->f_ffree = -1;
+ buf->f_namelen = NAME_MAX;
+- buf->f_frsize = PAGE_CACHE_SIZE;
+
+ /* leave fsid little-endian, regardless of host endianness */
+ fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index edcbf37..a097817 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -21,7 +21,7 @@
+
+ /* large granularity for statfs utilization stats to facilitate
+ * large volume sizes on 32-bit machines. */
+-#define CEPH_BLOCK_SHIFT 20 /* 1 MB */
++#define CEPH_BLOCK_SHIFT 22 /* 4 MB */
+ #define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT)
+
+ #define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */
+diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
+index 6d02fd5..aab18fe 100644
+--- a/fs/cifs/cifs_unicode.h
++++ b/fs/cifs/cifs_unicode.h
+@@ -323,14 +323,14 @@ UniToupper(register wchar_t uc)
+ /*
+ * UniStrupr: Upper case a unicode string
+ */
+-static inline wchar_t *
+-UniStrupr(register wchar_t *upin)
++static inline __le16 *
++UniStrupr(register __le16 *upin)
+ {
+- register wchar_t *up;
++ register __le16 *up;
+
+ up = upin;
+ while (*up) { /* For all characters */
+- *up = UniToupper(*up);
++ *up = cpu_to_le16(UniToupper(le16_to_cpu(*up)));
+ up++;
+ }
+ return upin; /* Return input pointer */
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index 5d9b9ac..cdcd665 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -394,7 +394,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
+ int rc = 0;
+ int len;
+ char nt_hash[CIFS_NTHASH_SIZE];
+- wchar_t *user;
++ __le16 *user;
+ wchar_t *domain;
+ wchar_t *server;
+
+@@ -419,7 +419,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
+ return rc;
+ }
+
+- /* convert ses->user_name to unicode and uppercase */
++ /* convert ses->user_name to unicode */
+ len = strlen(ses->user_name);
+ user = kmalloc(2 + (len * 2), GFP_KERNEL);
+ if (user == NULL) {
+@@ -427,7 +427,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
+ rc = -ENOMEM;
+ return rc;
+ }
+- len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp);
++ len = cifs_strtoUCS(user, ses->user_name, len, nls_cp);
+ UniStrupr(user);
+
+ rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
+diff --git a/fs/exec.c b/fs/exec.c
+index 312e297..a2d0e51 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1159,13 +1159,6 @@ void setup_new_exec(struct linux_binprm * bprm)
+ set_dumpable(current->mm, suid_dumpable);
+ }
+
+- /*
+- * Flush performance counters when crossing a
+- * security domain:
+- */
+- if (!get_dumpable(current->mm))
+- perf_event_exit_task(current);
+-
+ /* An exec changes our domain. We are no longer part of the thread
+ group */
+
+@@ -1229,6 +1222,15 @@ void install_exec_creds(struct linux_binprm *bprm)
+
+ commit_creds(bprm->cred);
+ bprm->cred = NULL;
++
++ /*
++ * Disable monitoring for regular users
++ * when executing setuid binaries. Must
++ * wait until new credentials are committed
++ * by commit_creds() above
++ */
++ if (get_dumpable(current->mm) != SUID_DUMP_USER)
++ perf_event_exit_task(current);
+ /*
+ * cred_guard_mutex must be held at least to this point to prevent
+ * ptrace_attach() from altering our determination of the task's
+diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
+index 642dc6d..1272dfb 100644
+--- a/fs/ext3/namei.c
++++ b/fs/ext3/namei.c
+@@ -585,11 +585,8 @@ static int htree_dirblock_to_tree(struct file *dir_file,
+ if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
+ (block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb))
+ +((char *)de - bh->b_data))) {
+- /* On error, skip the f_pos to the next block. */
+- dir_file->f_pos = (dir_file->f_pos |
+- (dir->i_sb->s_blocksize - 1)) + 1;
+- brelse (bh);
+- return count;
++ /* silently ignore the rest of the block */
++ break;
+ }
+ ext3fs_dirhash(de->name, de->name_len, hinfo);
+ if ((hinfo->hash < start_hash) ||
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index ce0bc25..3e8fc80 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4801,7 +4801,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
+ error = ext4_get_inode_loc(inode, &iloc);
+ if (error)
+ return error;
+- physical = iloc.bh->b_blocknr << blockbits;
++ physical = (__u64)iloc.bh->b_blocknr << blockbits;
+ offset = EXT4_GOOD_OLD_INODE_SIZE +
+ EXT4_I(inode)->i_extra_isize;
+ physical += offset;
+@@ -4809,7 +4809,7 @@ static int ext4_xattr_fiemap(struct inode *inode,
+ flags |= FIEMAP_EXTENT_DATA_INLINE;
+ brelse(iloc.bh);
+ } else { /* external block */
+- physical = EXT4_I(inode)->i_file_acl << blockbits;
++ physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
+ length = inode->i_sb->s_blocksize;
+ }
+
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 025b4b6..45778a6 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4335,7 +4335,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat)
+ {
+ struct inode *inode;
+- unsigned long delalloc_blocks;
++ unsigned long long delalloc_blocks;
+
+ inode = dentry->d_inode;
+ generic_fillattr(inode, stat);
+@@ -4352,7 +4352,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ */
+ delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
+
+- stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
++ stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits-9);
+ return 0;
+ }
+
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 88f97e5..3ca3b7f 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -585,11 +585,8 @@ static int htree_dirblock_to_tree(struct file *dir_file,
+ if (ext4_check_dir_entry(dir, NULL, de, bh,
+ (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+ + ((char *)de - bh->b_data))) {
+- /* On error, skip the f_pos to the next block. */
+- dir_file->f_pos = (dir_file->f_pos |
+- (dir->i_sb->s_blocksize - 1)) + 1;
+- brelse(bh);
+- return count;
++ /* silently ignore the rest of the block */
++ break;
+ }
+ ext4fs_dirhash(de->name, de->name_len, hinfo);
+ if ((hinfo->hash < start_hash) ||
+diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
+index a790821..ea3d1ca 100644
+--- a/fs/hpfs/map.c
++++ b/fs/hpfs/map.c
+@@ -17,7 +17,8 @@ unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
+ struct quad_buffer_head *qbh, char *id)
+ {
+ secno sec;
+- if (hpfs_sb(s)->sb_chk) if (bmp_block * 16384 > hpfs_sb(s)->sb_fs_size) {
++ unsigned n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14;
++ if (hpfs_sb(s)->sb_chk) if (bmp_block >= n_bands) {
+ hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id);
+ return NULL;
+ }
+diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
+index 98580a3..f760c15 100644
+--- a/fs/hpfs/super.c
++++ b/fs/hpfs/super.c
+@@ -553,7 +553,13 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
+ sbi->sb_cp_table = NULL;
+ sbi->sb_c_bitmap = -1;
+ sbi->sb_max_fwd_alloc = 0xffffff;
+-
++
++ if (sbi->sb_fs_size >= 0x80000000) {
++ hpfs_error(s, "invalid size in superblock: %08x",
++ (unsigned)sbi->sb_fs_size);
++ goto bail4;
++ }
++
+ /* Load bitmap directory */
+ if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps))))
+ goto bail4;
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 6ac5bb1..18ea4d9 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -470,10 +470,10 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
+ &transaction->t_outstanding_credits);
+ if (atomic_dec_and_test(&transaction->t_updates))
+ wake_up(&journal->j_wait_updates);
++ tid = transaction->t_tid;
+ spin_unlock(&transaction->t_handle_lock);
+
+ jbd_debug(2, "restarting handle %p\n", handle);
+- tid = transaction->t_tid;
+ need_to_start = !tid_geq(journal->j_commit_request, tid);
+ read_unlock(&journal->j_state_lock);
+ if (need_to_start)
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index ade5316..99625b8 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -177,8 +177,8 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
+ */
+ memcpy(p, argp->p, avail);
+ /* step to next page */
+- argp->p = page_address(argp->pagelist[0]);
+ argp->pagelist++;
++ argp->p = page_address(argp->pagelist[0]);
+ if (argp->pagelen < PAGE_SIZE) {
+ argp->end = argp->p + (argp->pagelen>>2);
+ argp->pagelen = 0;
+diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
+index 0d5ea9c..bef187b 100644
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -6499,6 +6499,16 @@ static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args)
+ }
+
+ new_oi = OCFS2_I(args->new_inode);
++ /*
++ * Adjust extent record count to reserve space for extended attribute.
++ * Inline data count had been adjusted in ocfs2_duplicate_inline_data().
++ */
++ if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) &&
++ !(ocfs2_inode_is_fast_symlink(args->new_inode))) {
++ struct ocfs2_extent_list *el = &new_di->id2.i_list;
++ le16_add_cpu(&el->l_count, -(inline_size /
++ sizeof(struct ocfs2_extent_rec)));
++ }
+ spin_lock(&new_oi->ip_lock);
+ new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL;
+ new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
+diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
+index 6834920..aaebf0f 100644
+--- a/fs/ubifs/dir.c
++++ b/fs/ubifs/dir.c
+@@ -357,31 +357,50 @@ static unsigned int vfs_dent_type(uint8_t type)
+ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
+ {
+ int err, over = 0;
++ loff_t pos = file->f_pos;
+ struct qstr nm;
+ union ubifs_key key;
+ struct ubifs_dent_node *dent;
+ struct inode *dir = file->f_path.dentry->d_inode;
+ struct ubifs_info *c = dir->i_sb->s_fs_info;
+
+- dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos);
++ dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, pos);
+
+- if (file->f_pos > UBIFS_S_KEY_HASH_MASK || file->f_pos == 2)
++ if (pos > UBIFS_S_KEY_HASH_MASK || pos == 2)
+ /*
+ * The directory was seek'ed to a senseless position or there
+ * are no more entries.
+ */
+ return 0;
+
++ if (file->f_version == 0) {
++ /*
++ * The file was seek'ed, which means that @file->private_data
++ * is now invalid. This may also be just the first
++ * 'ubifs_readdir()' invocation, in which case
++ * @file->private_data is NULL, and the below code is
++ * basically a no-op.
++ */
++ kfree(file->private_data);
++ file->private_data = NULL;
++ }
++
++ /*
++ * 'generic_file_llseek()' unconditionally sets @file->f_version to
++ * zero, and we use this for detecting whether the file was seek'ed.
++ */
++ file->f_version = 1;
++
+ /* File positions 0 and 1 correspond to "." and ".." */
+- if (file->f_pos == 0) {
++ if (pos == 0) {
+ ubifs_assert(!file->private_data);
+ over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR);
+ if (over)
+ return 0;
+- file->f_pos = 1;
++ file->f_pos = pos = 1;
+ }
+
+- if (file->f_pos == 1) {
++ if (pos == 1) {
+ ubifs_assert(!file->private_data);
+ over = filldir(dirent, "..", 2, 1,
+ parent_ino(file->f_path.dentry), DT_DIR);
+@@ -397,7 +416,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
+ goto out;
+ }
+
+- file->f_pos = key_hash_flash(c, &dent->key);
++ file->f_pos = pos = key_hash_flash(c, &dent->key);
+ file->private_data = dent;
+ }
+
+@@ -405,17 +424,16 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
+ if (!dent) {
+ /*
+ * The directory was seek'ed to and is now readdir'ed.
+- * Find the entry corresponding to @file->f_pos or the
+- * closest one.
++ * Find the entry corresponding to @pos or the closest one.
+ */
+- dent_key_init_hash(c, &key, dir->i_ino, file->f_pos);
++ dent_key_init_hash(c, &key, dir->i_ino, pos);
+ nm.name = NULL;
+ dent = ubifs_tnc_next_ent(c, &key, &nm);
+ if (IS_ERR(dent)) {
+ err = PTR_ERR(dent);
+ goto out;
+ }
+- file->f_pos = key_hash_flash(c, &dent->key);
++ file->f_pos = pos = key_hash_flash(c, &dent->key);
+ file->private_data = dent;
+ }
+
+@@ -427,7 +445,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
+ ubifs_inode(dir)->creat_sqnum);
+
+ nm.len = le16_to_cpu(dent->nlen);
+- over = filldir(dirent, dent->name, nm.len, file->f_pos,
++ over = filldir(dirent, dent->name, nm.len, pos,
+ le64_to_cpu(dent->inum),
+ vfs_dent_type(dent->type));
+ if (over)
+@@ -443,9 +461,17 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
+ }
+
+ kfree(file->private_data);
+- file->f_pos = key_hash_flash(c, &dent->key);
++ file->f_pos = pos = key_hash_flash(c, &dent->key);
+ file->private_data = dent;
+ cond_resched();
++
++ if (file->f_version == 0)
++ /*
++ * The file was seek'ed meanwhile, lets return and start
++ * reading direntries from the new position on the next
++ * invocation.
++ */
++ return 0;
+ }
+
+ out:
+@@ -456,15 +482,13 @@ out:
+
+ kfree(file->private_data);
+ file->private_data = NULL;
++ /* 2 is a special value indicating that there are no more direntries */
+ file->f_pos = 2;
+ return 0;
+ }
+
+-/* If a directory is seeked, we have to free saved readdir() state */
+ static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int origin)
+ {
+- kfree(file->private_data);
+- file->private_data = NULL;
+ return generic_file_llseek(file, offset, origin);
+ }
+
+diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
+index 9bab75f..ace0984 100644
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
+@@ -531,16 +531,54 @@ static inline struct cgroup_subsys_state *cgroup_subsys_state(
+ return cgrp->subsys[subsys_id];
+ }
+
+-/*
+- * function to get the cgroup_subsys_state which allows for extra
+- * rcu_dereference_check() conditions, such as locks used during the
+- * cgroup_subsys::attach() methods.
++/**
++ * task_css_set_check - obtain a task's css_set with extra access conditions
++ * @task: the task to obtain css_set for
++ * @__c: extra condition expression to be passed to rcu_dereference_check()
++ *
++ * A task's css_set is RCU protected, initialized and exited while holding
++ * task_lock(), and can only be modified while holding both cgroup_mutex
++ * and task_lock() while the task is alive. This macro verifies that the
++ * caller is inside proper critical section and returns @task's css_set.
++ *
++ * The caller can also specify additional allowed conditions via @__c, such
++ * as locks used during the cgroup_subsys::attach() methods.
++ */
++#define task_css_set_check(task, __c) \
++ rcu_dereference_check((task)->cgroups, \
++ lockdep_is_held(&(task)->alloc_lock) || \
++ cgroup_lock_is_held() || (__c))
++
++/**
++ * task_subsys_state_check - obtain css for (task, subsys) w/ extra access conds
++ * @task: the target task
++ * @subsys_id: the target subsystem ID
++ * @__c: extra condition expression to be passed to rcu_dereference_check()
++ *
++ * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
++ * synchronization rules are the same as task_css_set_check().
+ */
+ #define task_subsys_state_check(task, subsys_id, __c) \
+- rcu_dereference_check(task->cgroups->subsys[subsys_id], \
+- lockdep_is_held(&task->alloc_lock) || \
+- cgroup_lock_is_held() || (__c))
++ task_css_set_check((task), (__c))->subsys[(subsys_id)]
++
++/**
++ * task_css_set - obtain a task's css_set
++ * @task: the task to obtain css_set for
++ *
++ * See task_css_set_check().
++ */
++static inline struct css_set *task_css_set(struct task_struct *task)
++{
++ return task_css_set_check(task, false);
++}
+
++/**
++ * task_subsys_state - obtain css for (task, subsys)
++ * @task: the target task
++ * @subsys_id: the target subsystem ID
++ *
++ * See task_subsys_state_check().
++ */
+ static inline struct cgroup_subsys_state *
+ task_subsys_state(struct task_struct *task, int subsys_id)
+ {
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index a2227f7..32697c1 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -327,6 +327,17 @@ static inline unsigned hstate_index_to_shift(unsigned index)
+ return hstates[index].order + PAGE_SHIFT;
+ }
+
++pgoff_t __basepage_index(struct page *page);
++
++/* Return page->index in PAGE_SIZE units */
++static inline pgoff_t basepage_index(struct page *page)
++{
++ if (!PageCompound(page))
++ return page->index;
++
++ return __basepage_index(page);
++}
++
+ #else
+ struct hstate {};
+ #define alloc_huge_page_node(h, nid) NULL
+@@ -345,6 +356,11 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
+ return 1;
+ }
+ #define hstate_index_to_shift(index) 0
++
++static inline pgoff_t basepage_index(struct page *page)
++{
++ return page->index;
++}
+ #endif
+
+ #endif /* _LINUX_HUGETLB_H */
+diff --git a/include/linux/nbd.h b/include/linux/nbd.h
+index d146ca1..e6fe174 100644
+--- a/include/linux/nbd.h
++++ b/include/linux/nbd.h
+@@ -68,6 +68,7 @@ struct nbd_device {
+ u64 bytesize;
+ pid_t pid; /* pid of nbd-client, if attached */
+ int xmit_timeout;
++ int disconnect; /* a disconnect has been requested by user */
+ };
+
+ #endif
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 9b9b2aa..3cfcfea 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -819,8 +819,7 @@ struct perf_event {
+ /* mmap bits */
+ struct mutex mmap_mutex;
+ atomic_t mmap_count;
+- int mmap_locked;
+- struct user_struct *mmap_user;
++
+ struct ring_buffer *rb;
+ struct list_head rb_entry;
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 9f21915..8be9b746 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -185,9 +185,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+ static void update_context_time(struct perf_event_context *ctx);
+ static u64 perf_event_time(struct perf_event *event);
+
+-static void ring_buffer_attach(struct perf_event *event,
+- struct ring_buffer *rb);
+-
+ void __weak perf_event_print_debug(void) { }
+
+ extern __weak const char *perf_pmu_name(void)
+@@ -714,8 +711,18 @@ perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
+ {
+ struct perf_event_context *ctx;
+
+- rcu_read_lock();
+ retry:
++ /*
++ * One of the few rules of preemptible RCU is that one cannot do
++ * rcu_read_unlock() while holding a scheduler (or nested) lock when
++ * part of the read side critical section was preemptible -- see
++ * rcu_read_unlock_special().
++ *
++ * Since ctx->lock nests under rq->lock we must ensure the entire read
++ * side critical section is non-preemptible.
++ */
++ preempt_disable();
++ rcu_read_lock();
+ ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
+ if (ctx) {
+ /*
+@@ -731,6 +738,8 @@ retry:
+ raw_spin_lock_irqsave(&ctx->lock, *flags);
+ if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
+ raw_spin_unlock_irqrestore(&ctx->lock, *flags);
++ rcu_read_unlock();
++ preempt_enable();
+ goto retry;
+ }
+
+@@ -740,6 +749,7 @@ retry:
+ }
+ }
+ rcu_read_unlock();
++ preempt_enable();
+ return ctx;
+ }
+
+@@ -1687,7 +1697,16 @@ static int __perf_event_enable(void *info)
+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+ int err;
+
+- if (WARN_ON_ONCE(!ctx->is_active))
++ /*
++ * There's a time window between 'ctx->is_active' check
++ * in perf_event_enable function and this place having:
++ * - IRQs on
++ * - ctx->lock unlocked
++ *
++ * where the task could be killed and 'ctx' deactivated
++ * by perf_event_exit_task.
++ */
++ if (!ctx->is_active)
+ return -EINVAL;
+
+ raw_spin_lock(&ctx->lock);
+@@ -2939,6 +2958,7 @@ static void free_event_rcu(struct rcu_head *head)
+ }
+
+ static void ring_buffer_put(struct ring_buffer *rb);
++static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
+
+ static void free_event(struct perf_event *event)
+ {
+@@ -2962,8 +2982,22 @@ static void free_event(struct perf_event *event)
+ }
+
+ if (event->rb) {
+- ring_buffer_put(event->rb);
+- event->rb = NULL;
++ struct ring_buffer *rb;
++
++ /*
++ * Can happen when we close an event with re-directed output.
++ *
++ * Since we have a 0 refcount, perf_mmap_close() will skip
++ * over us; possibly making our ring_buffer_put() the last.
++ */
++ mutex_lock(&event->mmap_mutex);
++ rb = event->rb;
++ if (rb) {
++ rcu_assign_pointer(event->rb, NULL);
++ ring_buffer_detach(event, rb);
++ ring_buffer_put(rb); /* could be last */
++ }
++ mutex_unlock(&event->mmap_mutex);
+ }
+
+ if (is_cgroup_event(event))
+@@ -3201,30 +3235,13 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
+ unsigned int events = POLL_HUP;
+
+ /*
+- * Race between perf_event_set_output() and perf_poll(): perf_poll()
+- * grabs the rb reference but perf_event_set_output() overrides it.
+- * Here is the timeline for two threads T1, T2:
+- * t0: T1, rb = rcu_dereference(event->rb)
+- * t1: T2, old_rb = event->rb
+- * t2: T2, event->rb = new rb
+- * t3: T2, ring_buffer_detach(old_rb)
+- * t4: T1, ring_buffer_attach(rb1)
+- * t5: T1, poll_wait(event->waitq)
+- *
+- * To avoid this problem, we grab mmap_mutex in perf_poll()
+- * thereby ensuring that the assignment of the new ring buffer
+- * and the detachment of the old buffer appear atomic to perf_poll()
++ * Pin the event->rb by taking event->mmap_mutex; otherwise
++ * perf_event_set_output() can swizzle our rb and make us miss wakeups.
+ */
+ mutex_lock(&event->mmap_mutex);
+-
+- rcu_read_lock();
+- rb = rcu_dereference(event->rb);
+- if (rb) {
+- ring_buffer_attach(event, rb);
++ rb = event->rb;
++ if (rb)
+ events = atomic_xchg(&rb->poll, 0);
+- }
+- rcu_read_unlock();
+-
+ mutex_unlock(&event->mmap_mutex);
+
+ poll_wait(file, &event->waitq, wait);
+@@ -3538,16 +3555,12 @@ static void ring_buffer_attach(struct perf_event *event,
+ return;
+
+ spin_lock_irqsave(&rb->event_lock, flags);
+- if (!list_empty(&event->rb_entry))
+- goto unlock;
+-
+- list_add(&event->rb_entry, &rb->event_list);
+-unlock:
++ if (list_empty(&event->rb_entry))
++ list_add(&event->rb_entry, &rb->event_list);
+ spin_unlock_irqrestore(&rb->event_lock, flags);
+ }
+
+-static void ring_buffer_detach(struct perf_event *event,
+- struct ring_buffer *rb)
++static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
+ {
+ unsigned long flags;
+
+@@ -3566,13 +3579,10 @@ static void ring_buffer_wakeup(struct perf_event *event)
+
+ rcu_read_lock();
+ rb = rcu_dereference(event->rb);
+- if (!rb)
+- goto unlock;
+-
+- list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+- wake_up_all(&event->waitq);
+-
+-unlock:
++ if (rb) {
++ list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
++ wake_up_all(&event->waitq);
++ }
+ rcu_read_unlock();
+ }
+
+@@ -3601,18 +3611,10 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
+
+ static void ring_buffer_put(struct ring_buffer *rb)
+ {
+- struct perf_event *event, *n;
+- unsigned long flags;
+-
+ if (!atomic_dec_and_test(&rb->refcount))
+ return;
+
+- spin_lock_irqsave(&rb->event_lock, flags);
+- list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
+- list_del_init(&event->rb_entry);
+- wake_up_all(&event->waitq);
+- }
+- spin_unlock_irqrestore(&rb->event_lock, flags);
++ WARN_ON_ONCE(!list_empty(&rb->event_list));
+
+ call_rcu(&rb->rcu_head, rb_free_rcu);
+ }
+@@ -3622,26 +3624,100 @@ static void perf_mmap_open(struct vm_area_struct *vma)
+ struct perf_event *event = vma->vm_file->private_data;
+
+ atomic_inc(&event->mmap_count);
++ atomic_inc(&event->rb->mmap_count);
+ }
+
++/*
++ * A buffer can be mmap()ed multiple times; either directly through the same
++ * event, or through other events by use of perf_event_set_output().
++ *
++ * In order to undo the VM accounting done by perf_mmap() we need to destroy
++ * the buffer here, where we still have a VM context. This means we need
++ * to detach all events redirecting to us.
++ */
+ static void perf_mmap_close(struct vm_area_struct *vma)
+ {
+ struct perf_event *event = vma->vm_file->private_data;
+
+- if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
+- unsigned long size = perf_data_size(event->rb);
+- struct user_struct *user = event->mmap_user;
+- struct ring_buffer *rb = event->rb;
++ struct ring_buffer *rb = event->rb;
++ struct user_struct *mmap_user = rb->mmap_user;
++ int mmap_locked = rb->mmap_locked;
++ unsigned long size = perf_data_size(rb);
++
++ atomic_dec(&rb->mmap_count);
++
++ if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
++ return;
++
++ /* Detach current event from the buffer. */
++ rcu_assign_pointer(event->rb, NULL);
++ ring_buffer_detach(event, rb);
++ mutex_unlock(&event->mmap_mutex);
++
++ /* If there's still other mmap()s of this buffer, we're done. */
++ if (atomic_read(&rb->mmap_count)) {
++ ring_buffer_put(rb); /* can't be last */
++ return;
++ }
++
++ /*
++ * No other mmap()s, detach from all other events that might redirect
++ * into the now unreachable buffer. Somewhat complicated by the
++ * fact that rb::event_lock otherwise nests inside mmap_mutex.
++ */
++again:
++ rcu_read_lock();
++ list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
++ if (!atomic_long_inc_not_zero(&event->refcount)) {
++ /*
++ * This event is en-route to free_event() which will
++ * detach it and remove it from the list.
++ */
++ continue;
++ }
++ rcu_read_unlock();
+
+- atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
+- vma->vm_mm->pinned_vm -= event->mmap_locked;
+- rcu_assign_pointer(event->rb, NULL);
+- ring_buffer_detach(event, rb);
++ mutex_lock(&event->mmap_mutex);
++ /*
++ * Check we didn't race with perf_event_set_output() which can
++ * swizzle the rb from under us while we were waiting to
++ * acquire mmap_mutex.
++ *
++ * If we find a different rb; ignore this event, a next
++ * iteration will no longer find it on the list. We have to
++ * still restart the iteration to make sure we're not now
++ * iterating the wrong list.
++ */
++ if (event->rb == rb) {
++ rcu_assign_pointer(event->rb, NULL);
++ ring_buffer_detach(event, rb);
++ ring_buffer_put(rb); /* can't be last, we still have one */
++ }
+ mutex_unlock(&event->mmap_mutex);
++ put_event(event);
+
+- ring_buffer_put(rb);
+- free_uid(user);
++ /*
++ * Restart the iteration; either we're on the wrong list or
++ * destroyed its integrity by doing a deletion.
++ */
++ goto again;
+ }
++ rcu_read_unlock();
++
++ /*
++ * It could be there's still a few 0-ref events on the list; they'll
++ * get cleaned up by free_event() -- they'll also still have their
++ * ref on the rb and will free it whenever they are done with it.
++ *
++ * Aside from that, this buffer is 'fully' detached and unmapped,
++ * undo the VM accounting.
++ */
++
++ atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
++ vma->vm_mm->pinned_vm -= mmap_locked;
++ free_uid(mmap_user);
++
++ ring_buffer_put(rb); /* could be last */
+ }
+
+ static const struct vm_operations_struct perf_mmap_vmops = {
+@@ -3691,12 +3767,24 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ return -EINVAL;
+
+ WARN_ON_ONCE(event->ctx->parent_ctx);
++again:
+ mutex_lock(&event->mmap_mutex);
+ if (event->rb) {
+- if (event->rb->nr_pages == nr_pages)
+- atomic_inc(&event->rb->refcount);
+- else
++ if (event->rb->nr_pages != nr_pages) {
+ ret = -EINVAL;
++ goto unlock;
++ }
++
++ if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
++ /*
++ * Raced against perf_mmap_close() through
++ * perf_event_set_output(). Try again, hope for better
++ * luck.
++ */
++ mutex_unlock(&event->mmap_mutex);
++ goto again;
++ }
++
+ goto unlock;
+ }
+
+@@ -3737,19 +3825,27 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+ ret = -ENOMEM;
+ goto unlock;
+ }
+- rcu_assign_pointer(event->rb, rb);
++
++ atomic_set(&rb->mmap_count, 1);
++ rb->mmap_locked = extra;
++ rb->mmap_user = get_current_user();
+
+ atomic_long_add(user_extra, &user->locked_vm);
+- event->mmap_locked = extra;
+- event->mmap_user = get_current_user();
+- vma->vm_mm->pinned_vm += event->mmap_locked;
++ vma->vm_mm->pinned_vm += extra;
++
++ ring_buffer_attach(event, rb);
++ rcu_assign_pointer(event->rb, rb);
+
+ unlock:
+ if (!ret)
+ atomic_inc(&event->mmap_count);
+ mutex_unlock(&event->mmap_mutex);
+
+- vma->vm_flags |= VM_RESERVED;
++ /*
++ * Since pinned accounting is per vm we cannot allow fork() to copy our
++ * vma.
++ */
++ vma->vm_flags |= VM_DONTCOPY | VM_RESERVED;
+ vma->vm_ops = &perf_mmap_vmops;
+
+ return ret;
+@@ -6114,6 +6210,8 @@ set:
+ if (atomic_read(&event->mmap_count))
+ goto unlock;
+
++ old_rb = event->rb;
++
+ if (output_event) {
+ /* get the rb we want to redirect to */
+ rb = ring_buffer_get(output_event);
+@@ -6121,16 +6219,28 @@ set:
+ goto unlock;
+ }
+
+- old_rb = event->rb;
+- rcu_assign_pointer(event->rb, rb);
+ if (old_rb)
+ ring_buffer_detach(event, old_rb);
++
++ if (rb)
++ ring_buffer_attach(event, rb);
++
++ rcu_assign_pointer(event->rb, rb);
++
++ if (old_rb) {
++ ring_buffer_put(old_rb);
++ /*
++ * Since we detached before setting the new rb, so that we
++ * could attach the new rb, we could have missed a wakeup.
++ * Provide it now.
++ */
++ wake_up_all(&event->waitq);
++ }
++
+ ret = 0;
+ unlock:
+ mutex_unlock(&event->mmap_mutex);
+
+- if (old_rb)
+- ring_buffer_put(old_rb);
+ out:
+ return ret;
+ }
+@@ -6797,7 +6907,7 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
+ * child.
+ */
+
+- child_ctx = alloc_perf_context(event->pmu, child);
++ child_ctx = alloc_perf_context(parent_ctx->pmu, child);
+ if (!child_ctx)
+ return -ENOMEM;
+
+diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
+index b7971d6..98ac24e 100644
+--- a/kernel/events/hw_breakpoint.c
++++ b/kernel/events/hw_breakpoint.c
+@@ -147,7 +147,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
+ return;
+ }
+
+- for_each_online_cpu(cpu) {
++ for_each_possible_cpu(cpu) {
+ unsigned int nr;
+
+ nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
+@@ -233,7 +233,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
+ if (cpu >= 0) {
+ toggle_bp_task_slot(bp, cpu, enable, type, weight);
+ } else {
+- for_each_online_cpu(cpu)
++ for_each_possible_cpu(cpu)
+ toggle_bp_task_slot(bp, cpu, enable, type, weight);
+ }
+
+diff --git a/kernel/events/internal.h b/kernel/events/internal.h
+index 64568a6..a2101bb 100644
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -26,6 +26,10 @@ struct ring_buffer {
+ spinlock_t event_lock;
+ struct list_head event_list;
+
++ atomic_t mmap_count;
++ unsigned long mmap_locked;
++ struct user_struct *mmap_user;
++
+ struct perf_event_mmap_page *user_page;
+ void *data_pages[0];
+ };
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 77bccfc..1d0538e 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -60,6 +60,7 @@
+ #include <linux/pid.h>
+ #include <linux/nsproxy.h>
+ #include <linux/ptrace.h>
++#include <linux/hugetlb.h>
+
+ #include <asm/futex.h>
+
+@@ -363,7 +364,7 @@ again:
+ } else {
+ key->both.offset |= FUT_OFF_INODE; /* inode-based key */
+ key->shared.inode = page_head->mapping->host;
+- key->shared.pgoff = page_head->index;
++ key->shared.pgoff = basepage_index(page);
+ }
+
+ get_futex_key_refs(key);
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 382a6bd..52bdd58 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -539,9 +539,9 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
+ return 0;
+
+ if (irq_settings_can_request(desc)) {
+- if (desc->action)
+- if (irqflags & desc->action->flags & IRQF_SHARED)
+- canrequest =1;
++ if (!desc->action ||
++ irqflags & desc->action->flags & IRQF_SHARED)
++ canrequest = 1;
+ }
+ irq_put_desc_unlock(desc, flags);
+ return canrequest;
+diff --git a/kernel/printk.c b/kernel/printk.c
+index c0d12ea..16688ec 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -813,9 +813,9 @@ static int console_trylock_for_printk(unsigned int cpu)
+ }
+ }
+ printk_cpu = UINT_MAX;
++ raw_spin_unlock(&logbuf_lock);
+ if (wake)
+ up(&console_sem);
+- raw_spin_unlock(&logbuf_lock);
+ return retval;
+ }
+ static const char recursion_bug_msg [] =
+diff --git a/kernel/timer.c b/kernel/timer.c
+index f2f71d7..f8b05a4 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -145,9 +145,11 @@ static unsigned long round_jiffies_common(unsigned long j, int cpu,
+ /* now that we have rounded, subtract the extra skew again */
+ j -= cpu * 3;
+
+- if (j <= jiffies) /* rounding ate our timeout entirely; */
+- return original;
+- return j;
++ /*
++ * Make sure j is still in the future. Otherwise return the
++ * unmodified value.
++ */
++ return time_is_after_jiffies(j) ? j : original;
+ }
+
+ /**
+diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
+index cb65454..7c75bbb 100644
+--- a/kernel/trace/trace_syscalls.c
++++ b/kernel/trace/trace_syscalls.c
+@@ -303,6 +303,8 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
+ struct syscall_metadata *sys_data;
+ struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
++ unsigned long irq_flags;
++ int pc;
+ int size;
+ int syscall_nr;
+
+@@ -318,8 +320,11 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
+
+ size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
+
++ local_save_flags(irq_flags);
++ pc = preempt_count();
++
+ event = trace_current_buffer_lock_reserve(&buffer,
+- sys_data->enter_event->event.type, size, 0, 0);
++ sys_data->enter_event->event.type, size, irq_flags, pc);
+ if (!event)
+ return;
+
+@@ -329,7 +334,8 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
+
+ if (!filter_current_check_discard(buffer, sys_data->enter_event,
+ entry, event))
+- trace_current_buffer_unlock_commit(buffer, event, 0, 0);
++ trace_current_buffer_unlock_commit(buffer, event,
++ irq_flags, pc);
+ }
+
+ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+@@ -338,6 +344,8 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+ struct syscall_metadata *sys_data;
+ struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
++ unsigned long irq_flags;
++ int pc;
+ int syscall_nr;
+
+ syscall_nr = syscall_get_nr(current, regs);
+@@ -350,8 +358,12 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+ if (!sys_data)
+ return;
+
++ local_save_flags(irq_flags);
++ pc = preempt_count();
++
+ event = trace_current_buffer_lock_reserve(&buffer,
+- sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
++ sys_data->exit_event->event.type, sizeof(*entry),
++ irq_flags, pc);
+ if (!event)
+ return;
+
+@@ -361,7 +373,8 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+
+ if (!filter_current_check_discard(buffer, sys_data->exit_event,
+ entry, event))
+- trace_current_buffer_unlock_commit(buffer, event, 0, 0);
++ trace_current_buffer_unlock_commit(buffer, event,
++ irq_flags, pc);
+ }
+
+ int reg_event_syscall_enter(struct ftrace_event_call *call)
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 2dcd716..ddf2128 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -679,6 +679,23 @@ int PageHuge(struct page *page)
+ }
+ EXPORT_SYMBOL_GPL(PageHuge);
+
++pgoff_t __basepage_index(struct page *page)
++{
++ struct page *page_head = compound_head(page);
++ pgoff_t index = page_index(page_head);
++ unsigned long compound_idx;
++
++ if (!PageHuge(page_head))
++ return page_index(page);
++
++ if (compound_order(page_head) >= MAX_ORDER)
++ compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
++ else
++ compound_idx = page - page_head;
++
++ return (index << compound_order(page_head)) + compound_idx;
++}
++
+ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
+ {
+ struct page *page;
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index a0b6c50..dd7c019 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -1737,6 +1737,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
+ BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
+ conn, code, ident, dlen);
+
++ if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
++ return NULL;
++
+ len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
+ count = min_t(unsigned int, conn->mtu, len);
+
+@@ -2865,7 +2868,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,
+ struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
+ u16 type, result;
+
+- if (cmd_len != sizeof(*rsp))
++ if (cmd_len < sizeof(*rsp))
+ return -EPROTO;
+
+ type = __le16_to_cpu(rsp->type);
+diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
+index 214c2bb..9f78c5f 100644
+--- a/net/ceph/auth_none.c
++++ b/net/ceph/auth_none.c
+@@ -39,6 +39,11 @@ static int should_authenticate(struct ceph_auth_client *ac)
+ return xi->starting;
+ }
+
++static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
++{
++ return 0;
++}
++
+ /*
+ * the generic auth code decode the global_id, and we carry no actual
+ * authenticate state, so nothing happens here.
+@@ -107,6 +112,7 @@ static const struct ceph_auth_client_ops ceph_auth_none_ops = {
+ .destroy = destroy,
+ .is_authenticated = is_authenticated,
+ .should_authenticate = should_authenticate,
++ .build_request = build_request,
+ .handle_reply = handle_reply,
+ .create_authorizer = ceph_auth_none_create_authorizer,
+ .destroy_authorizer = ceph_auth_none_destroy_authorizer,
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 3c8bc6e..d148a2b 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -902,7 +902,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ per_cvt->assigned = 1;
+ hinfo->nid = per_cvt->cvt_nid;
+
+- snd_hda_codec_write(codec, per_pin->pin_nid, 0,
++ snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
+ AC_VERB_SET_CONNECT_SEL,
+ mux_idx);
+ snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
+diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
+index e97df24..8b687da 100644
+--- a/sound/soc/codecs/wm8962.c
++++ b/sound/soc/codecs/wm8962.c
+@@ -2117,7 +2117,6 @@ static int wm8962_put_hp_sw(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+- u16 *reg_cache = codec->reg_cache;
+ int ret;
+
+ /* Apply the update (if any) */
+@@ -2126,16 +2125,19 @@ static int wm8962_put_hp_sw(struct snd_kcontrol *kcontrol,
+ return 0;
+
+ /* If the left PGA is enabled hit that VU bit... */
+- if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTL_PGA_ENA)
+- return snd_soc_write(codec, WM8962_HPOUTL_VOLUME,
+- reg_cache[WM8962_HPOUTL_VOLUME]);
++ ret = snd_soc_read(codec, WM8962_PWR_MGMT_2);
++ if (ret & WM8962_HPOUTL_PGA_ENA) {
++ snd_soc_write(codec, WM8962_HPOUTL_VOLUME,
++ snd_soc_read(codec, WM8962_HPOUTL_VOLUME));
++ return 1;
++ }
+
+ /* ...otherwise the right. The VU is stereo. */
+- if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTR_PGA_ENA)
+- return snd_soc_write(codec, WM8962_HPOUTR_VOLUME,
+- reg_cache[WM8962_HPOUTR_VOLUME]);
++ if (ret & WM8962_HPOUTR_PGA_ENA)
++ snd_soc_write(codec, WM8962_HPOUTR_VOLUME,
++ snd_soc_read(codec, WM8962_HPOUTR_VOLUME));
+
+- return 0;
++ return 1;
+ }
+
+ /* The VU bits for the speakers are in a different register to the mute
+@@ -3944,7 +3946,6 @@ static int wm8962_probe(struct snd_soc_codec *codec)
+ int ret;
+ struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec);
+ struct wm8962_pdata *pdata = dev_get_platdata(codec->dev);
+- u16 *reg_cache = codec->reg_cache;
+ int i, trigger, irq_pol;
+ bool dmicclk, dmicdat;
+
+@@ -4055,8 +4056,9 @@ static int wm8962_probe(struct snd_soc_codec *codec)
+
+ /* Put the speakers into mono mode? */
+ if (pdata->spk_mono)
+- reg_cache[WM8962_CLASS_D_CONTROL_2]
+- |= WM8962_SPK_MONO;
++ snd_soc_update_bits(codec, WM8962_CLASS_D_CONTROL_2,
++ WM8962_SPK_MONO_MASK, WM8962_SPK_MONO);
++
+
+ /* Micbias setup, detection enable and detection
+ * threasholds. */
diff --git a/1049_linux-3.2.50.patch b/1049_linux-3.2.50.patch
new file mode 100644
index 00000000..20b3015e
--- /dev/null
+++ b/1049_linux-3.2.50.patch
@@ -0,0 +1,2495 @@
+diff --git a/Makefile b/Makefile
+index 2e3d791..0799e8e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 49
++SUBLEVEL = 50
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
+index 0192a4e..80de64b 100644
+--- a/arch/powerpc/include/asm/module.h
++++ b/arch/powerpc/include/asm/module.h
+@@ -87,10 +87,9 @@ struct exception_table_entry;
+ void sort_ex_table(struct exception_table_entry *start,
+ struct exception_table_entry *finish);
+
+-#ifdef CONFIG_MODVERSIONS
++#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
+ #define ARCH_RELOCATES_KCRCTAB
+-
+-extern const unsigned long reloc_start[];
++#define reloc_start PHYSICAL_START
+ #endif
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_MODULE_H */
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index 920276c..3e8fe4b 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -38,9 +38,6 @@ jiffies = jiffies_64 + 4;
+ #endif
+ SECTIONS
+ {
+- . = 0;
+- reloc_start = .;
+-
+ . = KERNELBASE;
+
+ /*
+diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c
+index 68f7e11..ce48203 100644
+--- a/arch/sparc/kernel/asm-offsets.c
++++ b/arch/sparc/kernel/asm-offsets.c
+@@ -34,6 +34,8 @@ int foo(void)
+ DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
+ BLANK();
+ DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context));
++ BLANK();
++ DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
+
+ /* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */
+ return 0;
+diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S
+index 44aad32..969f964 100644
+--- a/arch/sparc/mm/hypersparc.S
++++ b/arch/sparc/mm/hypersparc.S
+@@ -74,7 +74,7 @@ hypersparc_flush_cache_mm_out:
+
+ /* The things we do for performance... */
+ hypersparc_flush_cache_range:
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ #ifndef CONFIG_SMP
+ ld [%o0 + AOFF_mm_context], %g1
+ cmp %g1, -1
+@@ -163,7 +163,7 @@ hypersparc_flush_cache_range_out:
+ */
+ /* Verified, my ass... */
+ hypersparc_flush_cache_page:
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ ld [%o0 + AOFF_mm_context], %g2
+ #ifndef CONFIG_SMP
+ cmp %g2, -1
+@@ -284,7 +284,7 @@ hypersparc_flush_tlb_mm_out:
+ sta %g5, [%g1] ASI_M_MMUREGS
+
+ hypersparc_flush_tlb_range:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+@@ -307,7 +307,7 @@ hypersparc_flush_tlb_range_out:
+ sta %g5, [%g1] ASI_M_MMUREGS
+
+ hypersparc_flush_tlb_page:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ andn %o1, (PAGE_SIZE - 1), %o1
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 6ff4d78..b4989f9 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -1071,7 +1071,14 @@ static int __init grab_mblocks(struct mdesc_handle *md)
+ m->size = *val;
+ val = mdesc_get_property(md, node,
+ "address-congruence-offset", NULL);
+- m->offset = *val;
++
++ /* The address-congruence-offset property is optional.
++ * Explicity zero it be identifty this.
++ */
++ if (val)
++ m->offset = *val;
++ else
++ m->offset = 0UL;
+
+ numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
+ count - 1, m->base, m->size, m->offset);
+diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S
+index c801c39..5d2b88d 100644
+--- a/arch/sparc/mm/swift.S
++++ b/arch/sparc/mm/swift.S
+@@ -105,7 +105,7 @@ swift_flush_cache_mm_out:
+
+ .globl swift_flush_cache_range
+ swift_flush_cache_range:
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ sub %o2, %o1, %o2
+ sethi %hi(4096), %o3
+ cmp %o2, %o3
+@@ -116,7 +116,7 @@ swift_flush_cache_range:
+
+ .globl swift_flush_cache_page
+ swift_flush_cache_page:
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ 70:
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+@@ -219,7 +219,7 @@ swift_flush_sig_insns:
+ .globl swift_flush_tlb_range
+ .globl swift_flush_tlb_all
+ swift_flush_tlb_range:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ swift_flush_tlb_mm:
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+@@ -233,7 +233,7 @@ swift_flush_tlb_all_out:
+
+ .globl swift_flush_tlb_page
+ swift_flush_tlb_page:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ andn %o1, (PAGE_SIZE - 1), %o1
+diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
+index afd021e..072f553 100644
+--- a/arch/sparc/mm/tlb.c
++++ b/arch/sparc/mm/tlb.c
+@@ -115,8 +115,8 @@ no_cache_flush:
+ }
+
+ if (!tb->active) {
+- global_flush_tlb_page(mm, vaddr);
+ flush_tsb_user_page(mm, vaddr);
++ global_flush_tlb_page(mm, vaddr);
+ goto out;
+ }
+
+diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S
+index 4e55e8f..bf10a34 100644
+--- a/arch/sparc/mm/tsunami.S
++++ b/arch/sparc/mm/tsunami.S
+@@ -24,7 +24,7 @@
+ /* Sliiick... */
+ tsunami_flush_cache_page:
+ tsunami_flush_cache_range:
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ tsunami_flush_cache_mm:
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+@@ -46,7 +46,7 @@ tsunami_flush_sig_insns:
+
+ /* More slick stuff... */
+ tsunami_flush_tlb_range:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ tsunami_flush_tlb_mm:
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+@@ -65,7 +65,7 @@ tsunami_flush_tlb_out:
+
+ /* This one can be done in a fine grained manner... */
+ tsunami_flush_tlb_page:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ andn %o1, (PAGE_SIZE - 1), %o1
+diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
+index 6dfcc13..a516372 100644
+--- a/arch/sparc/mm/viking.S
++++ b/arch/sparc/mm/viking.S
+@@ -109,7 +109,7 @@ viking_mxcc_flush_page:
+ viking_flush_cache_page:
+ viking_flush_cache_range:
+ #ifndef CONFIG_SMP
+- ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ #endif
+ viking_flush_cache_mm:
+ #ifndef CONFIG_SMP
+@@ -149,7 +149,7 @@ viking_flush_tlb_mm:
+ #endif
+
+ viking_flush_tlb_range:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+@@ -174,7 +174,7 @@ viking_flush_tlb_range:
+ #endif
+
+ viking_flush_tlb_page:
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+@@ -240,7 +240,7 @@ sun4dsmp_flush_tlb_range:
+ tst %g5
+ bne 3f
+ mov SRMMU_CTX_REG, %g1
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+ sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+@@ -266,7 +266,7 @@ sun4dsmp_flush_tlb_page:
+ tst %g5
+ bne 2f
+ mov SRMMU_CTX_REG, %g1
+- ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
++ ld [%o0 + VMA_VM_MM], %o0
+ ld [%o0 + AOFF_mm_context], %o3
+ lda [%g1] ASI_M_MMUREGS, %g5
+ and %o1, PAGE_MASK, %o1
+diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
+index d985713..f81597f 100644
+--- a/drivers/acpi/acpi_memhotplug.c
++++ b/drivers/acpi/acpi_memhotplug.c
+@@ -421,6 +421,7 @@ static int acpi_memory_device_add(struct acpi_device *device)
+ /* Get the range from the _CRS */
+ result = acpi_memory_get_device_resources(mem_device);
+ if (result) {
++ device->driver_data = NULL;
+ kfree(mem_device);
+ return result;
+ }
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index 7a949af..5b0b5f7 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -352,7 +352,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+- { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++ { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 85fdd4b..2232b85 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -277,6 +277,7 @@ int xen_blkif_schedule(void *arg)
+ {
+ struct xen_blkif *blkif = arg;
+ struct xen_vbd *vbd = &blkif->vbd;
++ int ret;
+
+ xen_blkif_get(blkif);
+
+@@ -297,8 +298,12 @@ int xen_blkif_schedule(void *arg)
+ blkif->waiting_reqs = 0;
+ smp_mb(); /* clear flag *before* checking for work */
+
+- if (do_block_io_op(blkif))
++ ret = do_block_io_op(blkif);
++ if (ret > 0)
+ blkif->waiting_reqs = 1;
++ if (ret == -EACCES)
++ wait_event_interruptible(blkif->shutdown_wq,
++ kthread_should_stop());
+
+ if (log_stats && time_after(jiffies, blkif->st_print))
+ print_stats(blkif);
+@@ -539,6 +544,12 @@ __do_block_io_op(struct xen_blkif *blkif)
+ rp = blk_rings->common.sring->req_prod;
+ rmb(); /* Ensure we see queued requests up to 'rp'. */
+
++ if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
++ rc = blk_rings->common.rsp_prod_pvt;
++ pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
++ rp, rc, rp - rc, blkif->vbd.pdevice);
++ return -EACCES;
++ }
+ while (rc != rp) {
+
+ if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
+diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
+index dfb1b3a..f67985d 100644
+--- a/drivers/block/xen-blkback/common.h
++++ b/drivers/block/xen-blkback/common.h
+@@ -198,6 +198,8 @@ struct xen_blkif {
+ int st_wr_sect;
+
+ wait_queue_head_t waiting_to_free;
++ /* Thread shutdown wait queue. */
++ wait_queue_head_t shutdown_wq;
+ };
+
+
+diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
+index 674e3c2..77aed26 100644
+--- a/drivers/block/xen-blkback/xenbus.c
++++ b/drivers/block/xen-blkback/xenbus.c
+@@ -118,6 +118,7 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
+ atomic_set(&blkif->drain, 0);
+ blkif->st_print = jiffies;
+ init_waitqueue_head(&blkif->waiting_to_free);
++ init_waitqueue_head(&blkif->shutdown_wq);
+
+ return blkif;
+ }
+@@ -178,6 +179,7 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
+ {
+ if (blkif->xenblkd) {
+ kthread_stop(blkif->xenblkd);
++ wake_up(&blkif->shutdown_wq);
+ blkif->xenblkd = NULL;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index c32fd93..8115557 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
+ enum radeon_combios_table_offset table)
+ {
+ struct radeon_device *rdev = dev->dev_private;
+- int rev;
++ int rev, size;
+ uint16_t offset = 0, check_offset;
+
+ if (!rdev->bios)
+@@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
+ switch (table) {
+ /* absolute offset tables */
+ case COMBIOS_ASIC_INIT_1_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0xc);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0xc;
+ break;
+ case COMBIOS_BIOS_SUPPORT_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x14);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x14;
+ break;
+ case COMBIOS_DAC_PROGRAMMING_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x2a);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x2a;
+ break;
+ case COMBIOS_MAX_COLOR_DEPTH_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x2c);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x2c;
+ break;
+ case COMBIOS_CRTC_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x2e);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x2e;
+ break;
+ case COMBIOS_PLL_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x30);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x30;
+ break;
+ case COMBIOS_TV_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x32);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x32;
+ break;
+ case COMBIOS_DFP_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x34);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x34;
+ break;
+ case COMBIOS_HW_CONFIG_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x36);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x36;
+ break;
+ case COMBIOS_MULTIMEDIA_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x38);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x38;
+ break;
+ case COMBIOS_TV_STD_PATCH_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x3e);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x3e;
+ break;
+ case COMBIOS_LCD_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x40);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x40;
+ break;
+ case COMBIOS_MOBILE_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x42);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x42;
+ break;
+ case COMBIOS_PLL_INIT_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x46);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x46;
+ break;
+ case COMBIOS_MEM_CONFIG_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x48);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x48;
+ break;
+ case COMBIOS_SAVE_MASK_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x4a);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x4a;
+ break;
+ case COMBIOS_HARDCODED_EDID_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x4c);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x4c;
+ break;
+ case COMBIOS_ASIC_INIT_2_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x4e);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x4e;
+ break;
+ case COMBIOS_CONNECTOR_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x50);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x50;
+ break;
+ case COMBIOS_DYN_CLK_1_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x52);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x52;
+ break;
+ case COMBIOS_RESERVED_MEM_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x54);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x54;
+ break;
+ case COMBIOS_EXT_TMDS_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x58);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x58;
+ break;
+ case COMBIOS_MEM_CLK_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x5a);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x5a;
+ break;
+ case COMBIOS_EXT_DAC_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x5c);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x5c;
+ break;
+ case COMBIOS_MISC_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x5e);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x5e;
+ break;
+ case COMBIOS_CRT_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x60);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x60;
+ break;
+ case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x62);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x62;
+ break;
+ case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x64);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x64;
+ break;
+ case COMBIOS_FAN_SPEED_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x66);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x66;
+ break;
+ case COMBIOS_OVERDRIVE_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x68);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x68;
+ break;
+ case COMBIOS_OEM_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x6a);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x6a;
+ break;
+ case COMBIOS_DYN_CLK_2_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x6c);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x6c;
+ break;
+ case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x6e);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x6e;
+ break;
+ case COMBIOS_I2C_INFO_TABLE:
+- check_offset = RBIOS16(rdev->bios_header_start + 0x70);
+- if (check_offset)
+- offset = check_offset;
++ check_offset = 0x70;
+ break;
+ /* relative offset tables */
+ case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
+@@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
+ }
+ break;
+ default:
++ check_offset = 0;
+ break;
+ }
+
+- return offset;
++ size = RBIOS8(rdev->bios_header_start + 0x6);
++ /* check absolute offset tables */
++ if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
++ offset = RBIOS16(rdev->bios_header_start + check_offset);
+
++ return offset;
+ }
+
+ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
+@@ -953,16 +890,22 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
+ dac = RBIOS8(dac_info + 0x3) & 0xf;
+ p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+ }
+- /* if the values are all zeros, use the table */
+- if (p_dac->ps2_pdac_adj)
++ /* if the values are zeros, use the table */
++ if ((dac == 0) || (bg == 0))
++ found = 0;
++ else
+ found = 1;
+ }
+
+ /* quirks */
++ /* Radeon 7000 (RV100) */
++ if (((dev->pdev->device == 0x5159) &&
++ (dev->pdev->subsystem_vendor == 0x174B) &&
++ (dev->pdev->subsystem_device == 0x7c28)) ||
+ /* Radeon 9100 (R200) */
+- if ((dev->pdev->device == 0x514D) &&
++ ((dev->pdev->device == 0x514D) &&
+ (dev->pdev->subsystem_vendor == 0x174B) &&
+- (dev->pdev->subsystem_device == 0x7149)) {
++ (dev->pdev->subsystem_device == 0x7149))) {
+ /* vbios value is bad, use the default */
+ found = 0;
+ }
+diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
+index 68fe73c..99b1145 100644
+--- a/drivers/net/dummy.c
++++ b/drivers/net/dummy.c
+@@ -186,6 +186,8 @@ static int __init dummy_init_module(void)
+
+ rtnl_lock();
+ err = __rtnl_link_register(&dummy_link_ops);
++ if (err < 0)
++ goto out;
+
+ for (i = 0; i < numdummies && !err; i++) {
+ err = dummy_init_one();
+@@ -193,6 +195,8 @@ static int __init dummy_init_module(void)
+ }
+ if (err < 0)
+ __rtnl_link_unregister(&dummy_link_ops);
++
++out:
+ rtnl_unlock();
+
+ return err;
+diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+index dd893b3..87851f0 100644
+--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -1685,8 +1685,8 @@ check_sum:
+ return 0;
+ }
+
+-static void atl1e_tx_map(struct atl1e_adapter *adapter,
+- struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
++static int atl1e_tx_map(struct atl1e_adapter *adapter,
++ struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
+ {
+ struct atl1e_tpd_desc *use_tpd = NULL;
+ struct atl1e_tx_buffer *tx_buffer = NULL;
+@@ -1697,6 +1697,8 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
+ u16 nr_frags;
+ u16 f;
+ int segment;
++ int ring_start = adapter->tx_ring.next_to_use;
++ int ring_end;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
+@@ -1709,6 +1711,9 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
+ tx_buffer->length = map_len;
+ tx_buffer->dma = pci_map_single(adapter->pdev,
+ skb->data, hdr_len, PCI_DMA_TODEVICE);
++ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma))
++ return -ENOSPC;
++
+ ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
+ mapped_len += map_len;
+ use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
+@@ -1735,6 +1740,22 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
+ tx_buffer->dma =
+ pci_map_single(adapter->pdev, skb->data + mapped_len,
+ map_len, PCI_DMA_TODEVICE);
++
++ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
++ /* We need to unwind the mappings we've done */
++ ring_end = adapter->tx_ring.next_to_use;
++ adapter->tx_ring.next_to_use = ring_start;
++ while (adapter->tx_ring.next_to_use != ring_end) {
++ tpd = atl1e_get_tpd(adapter);
++ tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
++ pci_unmap_single(adapter->pdev, tx_buffer->dma,
++ tx_buffer->length, PCI_DMA_TODEVICE);
++ }
++ /* Reset the tx rings next pointer */
++ adapter->tx_ring.next_to_use = ring_start;
++ return -ENOSPC;
++ }
++
+ ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
+ mapped_len += map_len;
+ use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
+@@ -1770,6 +1791,23 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
+ (i * MAX_TX_BUF_LEN),
+ tx_buffer->length,
+ DMA_TO_DEVICE);
++
++ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
++ /* We need to unwind the mappings we've done */
++ ring_end = adapter->tx_ring.next_to_use;
++ adapter->tx_ring.next_to_use = ring_start;
++ while (adapter->tx_ring.next_to_use != ring_end) {
++ tpd = atl1e_get_tpd(adapter);
++ tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
++ dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma,
++ tx_buffer->length, DMA_TO_DEVICE);
++ }
++
++ /* Reset the ring next to use pointer */
++ adapter->tx_ring.next_to_use = ring_start;
++ return -ENOSPC;
++ }
++
+ ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
+ use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
+ use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
+@@ -1787,6 +1825,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
+ /* The last buffer info contain the skb address,
+ so it will be free after unmap */
+ tx_buffer->skb = skb;
++ return 0;
+ }
+
+ static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count,
+@@ -1854,10 +1893,15 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
+ return NETDEV_TX_OK;
+ }
+
+- atl1e_tx_map(adapter, skb, tpd);
++ if (atl1e_tx_map(adapter, skb, tpd)) {
++ dev_kfree_skb_any(skb);
++ goto out;
++ }
++
+ atl1e_tx_queue(adapter, tpd_req, tpd);
+
+ netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
++out:
+ spin_unlock_irqrestore(&adapter->tx_lock, flags);
+ return NETDEV_TX_OK;
+ }
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 9b23074..b2077ca 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -136,8 +136,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .rmcr_value = 0x00000001,
+
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+- .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
+- EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
++ .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
++ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
++ EESR_ECI,
+ .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
+
+ .apr = 1,
+@@ -251,9 +252,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+ .tx_check = EESR_TC1 | EESR_FTC,
+- .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+- EESR_ECI,
++ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
++ EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
++ EESR_TDE | EESR_ECI,
+ .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
+ EESR_TFE,
+ .fdr_value = 0x0000072f,
+@@ -355,9 +356,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+
+ .tx_check = EESR_TC1 | EESR_FTC,
+- .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
+- EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
+- EESR_ECI,
++ .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
++ EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
++ EESR_TDE | EESR_ECI,
+ .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
+ EESR_TFE,
+
+diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
+index 47877b1..590705c 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.h
++++ b/drivers/net/ethernet/renesas/sh_eth.h
+@@ -461,7 +461,7 @@ enum EESR_BIT {
+
+ #define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
+ EESR_RTO)
+-#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \
++#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \
+ EESR_RDE | EESR_RFRMER | EESR_ADE | \
+ EESR_TFE | EESR_TDE | EESR_ECI)
+ #define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \
+diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
+index 8c6c059..bd08919 100644
+--- a/drivers/net/ethernet/sun/sunvnet.c
++++ b/drivers/net/ethernet/sun/sunvnet.c
+@@ -1248,6 +1248,8 @@ static int vnet_port_remove(struct vio_dev *vdev)
+ dev_set_drvdata(&vdev->dev, NULL);
+
+ kfree(port);
++
++ unregister_netdev(vp->dev);
+ }
+ return 0;
+ }
+diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
+index 46b5f5f..b19841a 100644
+--- a/drivers/net/ifb.c
++++ b/drivers/net/ifb.c
+@@ -290,11 +290,17 @@ static int __init ifb_init_module(void)
+
+ rtnl_lock();
+ err = __rtnl_link_register(&ifb_link_ops);
++ if (err < 0)
++ goto out;
+
+- for (i = 0; i < numifbs && !err; i++)
++ for (i = 0; i < numifbs && !err; i++) {
+ err = ifb_init_one(i);
++ cond_resched();
++ }
+ if (err)
+ __rtnl_link_unregister(&ifb_link_ops);
++
++out:
+ rtnl_unlock();
+
+ return err;
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 26106c0..96b9e3c 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -532,8 +532,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ return -EMSGSIZE;
+ num_pages = get_user_pages_fast(base, size, 0, &page[i]);
+ if (num_pages != size) {
+- for (i = 0; i < num_pages; i++)
+- put_page(page[i]);
++ int j;
++
++ for (j = 0; j < num_pages; j++)
++ put_page(page[i + j]);
+ return -EFAULT;
+ }
+ truesize = size * PAGE_SIZE;
+@@ -653,6 +655,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ int vnet_hdr_len = 0;
+ int copylen = 0;
+ bool zerocopy = false;
++ size_t linear;
+
+ if (q->flags & IFF_VNET_HDR) {
+ vnet_hdr_len = q->vnet_hdr_sz;
+@@ -707,11 +710,14 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ copylen = vnet_hdr.hdr_len;
+ if (!copylen)
+ copylen = GOODCOPY_LEN;
+- } else
++ linear = copylen;
++ } else {
+ copylen = len;
++ linear = vnet_hdr.hdr_len;
++ }
+
+ skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
+- vnet_hdr.hdr_len, noblock, &err);
++ linear, noblock, &err);
+ if (!skb)
+ goto err;
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 6ee8410..43a6a11 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -508,7 +508,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
+ {
+ struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
+ void *buf;
+- unsigned int len, received = 0;
++ unsigned int r, len, received = 0;
+
+ again:
+ while (received < budget &&
+@@ -525,8 +525,9 @@ again:
+
+ /* Out of packets? */
+ if (received < budget) {
++ r = virtqueue_enable_cb_prepare(vi->rvq);
+ napi_complete(napi);
+- if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
++ if (unlikely(virtqueue_poll(vi->rvq, r)) &&
+ napi_schedule_prep(napi)) {
+ virtqueue_disable_cb(vi->rvq);
+ __napi_schedule(napi);
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
+index 84a78af..182fcb2 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
+@@ -1788,7 +1788,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+ fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
+ }
+
+- memset(sc_cmd->sense_buffer, 0, sizeof(sc_cmd->sense_buffer));
++ memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ if (fcp_sns_len)
+ memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
+
+diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
+index 66ad3dc..e294d11 100644
+--- a/drivers/scsi/isci/task.c
++++ b/drivers/scsi/isci/task.c
+@@ -1038,6 +1038,7 @@ int isci_task_abort_task(struct sas_task *task)
+ int ret = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+ int perform_termination = 0;
++ int target_done_already = 0;
+
+ /* Get the isci_request reference from the task. Note that
+ * this check does not depend on the pending request list
+@@ -1052,9 +1053,11 @@ int isci_task_abort_task(struct sas_task *task)
+ /* If task is already done, the request isn't valid */
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
+ (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
+- old_request)
++ old_request) {
+ isci_device = isci_lookup_device(task->dev);
+-
++ target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
++ &old_request->flags);
++ }
+ spin_unlock(&task->task_state_lock);
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+@@ -1116,7 +1119,7 @@ int isci_task_abort_task(struct sas_task *task)
+ }
+ if (task->task_proto == SAS_PROTOCOL_SMP ||
+ sas_protocol_ata(task->task_proto) ||
+- test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
++ target_done_already) {
+
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
+index a4b267e..9fbe260 100644
+--- a/drivers/scsi/qla2xxx/qla_iocb.c
++++ b/drivers/scsi/qla2xxx/qla_iocb.c
+@@ -423,6 +423,8 @@ qla2x00_start_scsi(srb_t *sp)
+ __constant_cpu_to_le16(CF_SIMPLE_TAG);
+ break;
+ }
++ } else {
++ cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
+ }
+
+ /* Load SCSI command packet. */
+@@ -1244,11 +1246,11 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
+ fcp_cmnd->task_attribute = TSK_ORDERED;
+ break;
+ default:
+- fcp_cmnd->task_attribute = 0;
++ fcp_cmnd->task_attribute = TSK_SIMPLE;
+ break;
+ }
+ } else {
+- fcp_cmnd->task_attribute = 0;
++ fcp_cmnd->task_attribute = TSK_SIMPLE;
+ }
+
+ cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
+@@ -1454,7 +1456,12 @@ qla24xx_start_scsi(srb_t *sp)
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->task = TSK_ORDERED;
+ break;
++ default:
++ cmd_pkt->task = TSK_SIMPLE;
++ break;
+ }
++ } else {
++ cmd_pkt->task = TSK_SIMPLE;
+ }
+
+ /* Load SCSI command packet. */
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 6dace1a..17603da 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -641,10 +641,17 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
+
+ static void sd_unprep_fn(struct request_queue *q, struct request *rq)
+ {
++ struct scsi_cmnd *SCpnt = rq->special;
++
+ if (rq->cmd_flags & REQ_DISCARD) {
+ free_page((unsigned long)rq->buffer);
+ rq->buffer = NULL;
+ }
++ if (SCpnt->cmnd != rq->cmd) {
++ mempool_free(SCpnt->cmnd, sd_cdb_pool);
++ SCpnt->cmnd = NULL;
++ SCpnt->cmd_len = 0;
++ }
+ }
+
+ /**
+@@ -1452,21 +1459,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
+ if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
+ sd_dif_complete(SCpnt, good_bytes);
+
+- if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
+- == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
+-
+- /* We have to print a failed command here as the
+- * extended CDB gets freed before scsi_io_completion()
+- * is called.
+- */
+- if (result)
+- scsi_print_command(SCpnt);
+-
+- mempool_free(SCpnt->cmnd, sd_cdb_pool);
+- SCpnt->cmnd = NULL;
+- SCpnt->cmd_len = 0;
+- }
+-
+ return good_bytes;
+ }
+
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index fe4dbf3..7e42190 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -1078,22 +1078,19 @@ static int do_cmd_ioctl(struct comedi_device *dev,
+ DPRINTK("subdevice busy\n");
+ return -EBUSY;
+ }
+- s->busy = file;
+
+ /* make sure channel/gain list isn't too long */
+ if (user_cmd.chanlist_len > s->len_chanlist) {
+ DPRINTK("channel/gain list too long %u > %d\n",
+ user_cmd.chanlist_len, s->len_chanlist);
+- ret = -EINVAL;
+- goto cleanup;
++ return -EINVAL;
+ }
+
+ /* make sure channel/gain list isn't too short */
+ if (user_cmd.chanlist_len < 1) {
+ DPRINTK("channel/gain list too short %u < 1\n",
+ user_cmd.chanlist_len);
+- ret = -EINVAL;
+- goto cleanup;
++ return -EINVAL;
+ }
+
+ async->cmd = user_cmd;
+@@ -1103,8 +1100,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
+ kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL);
+ if (!async->cmd.chanlist) {
+ DPRINTK("allocation failed\n");
+- ret = -ENOMEM;
+- goto cleanup;
++ return -ENOMEM;
+ }
+
+ if (copy_from_user(async->cmd.chanlist, user_cmd.chanlist,
+@@ -1156,6 +1152,9 @@ static int do_cmd_ioctl(struct comedi_device *dev,
+
+ comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING);
+
++ /* set s->busy _after_ setting SRF_RUNNING flag to avoid race with
++ * comedi_read() or comedi_write() */
++ s->busy = file;
+ ret = s->do_cmd(dev, s);
+ if (ret == 0)
+ return 0;
+@@ -1370,6 +1369,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
+ void *file)
+ {
+ struct comedi_subdevice *s;
++ int ret;
+
+ if (arg >= dev->n_subdevices)
+ return -EINVAL;
+@@ -1386,7 +1386,11 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
+ if (s->busy != file)
+ return -EBUSY;
+
+- return do_cancel(dev, s);
++ ret = do_cancel(dev, s);
++ if (comedi_get_subdevice_runflags(s) & SRF_USER)
++ wake_up_interruptible(&s->async->wait_head);
++
++ return ret;
+ }
+
+ /*
+@@ -1653,6 +1657,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
+
+ if (!(comedi_get_subdevice_runflags(s) & SRF_RUNNING)) {
+ if (count == 0) {
++ mutex_lock(&dev->mutex);
+ if (comedi_get_subdevice_runflags(s) &
+ SRF_ERROR) {
+ retval = -EPIPE;
+@@ -1660,6 +1665,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
+ retval = 0;
+ }
+ do_become_nonbusy(dev, s);
++ mutex_unlock(&dev->mutex);
+ }
+ break;
+ }
+@@ -1774,6 +1780,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
+
+ if (n == 0) {
+ if (!(comedi_get_subdevice_runflags(s) & SRF_RUNNING)) {
++ mutex_lock(&dev->mutex);
+ do_become_nonbusy(dev, s);
+ if (comedi_get_subdevice_runflags(s) &
+ SRF_ERROR) {
+@@ -1781,6 +1788,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
+ } else {
+ retval = 0;
+ }
++ mutex_unlock(&dev->mutex);
+ break;
+ }
+ if (file->f_flags & O_NONBLOCK) {
+@@ -1818,9 +1826,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
+ buf += n;
+ break; /* makes device work like a pipe */
+ }
+- if (!(comedi_get_subdevice_runflags(s) & (SRF_ERROR | SRF_RUNNING)) &&
+- async->buf_read_count - async->buf_write_count == 0) {
+- do_become_nonbusy(dev, s);
++ if (!(comedi_get_subdevice_runflags(s) & (SRF_ERROR | SRF_RUNNING))) {
++ mutex_lock(&dev->mutex);
++ if (async->buf_read_count - async->buf_write_count == 0)
++ do_become_nonbusy(dev, s);
++ mutex_unlock(&dev->mutex);
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&async->wait_head, &wait);
+diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
+index 9d4c8a6..2d3a420 100644
+--- a/drivers/staging/line6/pcm.c
++++ b/drivers/staging/line6/pcm.c
+@@ -360,8 +360,11 @@ static int snd_line6_pcm_free(struct snd_device *device)
+ */
+ static void pcm_disconnect_substream(struct snd_pcm_substream *substream)
+ {
+- if (substream->runtime && snd_pcm_running(substream))
++ if (substream->runtime && snd_pcm_running(substream)) {
++ snd_pcm_stream_lock_irq(substream);
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
++ snd_pcm_stream_unlock_irq(substream);
++ }
+ }
+
+ /*
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 22cbe06..2768a7e 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -463,6 +463,15 @@ resubmit:
+ static inline int
+ hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
+ {
++ /* Need to clear both directions for control ep */
++ if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) ==
++ USB_ENDPOINT_XFER_CONTROL) {
++ int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
++ HUB_CLEAR_TT_BUFFER, USB_RT_PORT,
++ devinfo ^ 0x8000, tt, NULL, 0, 1000);
++ if (status)
++ return status;
++ }
+ return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
+ HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo,
+ tt, NULL, 0, 1000);
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 29a8e16..4795c0c 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -643,8 +643,8 @@ struct dwc3 {
+
+ struct dwc3_event_type {
+ u32 is_devspec:1;
+- u32 type:6;
+- u32 reserved8_31:25;
++ u32 type:7;
++ u32 reserved8_31:24;
+ } __packed;
+
+ #define DWC3_DEPEVT_XFERCOMPLETE 0x01
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index b368b83..619ee19 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1217,6 +1217,7 @@ err1:
+ __dwc3_gadget_ep_disable(dwc->eps[0]);
+
+ err0:
++ dwc->gadget_driver = NULL;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return ret;
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index aca647a..79d2720 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -89,7 +89,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_AMD_PLL_FIX;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
+- xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
+ xhci->limit_active_eps = 64;
+ xhci->quirks |= XHCI_SW_BW_CHECKING;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index d08a804..633476e 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -463,7 +463,7 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
+
+ /* A ring has pending URBs if its TD list is not empty */
+ if (!(ep->ep_state & EP_HAS_STREAMS)) {
+- if (!(list_empty(&ep->ring->td_list)))
++ if (ep->ring && !(list_empty(&ep->ring->td_list)))
+ xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
+ return;
+ }
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 136c357..6e1c92a 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1153,9 +1153,6 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+ }
+
+ xhci = hcd_to_xhci(hcd);
+- if (xhci->xhc_state & XHCI_STATE_HALTED)
+- return -ENODEV;
+-
+ if (check_virt_dev) {
+ if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
+ printk(KERN_DEBUG "xHCI %s called with unaddressed "
+@@ -1171,6 +1168,9 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+ }
+ }
+
++ if (xhci->xhc_state & XHCI_STATE_HALTED)
++ return -ENODEV;
++
+ return 1;
+ }
+
+@@ -4178,6 +4178,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+
+ get_quirks(dev, xhci);
+
++ /* In xhci controllers which follow xhci 1.0 spec gives a spurious
++ * success event after a short transfer. This quirk will ignore such
++ * spurious event.
++ */
++ if (xhci->hci_version > 0x96)
++ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
++
+ /* Make sure the HC is halted. */
+ retval = xhci_halt(xhci);
+ if (retval)
+diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
+index dd573ab..7af163d 100644
+--- a/drivers/usb/misc/sisusbvga/sisusb.c
++++ b/drivers/usb/misc/sisusbvga/sisusb.c
+@@ -3247,6 +3247,7 @@ static const struct usb_device_id sisusb_table[] = {
+ { USB_DEVICE(0x0711, 0x0903) },
+ { USB_DEVICE(0x0711, 0x0918) },
+ { USB_DEVICE(0x0711, 0x0920) },
++ { USB_DEVICE(0x0711, 0x0950) },
+ { USB_DEVICE(0x182d, 0x021c) },
+ { USB_DEVICE(0x182d, 0x0269) },
+ { }
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 913a178..c408ff7 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -60,6 +60,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
++ { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
+ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
+ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
+@@ -124,6 +125,8 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
+ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
++ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
++ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+@@ -154,6 +157,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
++ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index e89ee48..5e8c736 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -925,20 +925,20 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
+ if (status < 0) {
+ dbg("Reading Spreg failed");
+- return -1;
++ goto err;
+ }
+ Data |= 0x80;
+ status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
+ if (status < 0) {
+ dbg("writing Spreg failed");
+- return -1;
++ goto err;
+ }
+
+ Data &= ~0x80;
+ status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
+ if (status < 0) {
+ dbg("writing Spreg failed");
+- return -1;
++ goto err;
+ }
+ /* End of block to be checked */
+
+@@ -947,7 +947,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ &Data);
+ if (status < 0) {
+ dbg("Reading Controlreg failed");
+- return -1;
++ goto err;
+ }
+ Data |= 0x08; /* Driver done bit */
+ Data |= 0x20; /* rx_disable */
+@@ -955,7 +955,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ mos7840_port->ControlRegOffset, Data);
+ if (status < 0) {
+ dbg("writing Controlreg failed");
+- return -1;
++ goto err;
+ }
+ /* do register settings here */
+ /* Set all regs to the device default values. */
+@@ -966,21 +966,21 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
+ if (status < 0) {
+ dbg("disabling interrupts failed");
+- return -1;
++ goto err;
+ }
+ /* Set FIFO_CONTROL_REGISTER to the default value */
+ Data = 0x00;
+ status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
+ if (status < 0) {
+ dbg("Writing FIFO_CONTROL_REGISTER failed");
+- return -1;
++ goto err;
+ }
+
+ Data = 0xcf;
+ status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
+ if (status < 0) {
+ dbg("Writing FIFO_CONTROL_REGISTER failed");
+- return -1;
++ goto err;
+ }
+
+ Data = 0x03;
+@@ -1136,7 +1136,15 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
+ dbg ("%s leave", __func__);
+
+ return 0;
+-
++err:
++ for (j = 0; j < NUM_URBS; ++j) {
++ urb = mos7840_port->write_urb_pool[j];
++ if (!urb)
++ continue;
++ kfree(urb->transfer_buffer);
++ usb_free_urb(urb);
++ }
++ return status;
+ }
+
+ /*****************************************************************************
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index b8365a7..c2103f4 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -347,17 +347,12 @@ static void option_instat_callback(struct urb *urb);
+ #define OLIVETTI_VENDOR_ID 0x0b3c
+ #define OLIVETTI_PRODUCT_OLICARD100 0xc000
+ #define OLIVETTI_PRODUCT_OLICARD145 0xc003
++#define OLIVETTI_PRODUCT_OLICARD200 0xc005
+
+ /* Celot products */
+ #define CELOT_VENDOR_ID 0x211f
+ #define CELOT_PRODUCT_CT680M 0x6801
+
+-/* ONDA Communication vendor id */
+-#define ONDA_VENDOR_ID 0x1ee8
+-
+-/* ONDA MT825UP HSDPA 14.2 modem */
+-#define ONDA_MT825UP 0x000b
+-
+ /* Samsung products */
+ #define SAMSUNG_VENDOR_ID 0x04e8
+ #define SAMSUNG_PRODUCT_GT_B3730 0x6889
+@@ -450,7 +445,8 @@ static void option_instat_callback(struct urb *urb);
+
+ /* Hyundai Petatel Inc. products */
+ #define PETATEL_VENDOR_ID 0x1ff4
+-#define PETATEL_PRODUCT_NP10T 0x600e
++#define PETATEL_PRODUCT_NP10T_600A 0x600a
++#define PETATEL_PRODUCT_NP10T_600E 0x600e
+
+ /* TP-LINK Incorporated products */
+ #define TPLINK_VENDOR_ID 0x2357
+@@ -797,6 +793,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
++ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
+@@ -832,7 +829,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+@@ -1278,8 +1276,8 @@ static const struct usb_device_id option_ids[] = {
+
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+- { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
+@@ -1351,9 +1349,12 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
+ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+- { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
++ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
++ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
+ { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
+@@ -1361,6 +1362,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index 9d3b39e..42038ba 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -408,7 +408,7 @@ static int ti_startup(struct usb_serial *serial)
+ usb_set_serial_data(serial, tdev);
+
+ /* determine device type */
+- if (usb_match_id(serial->interface, ti_id_table_3410))
++ if (serial->type == &ti_1port_device)
+ tdev->td_is_3410 = 1;
+ dbg("%s - device type is %s", __func__,
+ tdev->td_is_3410 ? "3410" : "5052");
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 7b8d564..8a3b531 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -657,6 +657,13 @@ UNUSUAL_DEV( 0x054c, 0x016a, 0x0000, 0x9999,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_FIX_INQUIRY ),
+
++/* Submitted by Ren Bigcren <bigcren.ren@sonymobile.com> */
++UNUSUAL_DEV( 0x054c, 0x02a5, 0x0100, 0x0100,
++ "Sony Corp.",
++ "MicroVault Flash Drive",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_NO_READ_CAPACITY_16 ),
++
+ /* floppy reports multiple luns */
+ UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210,
+ "SAMSUNG",
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index dc2eed1..4a88ac3 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -360,9 +360,22 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
+ }
+ EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
+
+-bool virtqueue_enable_cb(struct virtqueue *_vq)
++/**
++ * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
++ * @vq: the struct virtqueue we're talking about.
++ *
++ * This re-enables callbacks; it returns current queue state
++ * in an opaque unsigned value. This value should be later tested by
++ * virtqueue_poll, to detect a possible race between the driver checking for
++ * more work, and enabling callbacks.
++ *
++ * Caller must ensure we don't call this with other virtqueue
++ * operations at the same time (except where noted).
++ */
++unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
+ {
+ struct vring_virtqueue *vq = to_vvq(_vq);
++ u16 last_used_idx;
+
+ START_USE(vq);
+
+@@ -372,15 +385,45 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
+ * either clear the flags bit or point the event index at the next
+ * entry. Always do both to keep code simple. */
+ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+- vring_used_event(&vq->vring) = vq->last_used_idx;
++ vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
++ END_USE(vq);
++ return last_used_idx;
++}
++EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
++
++/**
++ * virtqueue_poll - query pending used buffers
++ * @vq: the struct virtqueue we're talking about.
++ * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
++ *
++ * Returns "true" if there are pending used buffers in the queue.
++ *
++ * This does not need to be serialized.
++ */
++bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
++{
++ struct vring_virtqueue *vq = to_vvq(_vq);
++
+ virtio_mb();
+- if (unlikely(more_used(vq))) {
+- END_USE(vq);
+- return false;
+- }
++ return (u16)last_used_idx != vq->vring.used->idx;
++}
++EXPORT_SYMBOL_GPL(virtqueue_poll);
+
+- END_USE(vq);
+- return true;
++/**
++ * virtqueue_enable_cb - restart callbacks after disable_cb.
++ * @vq: the struct virtqueue we're talking about.
++ *
++ * This re-enables callbacks; it returns "false" if there are pending
++ * buffers in the queue, to detect a possible race between the driver
++ * checking for more work, and enabling callbacks.
++ *
++ * Caller must ensure we don't call this with other virtqueue
++ * operations at the same time (except where noted).
++ */
++bool virtqueue_enable_cb(struct virtqueue *_vq)
++{
++ unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
++ return !virtqueue_poll(_vq, last_used_idx);
+ }
+ EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 8d4d53d..49eefdb 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -6560,6 +6560,7 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
+ int err = 0;
+ int ret;
+ int level;
++ bool root_dropped = false;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+@@ -6614,6 +6615,7 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
+ while (1) {
+ btrfs_tree_lock(path->nodes[level]);
+ btrfs_set_lock_blocking(path->nodes[level]);
++ path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+
+ ret = btrfs_lookup_extent_info(trans, root,
+ path->nodes[level]->start,
+@@ -6627,6 +6629,7 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
+ break;
+
+ btrfs_tree_unlock(path->nodes[level]);
++ path->locks[level] = 0;
+ WARN_ON(wc->refs[level] != 1);
+ level--;
+ }
+@@ -6707,11 +6710,21 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
+ free_extent_buffer(root->commit_root);
+ kfree(root);
+ }
++ root_dropped = true;
+ out_free:
+ btrfs_end_transaction_throttle(trans, tree_root);
+ kfree(wc);
+ btrfs_free_path(path);
+ out:
++ /*
++ * So if we need to stop dropping the snapshot for whatever reason we
++ * need to make sure to add it back to the dead root list so that we
++ * keep trying to do the work later. This also cleans up roots if we
++ * don't have it in the radix (like when we recover after a power fail
++ * or unmount) so we don't leak memory.
++ */
++ if (root_dropped == false)
++ btrfs_add_dead_root(root);
+ if (err)
+ btrfs_std_error(root->fs_info, err);
+ return;
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 9243103..9b8c131 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4696,11 +4696,16 @@ do_more:
+ * blocks being freed are metadata. these blocks shouldn't
+ * be used until this transaction is committed
+ */
++ retry:
+ new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+ if (!new_entry) {
+- ext4_mb_unload_buddy(&e4b);
+- err = -ENOMEM;
+- goto error_return;
++ /*
++ * We use a retry loop because
++ * ext4_free_blocks() is not allowed to fail.
++ */
++ cond_resched();
++ congestion_wait(BLK_RW_ASYNC, HZ/50);
++ goto retry;
+ }
+ new_entry->start_cluster = bit;
+ new_entry->group = block_group;
+diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
+index f0179c3..cd8703d 100644
+--- a/fs/lockd/svclock.c
++++ b/fs/lockd/svclock.c
+@@ -913,6 +913,7 @@ nlmsvc_retry_blocked(void)
+ unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
+ struct nlm_block *block;
+
++ spin_lock(&nlm_blocked_lock);
+ while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
+ block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
+
+@@ -922,6 +923,7 @@ nlmsvc_retry_blocked(void)
+ timeout = block->b_when - jiffies;
+ break;
+ }
++ spin_unlock(&nlm_blocked_lock);
+
+ dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
+ block, block->b_when);
+@@ -931,7 +933,9 @@ nlmsvc_retry_blocked(void)
+ retry_deferred_block(block);
+ } else
+ nlmsvc_grant_blocked(block);
++ spin_lock(&nlm_blocked_lock);
+ }
++ spin_unlock(&nlm_blocked_lock);
+
+ return timeout;
+ }
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 1ec1fde..561a3dc 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -782,9 +782,10 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ }
+ *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
+ flags, current_cred());
+- if (IS_ERR(*filp))
++ if (IS_ERR(*filp)) {
+ host_err = PTR_ERR(*filp);
+- else
++ *filp = NULL;
++ } else
+ host_err = ima_file_check(*filp, access);
+ out_nfserr:
+ err = nfserrno(host_err);
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index 9fde1c0..9860f6b 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -118,6 +118,7 @@ static int fill_event_metadata(struct fsnotify_group *group,
+ metadata->event_len = FAN_EVENT_METADATA_LEN;
+ metadata->metadata_len = FAN_EVENT_METADATA_LEN;
+ metadata->vers = FANOTIFY_METADATA_VERSION;
++ metadata->reserved = 0;
+ metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
+ metadata->pid = pid_vnr(event->tgid);
+ if (unlikely(event->mask & FAN_Q_OVERFLOW))
+diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
+index b5f927f..732c962 100644
+--- a/include/linux/if_pppox.h
++++ b/include/linux/if_pppox.h
+@@ -128,11 +128,11 @@ struct pppoe_tag {
+
+ struct pppoe_hdr {
+ #if defined(__LITTLE_ENDIAN_BITFIELD)
+- __u8 ver : 4;
+ __u8 type : 4;
++ __u8 ver : 4;
+ #elif defined(__BIG_ENDIAN_BITFIELD)
+- __u8 type : 4;
+ __u8 ver : 4;
++ __u8 type : 4;
+ #else
+ #error "Please fix <asm/byteorder.h>"
+ #endif
+diff --git a/include/linux/virtio.h b/include/linux/virtio.h
+index 4c069d8b..96c7843 100644
+--- a/include/linux/virtio.h
++++ b/include/linux/virtio.h
+@@ -96,6 +96,10 @@ void virtqueue_disable_cb(struct virtqueue *vq);
+
+ bool virtqueue_enable_cb(struct virtqueue *vq);
+
++unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
++
++bool virtqueue_poll(struct virtqueue *vq, unsigned);
++
+ bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
+
+ void *virtqueue_detach_unused_buf(struct virtqueue *vq);
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index cbc6bb0..44b1110 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -81,6 +81,9 @@ extern int ipv6_dev_get_saddr(struct net *net,
+ const struct in6_addr *daddr,
+ unsigned int srcprefs,
+ struct in6_addr *saddr);
++extern int __ipv6_get_lladdr(struct inet6_dev *idev,
++ struct in6_addr *addr,
++ unsigned char banned_flags);
+ extern int ipv6_get_lladdr(struct net_device *dev,
+ struct in6_addr *addr,
+ unsigned char banned_flags);
+diff --git a/include/net/udp.h b/include/net/udp.h
+index 3b285f4..e158330 100644
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -180,6 +180,7 @@ extern int udp_get_port(struct sock *sk, unsigned short snum,
+ extern void udp_err(struct sk_buff *, u32);
+ extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
+ struct msghdr *msg, size_t len);
++extern int udp_push_pending_frames(struct sock *sk);
+ extern void udp_flush_pending_frames(struct sock *sk);
+ extern int udp_rcv(struct sk_buff *skb);
+ extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
+index 75271b9..7d28aff 100644
+--- a/include/xen/interface/io/ring.h
++++ b/include/xen/interface/io/ring.h
+@@ -188,6 +188,11 @@ struct __name##_back_ring { \
+ #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
+ (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
+
++/* Ill-behaved frontend determination: Can there be this many requests? */
++#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
++ (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
++
++
+ #define RING_PUSH_REQUESTS(_r) do { \
+ wmb(); /* back sees requests /before/ updated producer index */ \
+ (_r)->sring->req_prod = (_r)->req_prod_pvt; \
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 0ec6c34..a584ad9 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -631,7 +631,15 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+
+ memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
+ max_data->pid = tsk->pid;
+- max_data->uid = task_uid(tsk);
++ /*
++ * If tsk == current, then use current_uid(), as that does not use
++ * RCU. The irq tracer can be called out of RCU scope.
++ */
++ if (tsk == current)
++ max_data->uid = current_uid();
++ else
++ max_data->uid = task_uid(tsk);
++
+ max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
+ max_data->policy = tsk->policy;
+ max_data->rt_priority = tsk->rt_priority;
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index 0cccca8..b40d3da 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -72,6 +72,8 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
+ {
+ struct vlan_priority_tci_mapping *mp;
+
++ smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
++
+ mp = vlan_dev_info(dev)->egress_priority_map[(skb->priority & 0xF)];
+ while (mp) {
+ if (mp->priority == skb->priority) {
+@@ -232,6 +234,11 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
+ np->next = mp;
+ np->priority = skb_prio;
+ np->vlan_qos = vlan_qos;
++ /* Before inserting this element in hash table, make sure all its fields
++ * are committed to memory.
++ * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask()
++ */
++ smp_wmb();
+ vlan->egress_priority_map[skb_prio & 0xF] = np;
+ if (vlan_qos)
+ vlan->nr_egress_mappings++;
+diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
+index de8df95..2ee3879 100644
+--- a/net/9p/trans_common.c
++++ b/net/9p/trans_common.c
+@@ -24,11 +24,11 @@
+ */
+ void p9_release_pages(struct page **pages, int nr_pages)
+ {
+- int i = 0;
+- while (pages[i] && nr_pages--) {
+- put_page(pages[i]);
+- i++;
+- }
++ int i;
++
++ for (i = 0; i < nr_pages; i++)
++ if (pages[i])
++ put_page(pages[i]);
+ }
+ EXPORT_SYMBOL(p9_release_pages);
+
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 5ac1811..b81500c 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -467,8 +467,9 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
+ skb_set_transport_header(skb, skb->len);
+ mldq = (struct mld_msg *) icmp6_hdr(skb);
+
+- interval = ipv6_addr_any(group) ? br->multicast_last_member_interval :
+- br->multicast_query_response_interval;
++ interval = ipv6_addr_any(group) ?
++ br->multicast_query_response_interval :
++ br->multicast_last_member_interval;
+
+ mldq->mld_type = ICMPV6_MGM_QUERY;
+ mldq->mld_code = 0;
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 5b9709f..0ea3fd3 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -237,7 +237,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
+ we must kill timers etc. and move
+ it to safe state.
+ */
+- skb_queue_purge(&n->arp_queue);
++ __skb_queue_purge(&n->arp_queue);
+ n->output = neigh_blackhole;
+ if (n->nud_state & NUD_VALID)
+ n->nud_state = NUD_NOARP;
+@@ -291,7 +291,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
+ if (!n)
+ goto out_entries;
+
+- skb_queue_head_init(&n->arp_queue);
++ __skb_queue_head_init(&n->arp_queue);
+ rwlock_init(&n->lock);
+ seqlock_init(&n->ha_lock);
+ n->updated = n->used = now;
+@@ -701,7 +701,9 @@ void neigh_destroy(struct neighbour *neigh)
+ if (neigh_del_timer(neigh))
+ printk(KERN_WARNING "Impossible event.\n");
+
+- skb_queue_purge(&neigh->arp_queue);
++ write_lock_bh(&neigh->lock);
++ __skb_queue_purge(&neigh->arp_queue);
++ write_unlock_bh(&neigh->lock);
+
+ dev_put(neigh->dev);
+ neigh_parms_put(neigh->parms);
+@@ -843,7 +845,7 @@ static void neigh_invalidate(struct neighbour *neigh)
+ neigh->ops->error_report(neigh, skb);
+ write_lock(&neigh->lock);
+ }
+- skb_queue_purge(&neigh->arp_queue);
++ __skb_queue_purge(&neigh->arp_queue);
+ }
+
+ static void neigh_probe(struct neighbour *neigh)
+@@ -1176,7 +1178,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+
+ write_lock_bh(&neigh->lock);
+ }
+- skb_queue_purge(&neigh->arp_queue);
++ __skb_queue_purge(&neigh->arp_queue);
+ }
+ out:
+ if (update_isrouter) {
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 5a65eea..5decc93 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -766,7 +766,7 @@ send:
+ /*
+ * Push out all pending data as one UDP datagram. Socket is locked.
+ */
+-static int udp_push_pending_frames(struct sock *sk)
++int udp_push_pending_frames(struct sock *sk)
+ {
+ struct udp_sock *up = udp_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
+@@ -785,6 +785,7 @@ out:
+ up->pending = 0;
+ return err;
+ }
++EXPORT_SYMBOL(udp_push_pending_frames);
+
+ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len)
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index d603caa..314bda2 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1236,6 +1236,23 @@ try_nextdev:
+ }
+ EXPORT_SYMBOL(ipv6_dev_get_saddr);
+
++int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
++ unsigned char banned_flags)
++{
++ struct inet6_ifaddr *ifp;
++ int err = -EADDRNOTAVAIL;
++
++ list_for_each_entry(ifp, &idev->addr_list, if_list) {
++ if (ifp->scope == IFA_LINK &&
++ !(ifp->flags & banned_flags)) {
++ ipv6_addr_copy(addr, &ifp->addr);
++ err = 0;
++ break;
++ }
++ }
++ return err;
++}
++
+ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
+ unsigned char banned_flags)
+ {
+@@ -1245,17 +1262,8 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
+ rcu_read_lock();
+ idev = __in6_dev_get(dev);
+ if (idev) {
+- struct inet6_ifaddr *ifp;
+-
+ read_lock_bh(&idev->lock);
+- list_for_each_entry(ifp, &idev->addr_list, if_list) {
+- if (ifp->scope == IFA_LINK &&
+- !(ifp->flags & banned_flags)) {
+- ipv6_addr_copy(addr, &ifp->addr);
+- err = 0;
+- break;
+- }
+- }
++ err = __ipv6_get_lladdr(idev, addr, banned_flags);
+ read_unlock_bh(&idev->lock);
+ }
+ rcu_read_unlock();
+@@ -2434,6 +2442,9 @@ static void init_loopback(struct net_device *dev)
+ if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
+ continue;
+
++ if (sp_ifa->rt)
++ continue;
++
+ sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
+
+ /* Failure cases are ignored */
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 6aadaa8..db60043 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -909,11 +909,17 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
+ const struct flowi6 *fl6)
+ {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+- struct rt6_info *rt = (struct rt6_info *)dst;
++ struct rt6_info *rt;
+
+ if (!dst)
+ goto out;
+
++ if (dst->ops->family != AF_INET6) {
++ dst_release(dst);
++ return NULL;
++ }
++
++ rt = (struct rt6_info *)dst;
+ /* Yes, checking route validity in not connected
+ * case is not very simple. Take into account,
+ * that we do not support routing by source, TOS,
+@@ -1178,11 +1184,12 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
+ return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
+ }
+
+-static void ip6_append_data_mtu(int *mtu,
++static void ip6_append_data_mtu(unsigned int *mtu,
+ int *maxfraglen,
+ unsigned int fragheaderlen,
+ struct sk_buff *skb,
+- struct rt6_info *rt)
++ struct rt6_info *rt,
++ bool pmtuprobe)
+ {
+ if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
+ if (skb == NULL) {
+@@ -1194,7 +1201,9 @@ static void ip6_append_data_mtu(int *mtu,
+ * this fragment is not first, the headers
+ * space is regarded as data space.
+ */
+- *mtu = dst_mtu(rt->dst.path);
++ *mtu = min(*mtu, pmtuprobe ?
++ rt->dst.dev->mtu :
++ dst_mtu(rt->dst.path));
+ }
+ *maxfraglen = ((*mtu - fragheaderlen) & ~7)
+ + fragheaderlen - sizeof(struct frag_hdr);
+@@ -1211,11 +1220,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct inet_cork *cork;
+ struct sk_buff *skb, *skb_prev = NULL;
+- unsigned int maxfraglen, fragheaderlen;
++ unsigned int maxfraglen, fragheaderlen, mtu;
+ int exthdrlen;
+ int dst_exthdrlen;
+ int hh_len;
+- int mtu;
+ int copy;
+ int err;
+ int offset = 0;
+@@ -1378,7 +1386,9 @@ alloc_new_skb:
+ /* update mtu and maxfraglen if necessary */
+ if (skb == NULL || skb_prev == NULL)
+ ip6_append_data_mtu(&mtu, &maxfraglen,
+- fragheaderlen, skb, rt);
++ fragheaderlen, skb, rt,
++ np->pmtudisc ==
++ IPV6_PMTUDISC_PROBE);
+
+ skb_prev = skb;
+
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index f2d74ea..c7ec4bb 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1334,8 +1334,9 @@ mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
+ return scount;
+ }
+
+-static struct sk_buff *mld_newpack(struct net_device *dev, int size)
++static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
+ {
++ struct net_device *dev = idev->dev;
+ struct net *net = dev_net(dev);
+ struct sock *sk = net->ipv6.igmp_sk;
+ struct sk_buff *skb;
+@@ -1358,7 +1359,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size)
+
+ skb_reserve(skb, LL_RESERVED_SPACE(dev));
+
+- if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
++ if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
+ /* <draft-ietf-magma-mld-source-05.txt>:
+ * use unspecified address as the source address
+ * when a valid link-local address is not available.
+@@ -1461,7 +1462,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ struct mld2_grec *pgr;
+
+ if (!skb)
+- skb = mld_newpack(dev, dev->mtu);
++ skb = mld_newpack(pmc->idev, dev->mtu);
+ if (!skb)
+ return NULL;
+ pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
+@@ -1481,7 +1482,8 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ int type, int gdeleted, int sdeleted)
+ {
+- struct net_device *dev = pmc->idev->dev;
++ struct inet6_dev *idev = pmc->idev;
++ struct net_device *dev = idev->dev;
+ struct mld2_report *pmr;
+ struct mld2_grec *pgr = NULL;
+ struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
+@@ -1510,7 +1512,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
+ if (skb)
+ mld_sendpack(skb);
+- skb = mld_newpack(dev, dev->mtu);
++ skb = mld_newpack(idev, dev->mtu);
+ }
+ }
+ first = 1;
+@@ -1537,7 +1539,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ pgr->grec_nsrcs = htons(scount);
+ if (skb)
+ mld_sendpack(skb);
+- skb = mld_newpack(dev, dev->mtu);
++ skb = mld_newpack(idev, dev->mtu);
+ first = 1;
+ scount = 0;
+ }
+@@ -1592,8 +1594,8 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
+ struct sk_buff *skb = NULL;
+ int type;
+
++ read_lock_bh(&idev->lock);
+ if (!pmc) {
+- read_lock_bh(&idev->lock);
+ for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
+ if (pmc->mca_flags & MAF_NOREPORT)
+ continue;
+@@ -1605,7 +1607,6 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
+ skb = add_grec(skb, pmc, type, 0, 0);
+ spin_unlock_bh(&pmc->mca_lock);
+ }
+- read_unlock_bh(&idev->lock);
+ } else {
+ spin_lock_bh(&pmc->mca_lock);
+ if (pmc->mca_sfcount[MCAST_EXCLUDE])
+@@ -1615,6 +1616,7 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
+ skb = add_grec(skb, pmc, type, 0, 0);
+ spin_unlock_bh(&pmc->mca_lock);
+ }
++ read_unlock_bh(&idev->lock);
+ if (skb)
+ mld_sendpack(skb);
+ }
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 20f0812..f9e496b 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -893,11 +893,16 @@ static int udp_v6_push_pending_frames(struct sock *sk)
+ struct udphdr *uh;
+ struct udp_sock *up = udp_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
+- struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
++ struct flowi6 *fl6;
+ int err = 0;
+ int is_udplite = IS_UDPLITE(sk);
+ __wsum csum = 0;
+
++ if (up->pending == AF_INET)
++ return udp_push_pending_frames(sk);
++
++ fl6 = &inet->cork.fl.u.ip6;
++
+ /* Grab the skbuff where UDP header space exists. */
+ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
+ goto out;
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 1e733e9..6fefdfc 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1705,6 +1705,7 @@ static int key_notify_sa_flush(const struct km_event *c)
+ hdr->sadb_msg_version = PF_KEY_V2;
+ hdr->sadb_msg_errno = (uint8_t) 0;
+ hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
++ hdr->sadb_msg_reserved = 0;
+
+ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+
+@@ -2686,6 +2687,7 @@ static int key_notify_policy_flush(const struct km_event *c)
+ hdr->sadb_msg_version = PF_KEY_V2;
+ hdr->sadb_msg_errno = (uint8_t) 0;
+ hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
++ hdr->sadb_msg_reserved = 0;
+ pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+ return 0;
+
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 74410e6..e579006 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -1778,7 +1778,8 @@ static const struct proto_ops pppol2tp_ops = {
+
+ static const struct pppox_proto pppol2tp_proto = {
+ .create = pppol2tp_create,
+- .ioctl = pppol2tp_ioctl
++ .ioctl = pppol2tp_ioctl,
++ .owner = THIS_MODULE,
+ };
+
+ #ifdef CONFIG_L2TP_V3
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index 3e16c6a..dc24ba9 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -1586,11 +1586,11 @@ out_cud_release:
+ case SIOCX25CALLACCPTAPPRV: {
+ rc = -EINVAL;
+ lock_sock(sk);
+- if (sk->sk_state != TCP_CLOSE)
+- break;
+- clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
++ if (sk->sk_state == TCP_CLOSE) {
++ clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
++ rc = 0;
++ }
+ release_sock(sk);
+- rc = 0;
+ break;
+ }
+
+@@ -1598,14 +1598,15 @@ out_cud_release:
+ rc = -EINVAL;
+ lock_sock(sk);
+ if (sk->sk_state != TCP_ESTABLISHED)
+- break;
++ goto out_sendcallaccpt_release;
+ /* must call accptapprv above */
+ if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags))
+- break;
++ goto out_sendcallaccpt_release;
+ x25_write_internal(sk, X25_CALL_ACCEPTED);
+ x25->state = X25_STATE_3;
+- release_sock(sk);
+ rc = 0;
++out_sendcallaccpt_release:
++ release_sock(sk);
+ break;
+ }
+
+diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c
+index 76e0d56..823359e 100644
+--- a/sound/arm/pxa2xx-pcm-lib.c
++++ b/sound/arm/pxa2xx-pcm-lib.c
+@@ -166,7 +166,9 @@ void pxa2xx_pcm_dma_irq(int dma_ch, void *dev_id)
+ } else {
+ printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n",
+ rtd->params->name, dma_ch, dcsr);
++ snd_pcm_stream_lock(substream);
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock(substream);
+ }
+ }
+ EXPORT_SYMBOL(pxa2xx_pcm_dma_irq);
+diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
+index f4b9e2b..fbf0bcd 100644
+--- a/sound/pci/asihpi/asihpi.c
++++ b/sound/pci/asihpi/asihpi.c
+@@ -768,7 +768,10 @@ static void snd_card_asihpi_timer_function(unsigned long data)
+ s->number);
+ ds->drained_count++;
+ if (ds->drained_count > 2) {
++ unsigned long flags;
++ snd_pcm_stream_lock_irqsave(s, flags);
+ snd_pcm_stop(s, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock_irqrestore(s, flags);
+ continue;
+ }
+ } else {
+diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
+index 15e4e5e..6faa173 100644
+--- a/sound/pci/atiixp.c
++++ b/sound/pci/atiixp.c
+@@ -688,7 +688,9 @@ static void snd_atiixp_xrun_dma(struct atiixp *chip, struct atiixp_dma *dma)
+ if (! dma->substream || ! dma->running)
+ return;
+ snd_printdd("atiixp: XRUN detected (DMA %d)\n", dma->ops->type);
++ snd_pcm_stream_lock(dma->substream);
+ snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock(dma->substream);
+ }
+
+ /*
+diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
+index 57bf8f4..d752120 100644
+--- a/sound/pci/atiixp_modem.c
++++ b/sound/pci/atiixp_modem.c
+@@ -638,7 +638,9 @@ static void snd_atiixp_xrun_dma(struct atiixp_modem *chip,
+ if (! dma->substream || ! dma->running)
+ return;
+ snd_printdd("atiixp-modem: XRUN detected (DMA %d)\n", dma->ops->type);
++ snd_pcm_stream_lock(dma->substream);
+ snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock(dma->substream);
+ }
+
+ /*
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index d148a2b..55d9b30 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1897,6 +1897,8 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
+ { .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_generic_hdmi },
+ { .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_generic_hdmi },
+ { .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_generic_hdmi },
++{ .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_generic_hdmi },
++{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_generic_hdmi },
+ { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
+ { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
+ { .id = 0x80860054, .name = "IbexPeak HDMI", .patch = patch_generic_hdmi },
+@@ -1943,6 +1945,8 @@ MODULE_ALIAS("snd-hda-codec-id:10de0041");
+ MODULE_ALIAS("snd-hda-codec-id:10de0042");
+ MODULE_ALIAS("snd-hda-codec-id:10de0043");
+ MODULE_ALIAS("snd-hda-codec-id:10de0044");
++MODULE_ALIAS("snd-hda-codec-id:10de0051");
++MODULE_ALIAS("snd-hda-codec-id:10de0060");
+ MODULE_ALIAS("snd-hda-codec-id:10de0067");
+ MODULE_ALIAS("snd-hda-codec-id:10de8001");
+ MODULE_ALIAS("snd-hda-codec-id:17e80047");
+diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
+index b7cf246..d58c575 100644
+--- a/sound/soc/codecs/max98088.c
++++ b/sound/soc/codecs/max98088.c
+@@ -1595,7 +1595,7 @@ static int max98088_dai2_digital_mute(struct snd_soc_dai *codec_dai, int mute)
+
+ static void max98088_sync_cache(struct snd_soc_codec *codec)
+ {
+- u16 *reg_cache = codec->reg_cache;
++ u8 *reg_cache = codec->reg_cache;
+ int i;
+
+ if (!codec->cache_sync)
+diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
+index bbcf921..b5d4a97 100644
+--- a/sound/soc/codecs/sgtl5000.c
++++ b/sound/soc/codecs/sgtl5000.c
+@@ -38,7 +38,7 @@
+ static const u16 sgtl5000_regs[SGTL5000_MAX_REG_OFFSET] = {
+ [SGTL5000_CHIP_CLK_CTRL] = 0x0008,
+ [SGTL5000_CHIP_I2S_CTRL] = 0x0010,
+- [SGTL5000_CHIP_SSS_CTRL] = 0x0008,
++ [SGTL5000_CHIP_SSS_CTRL] = 0x0010,
+ [SGTL5000_CHIP_DAC_VOL] = 0x3c3c,
+ [SGTL5000_CHIP_PAD_STRENGTH] = 0x015f,
+ [SGTL5000_CHIP_ANA_HP_CTRL] = 0x1818,
+diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
+index 8a9f435..d3a68bb 100644
+--- a/sound/soc/codecs/sgtl5000.h
++++ b/sound/soc/codecs/sgtl5000.h
+@@ -347,7 +347,7 @@
+ #define SGTL5000_PLL_INT_DIV_MASK 0xf800
+ #define SGTL5000_PLL_INT_DIV_SHIFT 11
+ #define SGTL5000_PLL_INT_DIV_WIDTH 5
+-#define SGTL5000_PLL_FRAC_DIV_MASK 0x0700
++#define SGTL5000_PLL_FRAC_DIV_MASK 0x07ff
+ #define SGTL5000_PLL_FRAC_DIV_SHIFT 0
+ #define SGTL5000_PLL_FRAC_DIV_WIDTH 11
+
+diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c
+index 55efc2b..75babae 100644
+--- a/sound/soc/s6000/s6000-pcm.c
++++ b/sound/soc/s6000/s6000-pcm.c
+@@ -128,7 +128,9 @@ static irqreturn_t s6000_pcm_irq(int irq, void *data)
+ substream->runtime &&
+ snd_pcm_running(substream)) {
+ dev_dbg(pcm->dev, "xrun\n");
++ snd_pcm_stream_lock(substream);
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock(substream);
+ ret = IRQ_HANDLED;
+ }
+
+diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
+index d144cdb..888a7c7 100644
+--- a/sound/usb/6fire/pcm.c
++++ b/sound/usb/6fire/pcm.c
+@@ -541,7 +541,7 @@ static snd_pcm_uframes_t usb6fire_pcm_pointer(
+ snd_pcm_uframes_t ret;
+
+ if (rt->panic || !sub)
+- return SNDRV_PCM_STATE_XRUN;
++ return SNDRV_PCM_POS_XRUN;
+
+ spin_lock_irqsave(&sub->lock, flags);
+ ret = sub->dma_off;
+@@ -640,17 +640,25 @@ int __devinit usb6fire_pcm_init(struct sfire_chip *chip)
+ void usb6fire_pcm_abort(struct sfire_chip *chip)
+ {
+ struct pcm_runtime *rt = chip->pcm;
++ unsigned long flags;
+ int i;
+
+ if (rt) {
+ rt->panic = true;
+
+- if (rt->playback.instance)
++ if (rt->playback.instance) {
++ snd_pcm_stream_lock_irqsave(rt->playback.instance, flags);
+ snd_pcm_stop(rt->playback.instance,
+ SNDRV_PCM_STATE_XRUN);
+- if (rt->capture.instance)
++ snd_pcm_stream_unlock_irqrestore(rt->playback.instance, flags);
++ }
++
++ if (rt->capture.instance) {
++ snd_pcm_stream_lock_irqsave(rt->capture.instance, flags);
+ snd_pcm_stop(rt->capture.instance,
+ SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock_irqrestore(rt->capture.instance, flags);
++ }
+
+ for (i = 0; i < PCM_N_URBS; i++) {
+ usb_poison_urb(&rt->in_urbs[i].instance);
+diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c
+index c0609c2..84052cf 100644
+--- a/sound/usb/misc/ua101.c
++++ b/sound/usb/misc/ua101.c
+@@ -613,14 +613,24 @@ static int start_usb_playback(struct ua101 *ua)
+
+ static void abort_alsa_capture(struct ua101 *ua)
+ {
+- if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states))
++ unsigned long flags;
++
++ if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states)) {
++ snd_pcm_stream_lock_irqsave(ua->capture.substream, flags);
+ snd_pcm_stop(ua->capture.substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock_irqrestore(ua->capture.substream, flags);
++ }
+ }
+
+ static void abort_alsa_playback(struct ua101 *ua)
+ {
+- if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states))
++ unsigned long flags;
++
++ if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states)) {
++ snd_pcm_stream_lock_irqsave(ua->playback.substream, flags);
+ snd_pcm_stop(ua->playback.substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock_irqrestore(ua->playback.substream, flags);
++ }
+ }
+
+ static int set_stream_hw(struct ua101 *ua, struct snd_pcm_substream *substream,
+diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
+index 6ffb371..d5724d8 100644
+--- a/sound/usb/usx2y/usbusx2yaudio.c
++++ b/sound/usb/usx2y/usbusx2yaudio.c
+@@ -273,7 +273,11 @@ static void usX2Y_clients_stop(struct usX2Ydev *usX2Y)
+ struct snd_usX2Y_substream *subs = usX2Y->subs[s];
+ if (subs) {
+ if (atomic_read(&subs->state) >= state_PRERUNNING) {
++ unsigned long flags;
++
++ snd_pcm_stream_lock_irqsave(subs->pcm_substream, flags);
+ snd_pcm_stop(subs->pcm_substream, SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock_irqrestore(subs->pcm_substream, flags);
+ }
+ for (u = 0; u < NRURBS; u++) {
+ struct urb *urb = subs->urb[u];
diff --git a/1050_linux-3.2.51.patch b/1050_linux-3.2.51.patch
new file mode 100644
index 00000000..5d5832b1
--- /dev/null
+++ b/1050_linux-3.2.51.patch
@@ -0,0 +1,3886 @@
+diff --git a/Makefile b/Makefile
+index 0799e8e..0f11936 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 50
++SUBLEVEL = 51
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile
+index 4759fe7..2cc3cc5 100644
+--- a/arch/alpha/Makefile
++++ b/arch/alpha/Makefile
+@@ -12,7 +12,7 @@ NM := $(NM) -B
+
+ LDFLAGS_vmlinux := -static -N #-relax
+ CHECKFLAGS += -D__alpha__ -m64
+-cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data
++cflags-y := -pipe -mno-fp-regs -ffixed-8
+ cflags-y += $(call cc-option, -fno-jump-tables)
+
+ cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 27bcd12..790ea68 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1,7 +1,6 @@
+ config ARM
+ bool
+ default y
+- select HAVE_AOUT
+ select HAVE_DMA_API_DEBUG
+ select HAVE_IDE if PCI || ISA || PCMCIA
+ select HAVE_MEMBLOCK
+diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
+deleted file mode 100644
+index 92f10cb..0000000
+--- a/arch/arm/include/asm/a.out-core.h
++++ /dev/null
+@@ -1,45 +0,0 @@
+-/* a.out coredump register dumper
+- *
+- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+- * Written by David Howells (dhowells@redhat.com)
+- *
+- * This program is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public Licence
+- * as published by the Free Software Foundation; either version
+- * 2 of the Licence, or (at your option) any later version.
+- */
+-
+-#ifndef _ASM_A_OUT_CORE_H
+-#define _ASM_A_OUT_CORE_H
+-
+-#ifdef __KERNEL__
+-
+-#include <linux/user.h>
+-#include <linux/elfcore.h>
+-
+-/*
+- * fill in the user structure for an a.out core dump
+- */
+-static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
+-{
+- struct task_struct *tsk = current;
+-
+- dump->magic = CMAGIC;
+- dump->start_code = tsk->mm->start_code;
+- dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
+-
+- dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
+- dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
+- dump->u_ssize = 0;
+-
+- memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
+-
+- if (dump->start_stack < 0x04000000)
+- dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
+-
+- dump->regs = *regs;
+- dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
+-}
+-
+-#endif /* __KERNEL__ */
+-#endif /* _ASM_A_OUT_CORE_H */
+diff --git a/arch/arm/include/asm/a.out.h b/arch/arm/include/asm/a.out.h
+deleted file mode 100644
+index 083894b..0000000
+--- a/arch/arm/include/asm/a.out.h
++++ /dev/null
+@@ -1,34 +0,0 @@
+-#ifndef __ARM_A_OUT_H__
+-#define __ARM_A_OUT_H__
+-
+-#include <linux/personality.h>
+-#include <linux/types.h>
+-
+-struct exec
+-{
+- __u32 a_info; /* Use macros N_MAGIC, etc for access */
+- __u32 a_text; /* length of text, in bytes */
+- __u32 a_data; /* length of data, in bytes */
+- __u32 a_bss; /* length of uninitialized data area for file, in bytes */
+- __u32 a_syms; /* length of symbol table data in file, in bytes */
+- __u32 a_entry; /* start address */
+- __u32 a_trsize; /* length of relocation info for text, in bytes */
+- __u32 a_drsize; /* length of relocation info for data, in bytes */
+-};
+-
+-/*
+- * This is always the same
+- */
+-#define N_TXTADDR(a) (0x00008000)
+-
+-#define N_TRSIZE(a) ((a).a_trsize)
+-#define N_DRSIZE(a) ((a).a_drsize)
+-#define N_SYMSIZE(a) ((a).a_syms)
+-
+-#define M_ARM 103
+-
+-#ifndef LIBRARY_START_TEXT
+-#define LIBRARY_START_TEXT (0x00c00000)
+-#endif
+-
+-#endif /* __A_OUT_GNU_H__ */
+diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
+index b2d9df5..3352451 100644
+--- a/arch/arm/include/asm/processor.h
++++ b/arch/arm/include/asm/processor.h
+@@ -54,7 +54,6 @@ struct thread_struct {
+
+ #define start_thread(regs,pc,sp) \
+ ({ \
+- unsigned long *stack = (unsigned long *)sp; \
+ set_fs(USER_DS); \
+ memset(regs->uregs, 0, sizeof(regs->uregs)); \
+ if (current->personality & ADDR_LIMIT_32BIT) \
+@@ -66,9 +65,6 @@ struct thread_struct {
+ regs->ARM_cpsr |= PSR_ENDSTATE; \
+ regs->ARM_pc = pc & ~1; /* pc */ \
+ regs->ARM_sp = sp; /* sp */ \
+- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
+- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
+- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
+ nommu_start_thread(regs); \
+ })
+
+diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
+index 778d248..4a2db48 100644
+--- a/arch/arm/kernel/perf_event.c
++++ b/arch/arm/kernel/perf_event.c
+@@ -116,7 +116,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
+ static int
+ armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+ {
+- int mapping = (*event_map)[config];
++ int mapping;
++
++ if (config >= PERF_COUNT_HW_MAX)
++ return -ENOENT;
++
++ mapping = (*event_map)[config];
+ return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
+ }
+
+@@ -326,6 +331,9 @@ validate_event(struct pmu_hw_events *hw_events,
+ struct hw_perf_event fake_event = event->hw;
+ struct pmu *leader_pmu = event->group_leader->pmu;
+
++ if (is_software_event(event))
++ return 1;
++
+ if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+ return 1;
+
+diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
+index a6990cb..a68b983 100644
+--- a/arch/cris/kernel/vmlinux.lds.S
++++ b/arch/cris/kernel/vmlinux.lds.S
+@@ -52,6 +52,7 @@ SECTIONS
+
+ EXCEPTION_TABLE(4)
+
++ _sdata = .;
+ RODATA
+
+ . = ALIGN (4);
+diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
+index e711ace..54891e1 100644
+--- a/arch/hexagon/kernel/dma.c
++++ b/arch/hexagon/kernel/dma.c
+@@ -22,6 +22,7 @@
+ #include <linux/bootmem.h>
+ #include <linux/genalloc.h>
+ #include <asm/dma-mapping.h>
++#include <linux/module.h>
+
+ struct dma_map_ops *dma_ops;
+ EXPORT_SYMBOL(dma_ops);
+diff --git a/arch/hexagon/kernel/ptrace.c b/arch/hexagon/kernel/ptrace.c
+index bea3f08..8fe0349 100644
+--- a/arch/hexagon/kernel/ptrace.c
++++ b/arch/hexagon/kernel/ptrace.c
+@@ -28,6 +28,7 @@
+ #include <linux/ptrace.h>
+ #include <linux/regset.h>
+ #include <linux/user.h>
++#include <linux/elf.h>
+
+ #include <asm/system.h>
+ #include <asm/user.h>
+diff --git a/arch/hexagon/kernel/time.c b/arch/hexagon/kernel/time.c
+index 6bee15c..5d9b33b 100644
+--- a/arch/hexagon/kernel/time.c
++++ b/arch/hexagon/kernel/time.c
+@@ -28,6 +28,7 @@
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
++#include <linux/module.h>
+
+ #include <asm/timer-regs.h>
+ #include <asm/hexagon_vm.h>
+diff --git a/arch/hexagon/kernel/vdso.c b/arch/hexagon/kernel/vdso.c
+index 16277c3..e4ceedb 100644
+--- a/arch/hexagon/kernel/vdso.c
++++ b/arch/hexagon/kernel/vdso.c
+@@ -21,6 +21,7 @@
+ #include <linux/err.h>
+ #include <linux/mm.h>
+ #include <linux/vmalloc.h>
++#include <linux/binfmts.h>
+
+ #include <asm/vdso.h>
+
+diff --git a/arch/m32r/boot/compressed/Makefile b/arch/m32r/boot/compressed/Makefile
+index 177716b..01729c2 100644
+--- a/arch/m32r/boot/compressed/Makefile
++++ b/arch/m32r/boot/compressed/Makefile
+@@ -43,9 +43,9 @@ endif
+
+ OBJCOPYFLAGS += -R .empty_zero_page
+
+-suffix_$(CONFIG_KERNEL_GZIP) = gz
+-suffix_$(CONFIG_KERNEL_BZIP2) = bz2
+-suffix_$(CONFIG_KERNEL_LZMA) = lzma
++suffix-$(CONFIG_KERNEL_GZIP) = gz
++suffix-$(CONFIG_KERNEL_BZIP2) = bz2
++suffix-$(CONFIG_KERNEL_LZMA) = lzma
+
+ $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE
+ $(call if_changed,ld)
+diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c
+index 370d608..28a0952 100644
+--- a/arch/m32r/boot/compressed/misc.c
++++ b/arch/m32r/boot/compressed/misc.c
+@@ -28,7 +28,7 @@ static unsigned long free_mem_ptr;
+ static unsigned long free_mem_end_ptr;
+
+ #ifdef CONFIG_KERNEL_BZIP2
+-static void *memset(void *s, int c, size_t n)
++void *memset(void *s, int c, size_t n)
+ {
+ char *ss = s;
+
+@@ -39,6 +39,16 @@ static void *memset(void *s, int c, size_t n)
+ #endif
+
+ #ifdef CONFIG_KERNEL_GZIP
++void *memcpy(void *dest, const void *src, size_t n)
++{
++ char *d = dest;
++ const char *s = src;
++ while (n--)
++ *d++ = *s++;
++
++ return dest;
++}
++
+ #define BOOT_HEAP_SIZE 0x10000
+ #include "../../../../lib/decompress_inflate.c"
+ #endif
+diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c
+index 2291a7d..fa277ae 100644
+--- a/arch/m68k/emu/natfeat.c
++++ b/arch/m68k/emu/natfeat.c
+@@ -18,9 +18,11 @@
+ #include <asm/machdep.h>
+ #include <asm/natfeat.h>
+
++extern long nf_get_id2(const char *feature_name);
++
+ asm("\n"
+-" .global nf_get_id,nf_call\n"
+-"nf_get_id:\n"
++" .global nf_get_id2,nf_call\n"
++"nf_get_id2:\n"
+ " .short 0x7300\n"
+ " rts\n"
+ "nf_call:\n"
+@@ -29,12 +31,25 @@ asm("\n"
+ "1: moveq.l #0,%d0\n"
+ " rts\n"
+ " .section __ex_table,\"a\"\n"
+-" .long nf_get_id,1b\n"
++" .long nf_get_id2,1b\n"
+ " .long nf_call,1b\n"
+ " .previous");
+-EXPORT_SYMBOL_GPL(nf_get_id);
+ EXPORT_SYMBOL_GPL(nf_call);
+
++long nf_get_id(const char *feature_name)
++{
++ /* feature_name may be in vmalloc()ed memory, so make a copy */
++ char name_copy[32];
++ size_t n;
++
++ n = strlcpy(name_copy, feature_name, sizeof(name_copy));
++ if (n >= sizeof(name_copy))
++ return 0;
++
++ return nf_get_id2(name_copy);
++}
++EXPORT_SYMBOL_GPL(nf_get_id);
++
+ void nfprint(const char *fmt, ...)
+ {
+ static char buf[256];
+diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h
+index edb6614..7558032 100644
+--- a/arch/m68k/include/asm/div64.h
++++ b/arch/m68k/include/asm/div64.h
+@@ -13,16 +13,17 @@
+ unsigned long long n64; \
+ } __n; \
+ unsigned long __rem, __upper; \
++ unsigned long __base = (base); \
+ \
+ __n.n64 = (n); \
+ if ((__upper = __n.n32[0])) { \
+ asm ("divul.l %2,%1:%0" \
+- : "=d" (__n.n32[0]), "=d" (__upper) \
+- : "d" (base), "0" (__n.n32[0])); \
++ : "=d" (__n.n32[0]), "=d" (__upper) \
++ : "d" (__base), "0" (__n.n32[0])); \
+ } \
+ asm ("divu.l %2,%1:%0" \
+- : "=d" (__n.n32[1]), "=d" (__rem) \
+- : "d" (base), "1" (__upper), "0" (__n.n32[1])); \
++ : "=d" (__n.n32[1]), "=d" (__rem) \
++ : "d" (__base), "1" (__upper), "0" (__n.n32[1])); \
+ (n) = __n.n64; \
+ __rem; \
+ })
+diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
+index b3f5eec..a470f57 100644
+--- a/arch/microblaze/configs/mmu_defconfig
++++ b/arch/microblaze/configs/mmu_defconfig
+@@ -1,25 +1,22 @@
+ CONFIG_EXPERIMENTAL=y
+ CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_FHANDLE=y
++CONFIG_AUDIT=y
++CONFIG_AUDIT_LOGINUID_IMMUTABLE=y
+ CONFIG_IKCONFIG=y
+ CONFIG_IKCONFIG_PROC=y
++CONFIG_SYSFS_DEPRECATED=y
+ CONFIG_SYSFS_DEPRECATED_V2=y
+-CONFIG_BLK_DEV_INITRD=y
+-CONFIG_INITRAMFS_SOURCE="rootfs.cpio"
+-CONFIG_INITRAMFS_COMPRESSION_GZIP=y
+-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+-CONFIG_EXPERT=y
+ CONFIG_KALLSYMS_ALL=y
+-CONFIG_KALLSYMS_EXTRA_PASS=y
+-# CONFIG_HOTPLUG is not set
+ # CONFIG_BASE_FULL is not set
+-# CONFIG_FUTEX is not set
+-# CONFIG_EPOLL is not set
+-# CONFIG_SIGNALFD is not set
+-# CONFIG_SHMEM is not set
++CONFIG_EMBEDDED=y
+ CONFIG_SLAB=y
+ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_EFI_PARTITION is not set
+ CONFIG_OPT_LIB_ASM=y
+ CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
+ CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
+@@ -37,33 +34,53 @@ CONFIG_UNIX=y
+ CONFIG_INET=y
+ # CONFIG_INET_LRO is not set
+ # CONFIG_IPV6 is not set
++CONFIG_MTD=y
+ CONFIG_PROC_DEVICETREE=y
+ CONFIG_BLK_DEV_RAM=y
+ CONFIG_BLK_DEV_RAM_SIZE=8192
+ CONFIG_NETDEVICES=y
+-CONFIG_NET_ETHERNET=y
+ CONFIG_XILINX_EMACLITE=y
++CONFIG_XILINX_LL_TEMAC=y
+ # CONFIG_INPUT is not set
+ # CONFIG_SERIO is not set
+ # CONFIG_VT is not set
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
+ CONFIG_SERIAL_UARTLITE=y
+ CONFIG_SERIAL_UARTLITE_CONSOLE=y
+ # CONFIG_HW_RANDOM is not set
++CONFIG_XILINX_HWICAP=y
++CONFIG_I2C=y
++CONFIG_I2C_XILINX=y
++CONFIG_SPI=y
++CONFIG_SPI_XILINX=y
++CONFIG_GPIOLIB=y
++CONFIG_GPIO_SYSFS=y
++CONFIG_GPIO_XILINX=y
+ # CONFIG_HWMON is not set
++CONFIG_WATCHDOG=y
++CONFIG_XILINX_WATCHDOG=y
++CONFIG_FB=y
++CONFIG_FB_XILINX=y
+ # CONFIG_USB_SUPPORT is not set
++CONFIG_UIO=y
++CONFIG_UIO_PDRV=y
++CONFIG_UIO_PDRV_GENIRQ=y
++CONFIG_UIO_DMEM_GENIRQ=y
+ CONFIG_EXT2_FS=y
+ # CONFIG_DNOTIFY is not set
++CONFIG_CRAMFS=y
++CONFIG_ROMFS_FS=y
+ CONFIG_NFS_FS=y
+-CONFIG_NFS_V3=y
+ CONFIG_CIFS=y
+ CONFIG_CIFS_STATS=y
+ CONFIG_CIFS_STATS2=y
+-CONFIG_PARTITION_ADVANCED=y
+-CONFIG_DEBUG_KERNEL=y
+ CONFIG_DETECT_HUNG_TASK=y
+ CONFIG_DEBUG_SLAB=y
+ CONFIG_DEBUG_SPINLOCK=y
+ CONFIG_DEBUG_INFO=y
+-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+ CONFIG_EARLY_PRINTK=y
++CONFIG_KEYS=y
++CONFIG_ENCRYPTED_KEYS=y
++CONFIG_KEYS_DEBUG_PROC_KEYS=y
+ # CONFIG_CRYPTO_ANSI_CPRNG is not set
+diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
+index 0249e4b..5454a6d 100644
+--- a/arch/microblaze/configs/nommu_defconfig
++++ b/arch/microblaze/configs/nommu_defconfig
+@@ -1,41 +1,40 @@
+ CONFIG_EXPERIMENTAL=y
+ CONFIG_SYSVIPC=y
+ CONFIG_POSIX_MQUEUE=y
++CONFIG_FHANDLE=y
++CONFIG_AUDIT=y
++CONFIG_AUDIT_LOGINUID_IMMUTABLE=y
+ CONFIG_BSD_PROCESS_ACCT=y
+ CONFIG_BSD_PROCESS_ACCT_V3=y
+ CONFIG_IKCONFIG=y
+ CONFIG_IKCONFIG_PROC=y
++CONFIG_SYSFS_DEPRECATED=y
+ CONFIG_SYSFS_DEPRECATED_V2=y
+-CONFIG_EXPERT=y
+ CONFIG_KALLSYMS_ALL=y
+-CONFIG_KALLSYMS_EXTRA_PASS=y
+-# CONFIG_HOTPLUG is not set
+ # CONFIG_BASE_FULL is not set
++CONFIG_EMBEDDED=y
+ CONFIG_SLAB=y
+ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_BLK_DEV_BSG is not set
+-# CONFIG_OPT_LIB_FUNCTION is not set
++CONFIG_PARTITION_ADVANCED=y
++# CONFIG_EFI_PARTITION is not set
+ CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
+ CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
+ CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
+ CONFIG_XILINX_MICROBLAZE0_USE_DIV=1
+ CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL=2
+ CONFIG_XILINX_MICROBLAZE0_USE_FPU=2
+-CONFIG_HIGH_RES_TIMERS=y
+ CONFIG_HZ_100=y
+ CONFIG_CMDLINE_BOOL=y
+-CONFIG_BINFMT_FLAT=y
++CONFIG_CMDLINE_FORCE=y
+ CONFIG_NET=y
+ CONFIG_PACKET=y
+ CONFIG_UNIX=y
+ CONFIG_INET=y
+ # CONFIG_INET_LRO is not set
+ # CONFIG_IPV6 is not set
+-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+ CONFIG_MTD=y
+-CONFIG_MTD_CONCAT=y
+-CONFIG_MTD_PARTITIONS=y
+ CONFIG_MTD_CMDLINE_PARTS=y
+ CONFIG_MTD_CHAR=y
+ CONFIG_MTD_BLOCK=y
+@@ -45,41 +44,55 @@ CONFIG_MTD_CFI_AMDSTD=y
+ CONFIG_MTD_RAM=y
+ CONFIG_MTD_UCLINUX=y
+ CONFIG_PROC_DEVICETREE=y
+-CONFIG_BLK_DEV_NBD=y
+ CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_SIZE=8192
+ CONFIG_NETDEVICES=y
+-CONFIG_NET_ETHERNET=y
++CONFIG_XILINX_EMACLITE=y
++CONFIG_XILINX_LL_TEMAC=y
+ # CONFIG_INPUT is not set
+ # CONFIG_SERIO is not set
+ # CONFIG_VT is not set
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
+ CONFIG_SERIAL_UARTLITE=y
+ CONFIG_SERIAL_UARTLITE_CONSOLE=y
+-CONFIG_HW_RANDOM=y
++# CONFIG_HW_RANDOM is not set
++CONFIG_XILINX_HWICAP=y
++CONFIG_I2C=y
++CONFIG_I2C_XILINX=y
++CONFIG_SPI=y
++CONFIG_SPI_XILINX=y
++CONFIG_GPIOLIB=y
++CONFIG_GPIO_SYSFS=y
++CONFIG_GPIO_XILINX=y
+ # CONFIG_HWMON is not set
+-CONFIG_VIDEO_OUTPUT_CONTROL=y
++CONFIG_WATCHDOG=y
++CONFIG_XILINX_WATCHDOG=y
++CONFIG_FB=y
++CONFIG_FB_XILINX=y
++# CONFIG_USB_SUPPORT is not set
++CONFIG_UIO=y
++CONFIG_UIO_PDRV=y
++CONFIG_UIO_PDRV_GENIRQ=y
++CONFIG_UIO_DMEM_GENIRQ=y
+ CONFIG_EXT2_FS=y
+ # CONFIG_DNOTIFY is not set
+ CONFIG_CRAMFS=y
+ CONFIG_ROMFS_FS=y
+ CONFIG_NFS_FS=y
+-CONFIG_NFS_V3=y
+ CONFIG_NFS_V3_ACL=y
+-CONFIG_UNUSED_SYMBOLS=y
+-CONFIG_DEBUG_FS=y
+-CONFIG_DEBUG_KERNEL=y
+-CONFIG_DEBUG_SHIRQ=y
++CONFIG_NLS=y
+ CONFIG_DETECT_HUNG_TASK=y
+-CONFIG_SCHEDSTATS=y
+-CONFIG_TIMER_STATS=y
+-CONFIG_DEBUG_OBJECTS=y
+-CONFIG_DEBUG_OBJECTS_SELFTEST=y
+-CONFIG_DEBUG_OBJECTS_FREE=y
+-CONFIG_DEBUG_OBJECTS_TIMERS=y
++CONFIG_DEBUG_SLAB=y
++CONFIG_DEBUG_SPINLOCK=y
+ CONFIG_DEBUG_INFO=y
+-CONFIG_DEBUG_LIST=y
+-CONFIG_DEBUG_SG=y
+-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+-CONFIG_SYSCTL_SYSCALL_CHECK=y
+ CONFIG_EARLY_PRINTK=y
++CONFIG_KEYS=y
++CONFIG_ENCRYPTED_KEYS=y
++CONFIG_KEYS_DEBUG_PROC_KEYS=y
++CONFIG_CRYPTO_ECB=y
++CONFIG_CRYPTO_MD4=y
++CONFIG_CRYPTO_MD5=y
++CONFIG_CRYPTO_ARC4=y
++CONFIG_CRYPTO_DES=y
+ # CONFIG_CRYPTO_ANSI_CPRNG is not set
+-# CONFIG_CRC32 is not set
+diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h
+index b0526d2..ff8cde1 100644
+--- a/arch/microblaze/include/asm/futex.h
++++ b/arch/microblaze/include/asm/futex.h
+@@ -24,7 +24,7 @@
+ .word 1b,4b,2b,4b; \
+ .previous;" \
+ : "=&r" (oldval), "=&r" (ret) \
+- : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
++ : "r" (uaddr), "i" (-EFAULT), "r" (oparg) \
+ ); \
+ })
+
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 951e18f..16ef838 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -937,6 +937,7 @@ config RELOCATABLE
+ must live at a different physical address than the primary
+ kernel.
+
++# This value must have zeroes in the bottom 60 bits otherwise lots will break
+ config PAGE_OFFSET
+ hex
+ default "0xc000000000000000"
+diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
+index dd9c4fd..5b0bde2 100644
+--- a/arch/powerpc/include/asm/page.h
++++ b/arch/powerpc/include/asm/page.h
+@@ -132,9 +132,19 @@ extern phys_addr_t kernstart_addr;
+ #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) - PHYSICAL_START + KERNELBASE))
+ #define __pa(x) ((unsigned long)(x) + PHYSICAL_START - KERNELBASE)
+ #else
++#ifdef CONFIG_PPC64
++/*
++ * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
++ * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
++ */
++#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
++#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
++
++#else /* 32-bit, non book E */
+ #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
+ #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
+ #endif
++#endif
+
+ /*
+ * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
+diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
+index 84daabe..826681d 100644
+--- a/arch/powerpc/kernel/lparcfg.c
++++ b/arch/powerpc/kernel/lparcfg.c
+@@ -37,7 +37,13 @@
+ #include <asm/vdso_datapage.h>
+ #include <asm/vio.h>
+ #include <asm/mmu.h>
++#include <asm/machdep.h>
+
++
++/*
++ * This isn't a module but we expose that to userspace
++ * via /proc so leave the definitions here
++ */
+ #define MODULE_VERS "1.9"
+ #define MODULE_NAME "lparcfg"
+
+@@ -487,7 +493,8 @@ static void parse_em_data(struct seq_file *m)
+ {
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+- if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
++ if (firmware_has_feature(FW_FEATURE_LPAR) &&
++ plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+ seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
+ }
+
+@@ -772,7 +779,6 @@ static int lparcfg_open(struct inode *inode, struct file *file)
+ }
+
+ static const struct file_operations lparcfg_fops = {
+- .owner = THIS_MODULE,
+ .read = seq_read,
+ .write = lparcfg_write,
+ .open = lparcfg_open,
+@@ -799,15 +805,4 @@ static int __init lparcfg_init(void)
+ proc_ppc64_lparcfg = ent;
+ return 0;
+ }
+-
+-static void __exit lparcfg_cleanup(void)
+-{
+- if (proc_ppc64_lparcfg)
+- remove_proc_entry("lparcfg", proc_ppc64_lparcfg->parent);
+-}
+-
+-module_init(lparcfg_init);
+-module_exit(lparcfg_cleanup);
+-MODULE_DESCRIPTION("Interface for LPAR configuration data");
+-MODULE_AUTHOR("Dave Engebretsen");
+-MODULE_LICENSE("GPL");
++machine_device_initcall(pseries, lparcfg_init);
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 4db9b1e..dd072b1 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -469,6 +469,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+
+ static void __vcpu_run(struct kvm_vcpu *vcpu)
+ {
++ int rc;
++
+ memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
+
+ if (need_resched())
+@@ -479,21 +481,24 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
+
+ kvm_s390_deliver_pending_interrupts(vcpu);
+
++ VCPU_EVENT(vcpu, 6, "entering sie flags %x",
++ atomic_read(&vcpu->arch.sie_block->cpuflags));
++
+ vcpu->arch.sie_block->icptcode = 0;
+ local_irq_disable();
+ kvm_guest_enter();
+ local_irq_enable();
+- VCPU_EVENT(vcpu, 6, "entering sie flags %x",
+- atomic_read(&vcpu->arch.sie_block->cpuflags));
+- if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
++ rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
++ local_irq_disable();
++ kvm_guest_exit();
++ local_irq_enable();
++
++ if (rc) {
+ VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+ kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ }
+ VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
+ vcpu->arch.sie_block->icptcode);
+- local_irq_disable();
+- kvm_guest_exit();
+- local_irq_enable();
+
+ memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
+ }
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index f210d51..87537e2 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -31,6 +31,7 @@ config SPARC
+
+ config SPARC32
+ def_bool !64BIT
++ select GENERIC_ATOMIC64
+
+ config SPARC64
+ def_bool 64BIT
+diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
+index 5c3c8b6..07dd35e 100644
+--- a/arch/sparc/include/asm/atomic_32.h
++++ b/arch/sparc/include/asm/atomic_32.h
+@@ -15,6 +15,8 @@
+
+ #ifdef __KERNEL__
+
++#include <asm-generic/atomic64.h>
++
+ #include <asm/system.h>
+
+ #define ATOMIC_INIT(i) { (i) }
+diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
+index a3fc437..4961516 100644
+--- a/arch/sparc/lib/Makefile
++++ b/arch/sparc/lib/Makefile
+@@ -40,7 +40,7 @@ lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o
+ lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
+
+ obj-y += iomap.o
+-obj-$(CONFIG_SPARC32) += atomic32.o
++obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o
+ obj-y += ksyms.o
+ obj-$(CONFIG_SPARC64) += PeeCeeI.o
+ obj-y += usercopy.o
+diff --git a/arch/sparc/lib/ucmpdi2.c b/arch/sparc/lib/ucmpdi2.c
+new file mode 100644
+index 0000000..1e06ed5
+--- /dev/null
++++ b/arch/sparc/lib/ucmpdi2.c
+@@ -0,0 +1,19 @@
++#include <linux/module.h>
++#include "libgcc.h"
++
++word_type __ucmpdi2(unsigned long long a, unsigned long long b)
++{
++ const DWunion au = {.ll = a};
++ const DWunion bu = {.ll = b};
++
++ if ((unsigned int) au.s.high < (unsigned int) bu.s.high)
++ return 0;
++ else if ((unsigned int) au.s.high > (unsigned int) bu.s.high)
++ return 2;
++ if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
++ return 0;
++ else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
++ return 2;
++ return 1;
++}
++EXPORT_SYMBOL(__ucmpdi2);
+diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
+index 739d859..fa4ea09 100644
+--- a/arch/x86/kernel/i387.c
++++ b/arch/x86/kernel/i387.c
+@@ -51,7 +51,7 @@ void __cpuinit mxcsr_feature_mask_init(void)
+ clts();
+ if (cpu_has_fxsr) {
+ memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
+- asm volatile("fxsave %0" : : "m" (fx_scratch));
++ asm volatile("fxsave %0" : "+m" (fx_scratch));
+ mask = fx_scratch.mxcsr_mask;
+ if (mask == 0)
+ mask = 0x0000ffbf;
+diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
+index 0514890..cdb2fc9 100644
+--- a/arch/x86/kernel/sys_x86_64.c
++++ b/arch/x86/kernel/sys_x86_64.c
+@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
+ *begin = new_begin;
+ }
+ } else {
+- *begin = TASK_UNMAPPED_BASE;
++ *begin = current->mm->mmap_legacy_base;
+ *end = TASK_SIZE;
+ }
+ }
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index 845df68..5c1ae28 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -112,12 +112,14 @@ static unsigned long mmap_legacy_base(void)
+ */
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
++ mm->mmap_legacy_base = mmap_legacy_base();
++ mm->mmap_base = mmap_base();
++
+ if (mmap_is_legacy()) {
+- mm->mmap_base = mmap_legacy_base();
++ mm->mmap_base = mm->mmap_legacy_base;
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+- mm->mmap_base = mmap_base();
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
+index 6e5a7f1..4d54b38 100644
+--- a/arch/x86/xen/setup.c
++++ b/arch/x86/xen/setup.c
+@@ -212,6 +212,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
+ e820_add_region(start, end - start, type);
+ }
+
++void xen_ignore_unusable(struct e820entry *list, size_t map_size)
++{
++ struct e820entry *entry;
++ unsigned int i;
++
++ for (i = 0, entry = list; i < map_size; i++, entry++) {
++ if (entry->type == E820_UNUSABLE)
++ entry->type = E820_RAM;
++ }
++}
++
+ /**
+ * machine_specific_memory_setup - Hook for machine specific memory setup.
+ **/
+@@ -250,6 +261,17 @@ char * __init xen_memory_setup(void)
+ }
+ BUG_ON(rc);
+
++ /*
++ * Xen won't allow a 1:1 mapping to be created to UNUSABLE
++ * regions, so if we're using the machine memory map leave the
++ * region as RAM as it is in the pseudo-physical map.
++ *
++ * UNUSABLE regions in domUs are not handled and will need
++ * a patch in the future.
++ */
++ if (xen_initial_domain())
++ xen_ignore_unusable(map, memmap.nr_entries);
++
+ /* Make sure the Xen-supplied memory map is well-ordered. */
+ sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
+
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index a1a4b8e..c749b93 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -117,6 +117,7 @@ struct acpi_battery {
+ struct acpi_device *device;
+ struct notifier_block pm_nb;
+ unsigned long update_time;
++ int revision;
+ int rate_now;
+ int capacity_now;
+ int voltage_now;
+@@ -350,6 +351,7 @@ static struct acpi_offsets info_offsets[] = {
+ };
+
+ static struct acpi_offsets extended_info_offsets[] = {
++ {offsetof(struct acpi_battery, revision), 0},
+ {offsetof(struct acpi_battery, power_unit), 0},
+ {offsetof(struct acpi_battery, design_capacity), 0},
+ {offsetof(struct acpi_battery, full_charge_capacity), 0},
+diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
+index cf047c4..e7b3a9e 100644
+--- a/drivers/ata/Kconfig
++++ b/drivers/ata/Kconfig
+@@ -93,7 +93,7 @@ config SATA_FSL
+ If unsure, say N.
+
+ config SATA_INIC162X
+- tristate "Initio 162x SATA support"
++ tristate "Initio 162x SATA support (Very Experimental)"
+ depends on PCI
+ help
+ This option enables support for Initio 162x Serial ATA.
+diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
+index f63a588..f5c35be 100644
+--- a/drivers/ata/libata-pmp.c
++++ b/drivers/ata/libata-pmp.c
+@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
+
+ /* Disable sending Early R_OK.
+ * With "cached read" HDD testing and multiple ports busy on a SATA
+- * host controller, 3726 PMP will very rarely drop a deferred
++ * host controller, 3x26 PMP will very rarely drop a deferred
+ * R_OK that was intended for the host. Symptom will be all
+ * 5 drives under test will timeout, get reset, and recover.
+ */
+- if (vendor == 0x1095 && devid == 0x3726) {
++ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+ u32 reg;
+
+ err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
+ if (err_mask) {
+ rc = -EIO;
+- reason = "failed to read Sil3726 Private Register";
++ reason = "failed to read Sil3x26 Private Register";
+ goto fail;
+ }
+ reg &= ~0x1;
+ err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
+ if (err_mask) {
+ rc = -EIO;
+- reason = "failed to write Sil3726 Private Register";
++ reason = "failed to write Sil3x26 Private Register";
+ goto fail;
+ }
+ }
+@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
+ u16 devid = sata_pmp_gscr_devid(gscr);
+ struct ata_link *link;
+
+- if (vendor == 0x1095 && devid == 0x3726) {
+- /* sil3726 quirks */
++ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
++ /* sil3x26 quirks */
+ ata_for_each_link(link, ap, EDGE) {
+ /* link reports offline after LPM */
+ link->flags |= ATA_LFLAG_NO_LPM;
+diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
+index 5c7d70c..3a8b55e 100644
+--- a/drivers/ata/sata_inic162x.c
++++ b/drivers/ata/sata_inic162x.c
+@@ -6,6 +6,18 @@
+ *
+ * This file is released under GPL v2.
+ *
++ * **** WARNING ****
++ *
++ * This driver never worked properly and unfortunately data corruption is
++ * relatively common. There isn't anyone working on the driver and there's
++ * no support from the vendor. Do not use this driver in any production
++ * environment.
++ *
++ * http://thread.gmane.org/gmane.linux.debian.devel.bugs.rc/378525/focus=54491
++ * https://bugzilla.kernel.org/show_bug.cgi?id=60565
++ *
++ * *****************
++ *
+ * This controller is eccentric and easily locks up if something isn't
+ * right. Documentation is available at initio's website but it only
+ * documents registers (not programming model).
+@@ -809,6 +821,8 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
+
++ dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@vger.kernel.org if in production use\n");
++
+ /* alloc host */
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
+ hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+diff --git a/drivers/base/memory.c b/drivers/base/memory.c
+index 8272d92..732ad0d 100644
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -172,6 +172,8 @@ static ssize_t show_mem_removable(struct sys_device *dev,
+ container_of(dev, struct memory_block, sysdev);
+
+ for (i = 0; i < sections_per_block; i++) {
++ if (!present_section_nr(mem->start_section_nr + i))
++ continue;
+ pfn = section_nr_to_pfn(mem->start_section_nr + i);
+ ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
+ }
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 79038e5..6790cf7 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -751,8 +751,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+- if (genpd->suspend_power_off
+- || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
++ if (genpd->suspend_power_off)
+ return 0;
+
+ /*
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 853fdf8..bde72f7 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -89,6 +89,11 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x13d3, 0x3393) },
+ { USB_DEVICE(0x0489, 0xe04e) },
+ { USB_DEVICE(0x0489, 0xe056) },
++ { USB_DEVICE(0x0489, 0xe04d) },
++ { USB_DEVICE(0x04c5, 0x1330) },
++ { USB_DEVICE(0x13d3, 0x3402) },
++ { USB_DEVICE(0x0cf3, 0x3121) },
++ { USB_DEVICE(0x0cf3, 0xe003) },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE02C) },
+@@ -125,6 +130,11 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU22 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 6b784b7..1bd3924 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -63,6 +63,9 @@ static struct usb_device_id btusb_table[] = {
+ /* Apple-specific (Broadcom) devices */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
+
++ /* MediaTek MT76x0E */
++ { USB_DEVICE(0x0e8d, 0x763f) },
++
+ /* Broadcom SoftSailing reporting vendor specific */
+ { USB_DEVICE(0x0a5c, 0x21e1) },
+
+@@ -156,6 +159,11 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index d5ae736..c68b8ad 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -257,9 +257,12 @@ static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
+ unsigned long flags;
+
+ spin_lock_irqsave(&portdev->ports_lock, flags);
+- list_for_each_entry(port, &portdev->ports, list)
+- if (port->cdev->dev == dev)
++ list_for_each_entry(port, &portdev->ports, list) {
++ if (port->cdev->dev == dev) {
++ kref_get(&port->kref);
+ goto out;
++ }
++ }
+ port = NULL;
+ out:
+ spin_unlock_irqrestore(&portdev->ports_lock, flags);
+@@ -634,6 +637,10 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
+
+ port = filp->private_data;
+
++ /* Port is hot-unplugged. */
++ if (!port->guest_connected)
++ return -ENODEV;
++
+ if (!port_has_data(port)) {
+ /*
+ * If nothing's connected on the host just return 0 in
+@@ -650,7 +657,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
+ if (ret < 0)
+ return ret;
+ }
+- /* Port got hot-unplugged. */
++ /* Port got hot-unplugged while we were waiting above. */
+ if (!port->guest_connected)
+ return -ENODEV;
+ /*
+@@ -793,14 +800,14 @@ static int port_fops_open(struct inode *inode, struct file *filp)
+ struct port *port;
+ int ret;
+
++ /* We get the port with a kref here */
+ port = find_port_by_devt(cdev->dev);
++ if (!port) {
++ /* Port was unplugged before we could proceed */
++ return -ENXIO;
++ }
+ filp->private_data = port;
+
+- /* Prevent against a port getting hot-unplugged at the same time */
+- spin_lock_irq(&port->portdev->ports_lock);
+- kref_get(&port->kref);
+- spin_unlock_irq(&port->portdev->ports_lock);
+-
+ /*
+ * Don't allow opening of console port devices -- that's done
+ * via /dev/hvc
+@@ -1264,14 +1271,6 @@ static void remove_port(struct kref *kref)
+
+ port = container_of(kref, struct port, kref);
+
+- sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
+- device_destroy(pdrvdata.class, port->dev->devt);
+- cdev_del(port->cdev);
+-
+- kfree(port->name);
+-
+- debugfs_remove(port->debugfs_file);
+-
+ kfree(port);
+ }
+
+@@ -1289,12 +1288,14 @@ static void unplug_port(struct port *port)
+ spin_unlock_irq(&port->portdev->ports_lock);
+
+ if (port->guest_connected) {
++ /* Let the app know the port is going down. */
++ send_sigio_to_port(port);
++
++ /* Do this after sigio is actually sent */
+ port->guest_connected = false;
+ port->host_connected = false;
+- wake_up_interruptible(&port->waitqueue);
+
+- /* Let the app know the port is going down. */
+- send_sigio_to_port(port);
++ wake_up_interruptible(&port->waitqueue);
+ }
+
+ if (is_console_port(port)) {
+@@ -1320,6 +1321,14 @@ static void unplug_port(struct port *port)
+ */
+ port->portdev = NULL;
+
++ sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
++ device_destroy(pdrvdata.class, port->dev->devt);
++ cdev_del(port->cdev);
++
++ kfree(port->name);
++
++ debugfs_remove(port->debugfs_file);
++
+ /*
+ * Locks around here are not necessary - a port can't be
+ * opened after we removed the port struct from ports_list
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 144d37c..61274bf 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -275,6 +275,7 @@ enum intel_pch {
+ #define QUIRK_PIPEA_FORCE (1<<0)
+ #define QUIRK_LVDS_SSC_DISABLE (1<<1)
+ #define QUIRK_INVERT_BRIGHTNESS (1<<2)
++#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
+
+ struct intel_fbdev;
+ struct intel_fbc_work;
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 124dd87..97a050f 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -362,6 +362,7 @@
+ #define IPEIR_I965 0x02064
+ #define IPEHR_I965 0x02068
+ #define INSTDONE_I965 0x0206c
++#define RING_INSTPM(base) ((base)+0xc0)
+ #define INSTPS 0x02070 /* 965+ only */
+ #define INSTDONE1 0x0207c /* 965+ only */
+ #define ACTHD_I965 0x02074
+@@ -458,6 +459,8 @@
+ will not assert AGPBUSY# and will only
+ be delivered when out of C3. */
+ #define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
++#define INSTPM_TLB_INVALIDATE (1<<9)
++#define INSTPM_SYNC_FLUSH (1<<5)
+ #define ACTHD 0x020c8
+ #define FW_BLC 0x020d8
+ #define FW_BLC2 0x020dc
+@@ -3513,7 +3516,7 @@
+ #define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
+ #define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
+ #define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
+-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
++#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22)
+
+ /* legacy values */
+ #define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index cfbb893..ee29c1f 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8842,6 +8842,17 @@ static void quirk_invert_brightness(struct drm_device *dev)
+ dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
+ }
+
++/*
++ * Some machines (Dell XPS13) suffer broken backlight controls if
++ * BLM_PCH_PWM_ENABLE is set.
++ */
++static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
++{
++ struct drm_i915_private *dev_priv = dev->dev_private;
++ dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
++ DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
++}
++
+ struct intel_quirk {
+ int device;
+ int subsystem_vendor;
+@@ -8916,6 +8927,11 @@ struct intel_quirk intel_quirks[] = {
+
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
++
++ /* Dell XPS13 HD Sandy Bridge */
++ { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
++ /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
++ { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
+ };
+
+ static void intel_init_quirks(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 2ffa740..74d312f 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -402,13 +402,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
+ {
+ struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+
+- /*
+- * Prior to Ironlake, we must disable the pipe if we want to adjust
+- * the panel fitter. However at all other times we can just reset
+- * the registers regardless.
+- */
+- if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty)
+- intel_lvds_disable(intel_lvds);
++ intel_lvds_disable(intel_lvds);
+ }
+
+ static void intel_lvds_commit(struct drm_encoder *encoder)
+@@ -1075,7 +1069,8 @@ bool intel_lvds_init(struct drm_device *dev)
+ goto failed;
+
+ out:
+- if (HAS_PCH_SPLIT(dev)) {
++ if (HAS_PCH_SPLIT(dev) &&
++ !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
+ u32 pwm;
+
+ pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 38a7793..3c55cf6 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -776,6 +776,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
+
+ I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ POSTING_READ(mmio);
++
++ /* Flush the TLB for this page */
++ if (INTEL_INFO(dev)->gen >= 6) {
++ u32 reg = RING_INSTPM(ring->mmio_base);
++ I915_WRITE(reg,
++ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
++ INSTPM_SYNC_FLUSH));
++ if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
++ 1000))
++ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
++ ring->name);
++ }
+ }
+
+ static int
+diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
+index d969f3c..afb351a 100644
+--- a/drivers/gpu/drm/radeon/atom.c
++++ b/drivers/gpu/drm/radeon/atom.c
+@@ -1220,12 +1220,17 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+ int r;
+
+ mutex_lock(&ctx->mutex);
++ /* reset data block */
++ ctx->data_block = 0;
+ /* reset reg block */
+ ctx->reg_block = 0;
+ /* reset fb window */
+ ctx->fb_base = 0;
+ /* reset io mode */
+ ctx->io_mode = ATOM_IO_MM;
++ /* reset divmul */
++ ctx->divmul[0] = 0;
++ ctx->divmul[1] = 0;
+ r = atom_execute_table_locked(ctx, index, params);
+ mutex_unlock(&ctx->mutex);
+ return r;
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 9bea4a6..f5962a0 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -3036,6 +3036,8 @@ static int evergreen_startup(struct radeon_device *rdev)
+ /* enable pcie gen2 link */
+ evergreen_pcie_gen2_enable(rdev);
+
++ evergreen_mc_program(rdev);
++
+ if (ASIC_IS_DCE5(rdev)) {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+@@ -3063,7 +3065,6 @@ static int evergreen_startup(struct radeon_device *rdev)
+ if (r)
+ return r;
+
+- evergreen_mc_program(rdev);
+ if (rdev->flags & RADEON_IS_AGP) {
+ evergreen_agp_enable(rdev);
+ } else {
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 3f9705b..77e6fb1 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -1353,6 +1353,8 @@ static int cayman_startup(struct radeon_device *rdev)
+ /* enable pcie gen2 link */
+ evergreen_pcie_gen2_enable(rdev);
+
++ evergreen_mc_program(rdev);
++
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+@@ -1370,7 +1372,6 @@ static int cayman_startup(struct radeon_device *rdev)
+ if (r)
+ return r;
+
+- evergreen_mc_program(rdev);
+ r = cayman_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 3d46d7d4..57e45c6 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -2415,6 +2415,8 @@ int r600_startup(struct radeon_device *rdev)
+ /* enable pcie gen2 link */
+ r600_pcie_gen2_enable(rdev);
+
++ r600_mc_program(rdev);
++
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+@@ -2427,7 +2429,6 @@ int r600_startup(struct radeon_device *rdev)
+ if (r)
+ return r;
+
+- r600_mc_program(rdev);
+ if (rdev->flags & RADEON_IS_AGP) {
+ r600_agp_enable(rdev);
+ } else {
+diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
+index 63db75d..3e72074 100644
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -1057,6 +1057,8 @@ static int rv770_startup(struct radeon_device *rdev)
+ /* enable pcie gen2 link */
+ rv770_pcie_gen2_enable(rdev);
+
++ rv770_mc_program(rdev);
++
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+@@ -1069,7 +1071,6 @@ static int rv770_startup(struct radeon_device *rdev)
+ if (r)
+ return r;
+
+- rv770_mc_program(rdev);
+ if (rdev->flags & RADEON_IS_AGP) {
+ rv770_agp_enable(rdev);
+ } else {
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+index c41226a..2952249 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+@@ -29,7 +29,9 @@
+ #include "drmP.h"
+ #include "ttm/ttm_bo_driver.h"
+
+-#define VMW_PPN_SIZE sizeof(unsigned long)
++#define VMW_PPN_SIZE (sizeof(unsigned long))
++/* A future safe maximum remap size. */
++#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
+
+ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
+ struct page *pages[],
+@@ -38,43 +40,61 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
+ {
+ SVGAFifoCmdDefineGMR2 define_cmd;
+ SVGAFifoCmdRemapGMR2 remap_cmd;
+- uint32_t define_size = sizeof(define_cmd) + 4;
+- uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
+ uint32_t *cmd;
+ uint32_t *cmd_orig;
++ uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
++ uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
++ uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
++ uint32_t remap_pos = 0;
++ uint32_t cmd_size = define_size + remap_size;
+ uint32_t i;
+
+- cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
++ cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
+ if (unlikely(cmd == NULL))
+ return -ENOMEM;
+
+ define_cmd.gmrId = gmr_id;
+ define_cmd.numPages = num_pages;
+
++ *cmd++ = SVGA_CMD_DEFINE_GMR2;
++ memcpy(cmd, &define_cmd, sizeof(define_cmd));
++ cmd += sizeof(define_cmd) / sizeof(*cmd);
++
++ /*
++ * Need to split the command if there are too many
++ * pages that goes into the gmr.
++ */
++
+ remap_cmd.gmrId = gmr_id;
+ remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
+ SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
+- remap_cmd.offsetPages = 0;
+- remap_cmd.numPages = num_pages;
+
+- *cmd++ = SVGA_CMD_DEFINE_GMR2;
+- memcpy(cmd, &define_cmd, sizeof(define_cmd));
+- cmd += sizeof(define_cmd) / sizeof(uint32);
++ while (num_pages > 0) {
++ unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
++
++ remap_cmd.offsetPages = remap_pos;
++ remap_cmd.numPages = nr;
+
+- *cmd++ = SVGA_CMD_REMAP_GMR2;
+- memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
+- cmd += sizeof(remap_cmd) / sizeof(uint32);
++ *cmd++ = SVGA_CMD_REMAP_GMR2;
++ memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
++ cmd += sizeof(remap_cmd) / sizeof(*cmd);
+
+- for (i = 0; i < num_pages; ++i) {
+- if (VMW_PPN_SIZE <= 4)
+- *cmd = page_to_pfn(*pages++);
+- else
+- *((uint64_t *)cmd) = page_to_pfn(*pages++);
++ for (i = 0; i < nr; ++i) {
++ if (VMW_PPN_SIZE <= 4)
++ *cmd = page_to_pfn(*pages++);
++ else
++ *((uint64_t *)cmd) = page_to_pfn(*pages++);
+
+- cmd += VMW_PPN_SIZE / sizeof(*cmd);
++ cmd += VMW_PPN_SIZE / sizeof(*cmd);
++ }
++
++ num_pages -= nr;
++ remap_pos += nr;
+ }
+
+- vmw_fifo_commit(dev_priv, define_size + remap_size);
++ BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
++
++ vmw_fifo_commit(dev_priv, cmd_size);
+
+ return 0;
+ }
+diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
+index c6d1ce0..a9726c1 100644
+--- a/drivers/hwmon/adt7470.c
++++ b/drivers/hwmon/adt7470.c
+@@ -215,7 +215,7 @@ static inline int adt7470_write_word_data(struct i2c_client *client, u8 reg,
+ u16 value)
+ {
+ return i2c_smbus_write_byte_data(client, reg, value & 0xFF)
+- && i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
++ || i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
+ }
+
+ static void adt7470_init_client(struct i2c_client *client)
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 298e02a..c706a7b 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1139,7 +1139,7 @@ read_again:
+ * know the original bi_idx, so we just free
+ * them all
+ */
+- __bio_for_each_segment(bvec, mbio, j, 0)
++ bio_for_each_segment_all(bvec, mbio, j)
+ bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
+ if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
+ atomic_inc(&r1_bio->behind_remaining);
+diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
+index a746ba2..a956053 100644
+--- a/drivers/net/arcnet/arcnet.c
++++ b/drivers/net/arcnet/arcnet.c
+@@ -1007,7 +1007,7 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
+
+ soft = &pkt.soft.rfc1201;
+
+- lp->hw.copy_from_card(dev, bufnum, 0, &pkt, sizeof(ARC_HDR_SIZE));
++ lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
+ if (pkt.hard.offset[0]) {
+ ofs = pkt.hard.offset[0];
+ length = 256 - ofs;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+index fcd0e47..2a7d091 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+@@ -108,9 +108,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+
+ /* Enable arbiter */
+ reg &= ~IXGBE_DPMCS_ARBDIS;
+- /* Enable DFP and Recycle mode */
+- reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
+ reg |= IXGBE_DPMCS_TSOEF;
++
+ /* Configure Max TSO packet size 34KB including payload and headers */
+ reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
+
+diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
+index b19841a..00f1367 100644
+--- a/drivers/net/ifb.c
++++ b/drivers/net/ifb.c
+@@ -34,6 +34,7 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/moduleparam.h>
++#include <linux/sched.h>
+ #include <net/pkt_sched.h>
+ #include <net/net_namespace.h>
+
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index f3d17f8..a8e4640 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -43,7 +43,6 @@
+ #define EEPROM_MAC_OFFSET (0x01)
+ #define DEFAULT_TX_CSUM_ENABLE (true)
+ #define DEFAULT_RX_CSUM_ENABLE (true)
+-#define DEFAULT_TSO_ENABLE (true)
+ #define SMSC75XX_INTERNAL_PHY_ID (1)
+ #define SMSC75XX_TX_OVERHEAD (8)
+ #define MAX_RX_FIFO_SIZE (20 * 1024)
+@@ -1035,17 +1034,14 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
+
+ INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write);
+
+- if (DEFAULT_TX_CSUM_ENABLE) {
++ if (DEFAULT_TX_CSUM_ENABLE)
+ dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+- if (DEFAULT_TSO_ENABLE)
+- dev->net->features |= NETIF_F_SG |
+- NETIF_F_TSO | NETIF_F_TSO6;
+- }
++
+ if (DEFAULT_RX_CSUM_ENABLE)
+ dev->net->features |= NETIF_F_RXCSUM;
+
+ dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM;
++ NETIF_F_RXCSUM;
+
+ /* Init all registers */
+ ret = smsc75xx_reset(dev);
+@@ -1170,8 +1166,6 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
+ {
+ u32 tx_cmd_a, tx_cmd_b;
+
+- skb_linearize(skb);
+-
+ if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
+ struct sk_buff *skb2 =
+ skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+index 84890d5..ef921e1 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+@@ -851,6 +851,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
+ if (error != 0)
+ goto err_rx;
+
++ ath9k_hw_disable(priv->ah);
+ #ifdef CONFIG_MAC80211_LEDS
+ /* must be initialized before ieee80211_register_hw */
+ priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw,
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+index a48bb83..9a57149 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+@@ -448,6 +448,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
+ struct ieee80211_conf *cur_conf = &priv->hw->conf;
+ bool txok;
+ int slot;
++ int hdrlen, padsize;
+
+ slot = strip_drv_header(priv, skb);
+ if (slot < 0) {
+@@ -504,6 +505,15 @@ send_mac80211:
+
+ ath9k_htc_tx_clear_slot(priv, slot);
+
++ /* Remove padding before handing frame back to mac80211 */
++ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
++
++ padsize = hdrlen & 3;
++ if (padsize && skb->len > hdrlen + padsize) {
++ memmove(skb->data + padsize, skb->data, hdrlen);
++ skb_pull(skb, padsize);
++ }
++
+ /* Send status to mac80211 */
+ ieee80211_tx_status(priv->hw, skb);
+ }
+diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
+index 045a936..271e818 100644
+--- a/drivers/net/wireless/hostap/hostap_ioctl.c
++++ b/drivers/net/wireless/hostap/hostap_ioctl.c
+@@ -522,9 +522,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
+
+ data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
+
+- memcpy(extra, &addr, sizeof(struct sockaddr) * data->length);
++ memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
+ data->flags = 1; /* has quality information */
+- memcpy(extra + sizeof(struct sockaddr) * data->length, &qual,
++ memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
+ sizeof(struct iw_quality) * data->length);
+
+ kfree(addr);
+diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
+index 1bb64c9..09891e5 100644
+--- a/drivers/net/wireless/iwlegacy/iwl-core.c
++++ b/drivers/net/wireless/iwlegacy/iwl-core.c
+@@ -1757,6 +1757,7 @@ int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)
+
+ return 0;
+ }
++EXPORT_SYMBOL(iwl_legacy_force_reset);
+
+ int
+ iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
+index d2fba9e..6e25c7b 100644
+--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
++++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
+@@ -868,13 +868,13 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
+ * is killed. Hence update the killswitch state here. The
+ * rfkill handler will care about restarting if needed.
+ */
+- if (!test_bit(STATUS_ALIVE, &priv->status)) {
+- if (hw_rf_kill)
+- set_bit(STATUS_RF_KILL_HW, &priv->status);
+- else
+- clear_bit(STATUS_RF_KILL_HW, &priv->status);
+- wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
++ if (hw_rf_kill) {
++ set_bit(STATUS_RF_KILL_HW, &priv->status);
++ } else {
++ clear_bit(STATUS_RF_KILL_HW, &priv->status);
++ iwl_legacy_force_reset(priv, true);
+ }
++ wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
+
+ handled |= CSR_INT_BIT_RF_KILL;
+ }
+@@ -1764,6 +1764,9 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
+
+ priv->active_rate = IWL_RATES_MASK;
+
++ iwl_legacy_power_update_mode(priv, true);
++ IWL_DEBUG_INFO(priv, "Updated power mode\n");
++
+ if (iwl_legacy_is_associated_ctx(ctx)) {
+ struct iwl_legacy_rxon_cmd *active_rxon =
+ (struct iwl_legacy_rxon_cmd *)&ctx->active;
+@@ -1796,9 +1799,6 @@ static void iwl4965_alive_start(struct iwl_priv *priv)
+ IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
+ wake_up(&priv->wait_command_queue);
+
+- iwl_legacy_power_update_mode(priv, true);
+- IWL_DEBUG_INFO(priv, "Updated power mode\n");
+-
+ return;
+
+ restart:
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
+index 16cdd12..94d35ad 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
+@@ -1297,7 +1297,7 @@ int iwl_alive_start(struct iwl_priv *priv)
+ BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
+ if (ret)
+ return ret;
+- } else {
++ } else if (priv->cfg->bt_params) {
+ /*
+ * default is 2-wire BT coexexistence support
+ */
+diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
+index 832ec4d..5ef176a 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-core.c
++++ b/drivers/net/wireless/iwlwifi/iwl-core.c
+@@ -808,8 +808,11 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ return;
+
+- if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
++ if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
+ &priv->shrd->status))
++ return;
++
++ if (ctx->vif)
+ ieee80211_chswitch_done(ctx->vif, is_success);
+ }
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
+index 1800029..346dc9b 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
++++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
+@@ -227,6 +227,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+ {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
++ {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */
+
+ {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
+diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
+index 3579a68..17f8720 100644
+--- a/drivers/net/wireless/mwifiex/sdio.c
++++ b/drivers/net/wireless/mwifiex/sdio.c
+@@ -1429,8 +1429,8 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
+ /* Allocate buffer and copy payload */
+ blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
+ buf_block_len = (pkt_len + blk_size - 1) / blk_size;
+- *(u16 *) &payload[0] = (u16) pkt_len;
+- *(u16 *) &payload[2] = type;
++ *(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len);
++ *(__le16 *)&payload[2] = cpu_to_le16(type);
+
+ /*
+ * This is SDIO specific header
+diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
+index 50f92d5..4d792a2 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
++++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
+@@ -856,13 +856,8 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
+ spin_unlock_irqrestore(&queue->index_lock, irqflags);
+ }
+
+-void rt2x00queue_pause_queue(struct data_queue *queue)
++void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
+ {
+- if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+- !test_bit(QUEUE_STARTED, &queue->flags) ||
+- test_and_set_bit(QUEUE_PAUSED, &queue->flags))
+- return;
+-
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+@@ -878,6 +873,15 @@ void rt2x00queue_pause_queue(struct data_queue *queue)
+ break;
+ }
+ }
++void rt2x00queue_pause_queue(struct data_queue *queue)
++{
++ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
++ !test_bit(QUEUE_STARTED, &queue->flags) ||
++ test_and_set_bit(QUEUE_PAUSED, &queue->flags))
++ return;
++
++ rt2x00queue_pause_queue_nocheck(queue);
++}
+ EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
+
+ void rt2x00queue_unpause_queue(struct data_queue *queue)
+@@ -939,7 +943,7 @@ void rt2x00queue_stop_queue(struct data_queue *queue)
+ return;
+ }
+
+- rt2x00queue_pause_queue(queue);
++ rt2x00queue_pause_queue_nocheck(queue);
+
+ queue->rt2x00dev->ops->lib->stop_queue(queue);
+
+diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
+index 8efa2f2..f8c319c 100644
+--- a/drivers/net/wireless/zd1201.c
++++ b/drivers/net/wireless/zd1201.c
+@@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
+ goto exit;
+
+ err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
+- USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
++ USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
+ if (err < 0)
+ goto exit;
+
++ memcpy(&ret, buf, sizeof(ret));
++
+ if (ret & 0x80) {
+ err = -EIO;
+ goto exit;
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index fd85fa2..b77808c 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -389,6 +389,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
+ mem = (unsigned long)
+ dt_alloc(size + 4, __alignof__(struct device_node));
+
++ memset((void *)mem, 0, size);
++
+ ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
+
+ pr_debug(" unflattening %lx...\n", mem);
+diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
+index a9c46cc..8c33491 100644
+--- a/drivers/parisc/iommu-helpers.h
++++ b/drivers/parisc/iommu-helpers.h
+@@ -1,3 +1,5 @@
++#include <linux/prefetch.h>
++
+ /**
+ * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
+ * @ioc: The I/O Controller.
+diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
+index 083a49f..165274c 100644
+--- a/drivers/pci/Makefile
++++ b/drivers/pci/Makefile
+@@ -42,6 +42,7 @@ obj-$(CONFIG_UNICORE32) += setup-bus.o setup-irq.o
+ obj-$(CONFIG_PARISC) += setup-bus.o
+ obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o
+ obj-$(CONFIG_PPC) += setup-bus.o
++obj-$(CONFIG_FRV) += setup-bus.o
+ obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
+ obj-$(CONFIG_X86_VISWS) += setup-irq.o
+ obj-$(CONFIG_MN10300) += setup-bus.o
+diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
+index e1b4f80..5c87270 100644
+--- a/drivers/s390/scsi/zfcp_erp.c
++++ b/drivers/s390/scsi/zfcp_erp.c
+@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
+
+ if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
+ zfcp_erp_action_dismiss(&port->erp_action);
+- else
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ else {
++ spin_lock(port->adapter->scsi_host->host_lock);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ zfcp_erp_action_dismiss_lun(sdev);
++ spin_unlock(port->adapter->scsi_host->host_lock);
++ }
+ }
+
+ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
+@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
+ {
+ struct scsi_device *sdev;
+
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ spin_lock(port->adapter->scsi_host->host_lock);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ _zfcp_erp_lun_reopen(sdev, clear, id, 0);
++ spin_unlock(port->adapter->scsi_host->host_lock);
+ }
+
+ static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
+@@ -1435,8 +1440,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
+ atomic_set_mask(common_mask, &port->status);
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+- shost_for_each_device(sdev, adapter->scsi_host)
++ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, adapter->scsi_host)
+ atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
++ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+@@ -1470,11 +1477,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
+ }
+ read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+- shost_for_each_device(sdev, adapter->scsi_host) {
++ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, adapter->scsi_host) {
+ atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+ if (clear_counter)
+ atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
+ }
++ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+@@ -1488,16 +1497,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
+ {
+ struct scsi_device *sdev;
+ u32 common_mask = mask & ZFCP_COMMON_FLAGS;
++ unsigned long flags;
+
+ atomic_set_mask(mask, &port->status);
+
+ if (!common_mask)
+ return;
+
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port)
+ atomic_set_mask(common_mask,
+ &sdev_to_zfcp(sdev)->status);
++ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+@@ -1512,6 +1524,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
+ struct scsi_device *sdev;
+ u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+ u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
++ unsigned long flags;
+
+ atomic_clear_mask(mask, &port->status);
+
+@@ -1521,13 +1534,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
+ if (clear_counter)
+ atomic_set(&port->erp_counter, 0);
+
+- shost_for_each_device(sdev, port->adapter->scsi_host)
++ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
++ __shost_for_each_device(sdev, port->adapter->scsi_host)
+ if (sdev_to_zfcp(sdev)->port == port) {
+ atomic_clear_mask(common_mask,
+ &sdev_to_zfcp(sdev)->status);
+ if (clear_counter)
+ atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
+ }
++ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
+ }
+
+ /**
+diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
+index e76d003..52c6b59 100644
+--- a/drivers/s390/scsi/zfcp_qdio.c
++++ b/drivers/s390/scsi/zfcp_qdio.c
+@@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
+
+ static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
+ {
+- spin_lock_irq(&qdio->req_q_lock);
+ if (atomic_read(&qdio->req_q_free) ||
+ !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+ return 1;
+- spin_unlock_irq(&qdio->req_q_lock);
+ return 0;
+ }
+
+@@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
+ {
+ long ret;
+
+- spin_unlock_irq(&qdio->req_q_lock);
+- ret = wait_event_interruptible_timeout(qdio->req_q_wq,
+- zfcp_qdio_sbal_check(qdio), 5 * HZ);
++ ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
++ zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
+
+ if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+ return -EIO;
+@@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
+ zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
+ }
+
+- spin_lock_irq(&qdio->req_q_lock);
+ return -EIO;
+ }
+
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index fc5a2ef..b018997 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -3547,11 +3547,21 @@ static int megasas_init_fw(struct megasas_instance *instance)
+ break;
+ }
+
+- /*
+- * We expect the FW state to be READY
+- */
+- if (megasas_transition_to_ready(instance, 0))
+- goto fail_ready_state;
++ if (megasas_transition_to_ready(instance, 0)) {
++ atomic_set(&instance->fw_reset_no_pci_access, 1);
++ instance->instancet->adp_reset
++ (instance, instance->reg_set);
++ atomic_set(&instance->fw_reset_no_pci_access, 0);
++ dev_info(&instance->pdev->dev,
++ "megasas: FW restarted successfully from %s!\n",
++ __func__);
++
++ /*waitting for about 30 second before retry*/
++ ssleep(30);
++
++ if (megasas_transition_to_ready(instance, 0))
++ goto fail_ready_state;
++ }
+
+ /* Check if MSI-X is supported while in ready state */
+ msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
+diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
+index f6a50c9..bfb05b8 100644
+--- a/drivers/scsi/nsp32.c
++++ b/drivers/scsi/nsp32.c
+@@ -2927,7 +2927,7 @@ static void nsp32_do_bus_reset(nsp32_hw_data *data)
+ * reset SCSI bus
+ */
+ nsp32_write1(base, SCSI_BUS_CONTROL, BUSCTL_RST);
+- udelay(RESET_HOLD_TIME);
++ mdelay(RESET_HOLD_TIME / 1000);
+ nsp32_write1(base, SCSI_BUS_CONTROL, 0);
+ for(i = 0; i < 5; i++) {
+ intrdat = nsp32_read2(base, IRQ_STATUS); /* dummy read */
+diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
+index 717a8d4..903b2f5 100644
+--- a/drivers/target/target_core_cdb.c
++++ b/drivers/target/target_core_cdb.c
+@@ -127,11 +127,12 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
+ goto out;
+ }
+
+- snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
+- snprintf((unsigned char *)&buf[16], 16, "%s",
+- &dev->se_sub_dev->t10_wwn.model[0]);
+- snprintf((unsigned char *)&buf[32], 4, "%s",
+- &dev->se_sub_dev->t10_wwn.revision[0]);
++ memcpy(&buf[8], "LIO-ORG ", 8);
++ memset(&buf[16], 0x20, 16);
++ memcpy(&buf[16], dev->se_sub_dev->t10_wwn.model,
++ min_t(size_t, strlen(dev->se_sub_dev->t10_wwn.model), 16));
++ memcpy(&buf[32], dev->se_sub_dev->t10_wwn.revision,
++ min_t(size_t, strlen(dev->se_sub_dev->t10_wwn.revision), 4));
+ buf[4] = 31; /* Set additional length to 31 */
+
+ out:
+diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
+index 6f4dd83..3749688 100644
+--- a/drivers/tty/hvc/hvsi_lib.c
++++ b/drivers/tty/hvc/hvsi_lib.c
+@@ -341,8 +341,8 @@ void hvsilib_establish(struct hvsi_priv *pv)
+
+ pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno);
+
+- /* Try for up to 200s */
+- for (timeout = 0; timeout < 20; timeout++) {
++ /* Try for up to 400ms */
++ for (timeout = 0; timeout < 40; timeout++) {
+ if (pv->established)
+ goto established;
+ if (!hvsi_get_packet(pv))
+diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
+index 5b3d063..ab7d11e 100644
+--- a/drivers/tty/serial/mxs-auart.c
++++ b/drivers/tty/serial/mxs-auart.c
+@@ -374,11 +374,18 @@ static void mxs_auart_settermios(struct uart_port *u,
+
+ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
+ {
+- u32 istatus, istat;
++ u32 istat;
+ struct mxs_auart_port *s = context;
+ u32 stat = readl(s->port.membase + AUART_STAT);
+
+- istatus = istat = readl(s->port.membase + AUART_INTR);
++ istat = readl(s->port.membase + AUART_INTR);
++
++ /* ack irq */
++ writel(istat & (AUART_INTR_RTIS
++ | AUART_INTR_TXIS
++ | AUART_INTR_RXIS
++ | AUART_INTR_CTSMIS),
++ s->port.membase + AUART_INTR_CLR);
+
+ if (istat & AUART_INTR_CTSMIS) {
+ uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS);
+@@ -397,12 +404,6 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
+ istat &= ~AUART_INTR_TXIS;
+ }
+
+- writel(istatus & (AUART_INTR_RTIS
+- | AUART_INTR_TXIS
+- | AUART_INTR_RXIS
+- | AUART_INTR_CTSMIS),
+- s->port.membase + AUART_INTR_CLR);
+-
+ return IRQ_HANDLED;
+ }
+
+@@ -542,7 +543,7 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
+ struct mxs_auart_port *s;
+ struct uart_port *port;
+ unsigned int old_ctrl0, old_ctrl2;
+- unsigned int to = 1000;
++ unsigned int to = 20000;
+
+ if (co->index > MXS_AUART_PORTS || co->index < 0)
+ return;
+@@ -563,18 +564,23 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
+
+ uart_console_write(port, str, count, mxs_auart_console_putchar);
+
+- /*
+- * Finally, wait for transmitter to become empty
+- * and restore the TCR
+- */
++ /* Finally, wait for transmitter to become empty ... */
+ while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) {
++ udelay(1);
+ if (!to--)
+ break;
+- udelay(1);
+ }
+
+- writel(old_ctrl0, port->membase + AUART_CTRL0);
+- writel(old_ctrl2, port->membase + AUART_CTRL2);
++ /*
++ * ... and restore the TCR if we waited long enough for the transmitter
++ * to be idle. This might keep the transmitter enabled although it is
++ * unused, but that is better than to disable it while it is still
++ * transmitting.
++ */
++ if (!(readl(port->membase + AUART_STAT) & AUART_STAT_BUSY)) {
++ writel(old_ctrl0, port->membase + AUART_CTRL0);
++ writel(old_ctrl2, port->membase + AUART_CTRL2);
++ }
+
+ clk_disable(s->clk);
+ }
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 2fbcb75..f52182d 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -78,6 +78,12 @@ static const struct usb_device_id usb_quirk_list[] = {
+ { USB_DEVICE(0x04d8, 0x000c), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
++ /* CarrolTouch 4000U */
++ { USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME },
++
++ /* CarrolTouch 4500U */
++ { USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ /* Samsung Android phone modem - ID conflict with SPH-I500 */
+ { USB_DEVICE(0x04e8, 0x6601), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
+index fe85871..db5128b7e 100644
+--- a/drivers/usb/misc/adutux.c
++++ b/drivers/usb/misc/adutux.c
+@@ -829,7 +829,7 @@ static int adu_probe(struct usb_interface *interface,
+
+ /* let the user know what node this device is now attached to */
+ dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n",
+- udev->descriptor.idProduct, dev->serial_number,
++ le16_to_cpu(udev->descriptor.idProduct), dev->serial_number,
+ (dev->minor - ADU_MINOR_BASE));
+ exit:
+ dbg(2," %s : leave, return value %p (dev)", __func__, dev);
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index ce9f87f..a3f6fe0 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -743,9 +743,34 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
+ { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
+- { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
+- { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
+- { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57B_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29A_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29B_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29F_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62B_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S01_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29C_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_81B_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_82B_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5D_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K4Y_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5G_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S05_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_60_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_61_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63B_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_64_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_65_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92D_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_W5R_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_A5R_PID) },
++ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_PW1_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
+ { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 5d25e26..61685ed 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -815,11 +815,35 @@
+ /*
+ * RT Systems programming cables for various ham radios
+ */
+-#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
+-#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */
+-#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */
+-#define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */
+-
++#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
++#define RTSYSTEMS_USB_S03_PID 0x9001 /* RTS-03 USB to Serial Adapter */
++#define RTSYSTEMS_USB_59_PID 0x9e50 /* USB-59 USB to 8 pin plug */
++#define RTSYSTEMS_USB_57A_PID 0x9e51 /* USB-57A USB to 4pin 3.5mm plug */
++#define RTSYSTEMS_USB_57B_PID 0x9e52 /* USB-57B USB to extended 4pin 3.5mm plug */
++#define RTSYSTEMS_USB_29A_PID 0x9e53 /* USB-29A USB to 3.5mm stereo plug */
++#define RTSYSTEMS_USB_29B_PID 0x9e54 /* USB-29B USB to 6 pin mini din */
++#define RTSYSTEMS_USB_29F_PID 0x9e55 /* USB-29F USB to 6 pin modular plug */
++#define RTSYSTEMS_USB_62B_PID 0x9e56 /* USB-62B USB to 8 pin mini din plug*/
++#define RTSYSTEMS_USB_S01_PID 0x9e57 /* USB-RTS01 USB to 3.5 mm stereo plug*/
++#define RTSYSTEMS_USB_63_PID 0x9e58 /* USB-63 USB to 9 pin female*/
++#define RTSYSTEMS_USB_29C_PID 0x9e59 /* USB-29C USB to 4 pin modular plug*/
++#define RTSYSTEMS_USB_81B_PID 0x9e5A /* USB-81 USB to 8 pin mini din plug*/
++#define RTSYSTEMS_USB_82B_PID 0x9e5B /* USB-82 USB to 2.5 mm stereo plug*/
++#define RTSYSTEMS_USB_K5D_PID 0x9e5C /* USB-K5D USB to 8 pin modular plug*/
++#define RTSYSTEMS_USB_K4Y_PID 0x9e5D /* USB-K4Y USB to 2.5/3.5 mm plugs*/
++#define RTSYSTEMS_USB_K5G_PID 0x9e5E /* USB-K5G USB to 8 pin modular plug*/
++#define RTSYSTEMS_USB_S05_PID 0x9e5F /* USB-RTS05 USB to 2.5 mm stereo plug*/
++#define RTSYSTEMS_USB_60_PID 0x9e60 /* USB-60 USB to 6 pin din*/
++#define RTSYSTEMS_USB_61_PID 0x9e61 /* USB-61 USB to 6 pin mini din*/
++#define RTSYSTEMS_USB_62_PID 0x9e62 /* USB-62 USB to 8 pin mini din*/
++#define RTSYSTEMS_USB_63B_PID 0x9e63 /* USB-63 USB to 9 pin female*/
++#define RTSYSTEMS_USB_64_PID 0x9e64 /* USB-64 USB to 9 pin male*/
++#define RTSYSTEMS_USB_65_PID 0x9e65 /* USB-65 USB to 9 pin female null modem*/
++#define RTSYSTEMS_USB_92_PID 0x9e66 /* USB-92 USB to 12 pin plug*/
++#define RTSYSTEMS_USB_92D_PID 0x9e67 /* USB-92D USB to 12 pin plug data*/
++#define RTSYSTEMS_USB_W5R_PID 0x9e68 /* USB-W5R USB to 8 pin modular plug*/
++#define RTSYSTEMS_USB_A5R_PID 0x9e69 /* USB-A5R USB to 8 pin modular plug*/
++#define RTSYSTEMS_USB_PW1_PID 0x9e6A /* USB-PW1 USB to 8 pin modular plug*/
+
+ /*
+ * Physik Instrumente
+diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
+index 4f415e28..b668069 100644
+--- a/drivers/usb/serial/keyspan.c
++++ b/drivers/usb/serial/keyspan.c
+@@ -2620,7 +2620,7 @@ static int keyspan_startup(struct usb_serial *serial)
+ if (d_details == NULL) {
+ dev_err(&serial->dev->dev, "%s - unknown product id %x\n",
+ __func__, le16_to_cpu(serial->dev->descriptor.idProduct));
+- return 1;
++ return -ENODEV;
+ }
+
+ /* Setup private data for serial driver */
+diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
+index 9580679..9270d5c 100644
+--- a/drivers/usb/serial/mos7720.c
++++ b/drivers/usb/serial/mos7720.c
+@@ -97,6 +97,7 @@ struct urbtracker {
+ struct list_head urblist_entry;
+ struct kref ref_count;
+ struct urb *urb;
++ struct usb_ctrlrequest *setup;
+ };
+
+ enum mos7715_pp_modes {
+@@ -279,6 +280,7 @@ static void destroy_urbtracker(struct kref *kref)
+ struct mos7715_parport *mos_parport = urbtrack->mos_parport;
+ dbg("%s called", __func__);
+ usb_free_urb(urbtrack->urb);
++ kfree(urbtrack->setup);
+ kfree(urbtrack);
+ kref_put(&mos_parport->ref_count, destroy_mos_parport);
+ }
+@@ -363,7 +365,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
+ struct urbtracker *urbtrack;
+ int ret_val;
+ unsigned long flags;
+- struct usb_ctrlrequest setup;
+ struct usb_serial *serial = mos_parport->serial;
+ struct usb_device *usbdev = serial->dev;
+ dbg("%s called", __func__);
+@@ -382,14 +383,20 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
+ kfree(urbtrack);
+ return -ENOMEM;
+ }
+- setup.bRequestType = (__u8)0x40;
+- setup.bRequest = (__u8)0x0e;
+- setup.wValue = get_reg_value(reg, dummy);
+- setup.wIndex = get_reg_index(reg);
+- setup.wLength = 0;
++ urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL);
++ if (!urbtrack->setup) {
++ usb_free_urb(urbtrack->urb);
++ kfree(urbtrack);
++ return -ENOMEM;
++ }
++ urbtrack->setup->bRequestType = (__u8)0x40;
++ urbtrack->setup->bRequest = (__u8)0x0e;
++ urbtrack->setup->wValue = get_reg_value(reg, dummy);
++ urbtrack->setup->wIndex = get_reg_index(reg);
++ urbtrack->setup->wLength = 0;
+ usb_fill_control_urb(urbtrack->urb, usbdev,
+ usb_sndctrlpipe(usbdev, 0),
+- (unsigned char *)&setup,
++ (unsigned char *)urbtrack->setup,
+ NULL, 0, async_complete, urbtrack);
+ kref_init(&urbtrack->ref_count);
+ INIT_LIST_HEAD(&urbtrack->urblist_entry);
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 5e8c736..5d2501e 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -185,6 +185,10 @@
+ #define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */
+
+
++enum mos7840_flag {
++ MOS7840_FLAG_CTRL_BUSY,
++};
++
+ static const struct usb_device_id moschip_port_id_table[] = {
+ {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
+ {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
+@@ -258,6 +262,8 @@ struct moschip_port {
+ struct urb *write_urb_pool[NUM_URBS];
+ char busy[NUM_URBS];
+ bool read_urb_busy;
++
++ unsigned long flags;
+ };
+
+
+@@ -519,11 +525,11 @@ static void mos7840_control_callback(struct urb *urb)
+ /* this urb is terminated, clean up */
+ dbg("%s - urb shutting down with status: %d", __func__,
+ status);
+- return;
++ goto out;
+ default:
+ dbg("%s - nonzero urb status received: %d", __func__,
+ status);
+- return;
++ goto out;
+ }
+
+ dbg("%s urb buffer size is %d", __func__, urb->actual_length);
+@@ -536,6 +542,8 @@ static void mos7840_control_callback(struct urb *urb)
+ mos7840_handle_new_msr(mos7840_port, regval);
+ else if (mos7840_port->MsrLsr == 1)
+ mos7840_handle_new_lsr(mos7840_port, regval);
++out:
++ clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mos7840_port->flags);
+ }
+
+ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
+@@ -546,6 +554,9 @@ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
+ unsigned char *buffer = mcs->ctrl_buf;
+ int ret;
+
++ if (test_and_set_bit_lock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags))
++ return -EBUSY;
++
+ dr->bRequestType = MCS_RD_RTYPE;
+ dr->bRequest = MCS_RDREQ;
+ dr->wValue = cpu_to_le16(Wval); /* 0 */
+@@ -557,6 +568,9 @@ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
+ mos7840_control_callback, mcs);
+ mcs->control_urb->transfer_buffer_length = 2;
+ ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
++ if (ret)
++ clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags);
++
+ return ret;
+ }
+
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index 42038ba..885d15d 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -1713,12 +1713,13 @@ static int ti_download_firmware(struct ti_device *tdev)
+
+ dbg("%s\n", __func__);
+ /* try ID specific firmware first, then try generic firmware */
+- sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor,
+- dev->descriptor.idProduct);
++ sprintf(buf, "ti_usb-v%04x-p%04x.fw",
++ le16_to_cpu(dev->descriptor.idVendor),
++ le16_to_cpu(dev->descriptor.idProduct));
+ if ((status = request_firmware(&fw_p, buf, &dev->dev)) != 0) {
+ buf[0] = '\0';
+- if (dev->descriptor.idVendor == MTS_VENDOR_ID) {
+- switch (dev->descriptor.idProduct) {
++ if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) {
++ switch (le16_to_cpu(dev->descriptor.idProduct)) {
+ case MTS_CDMA_PRODUCT_ID:
+ strcpy(buf, "mts_cdma.fw");
+ break;
+diff --git a/drivers/xen/events.c b/drivers/xen/events.c
+index 11d7b64..f6227cc 100644
+--- a/drivers/xen/events.c
++++ b/drivers/xen/events.c
+@@ -316,7 +316,7 @@ static void init_evtchn_cpu_bindings(void)
+
+ for_each_possible_cpu(i)
+ memset(per_cpu(cpu_evtchn_mask, i),
+- (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
++ (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
+ }
+
+ static inline void clear_evtchn(int port)
+@@ -1340,8 +1340,10 @@ void rebind_evtchn_irq(int evtchn, int irq)
+ /* Rebind an evtchn so that it gets delivered to a specific cpu */
+ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+ {
++ struct shared_info *s = HYPERVISOR_shared_info;
+ struct evtchn_bind_vcpu bind_vcpu;
+ int evtchn = evtchn_from_irq(irq);
++ int masked;
+
+ if (!VALID_EVTCHN(evtchn))
+ return -1;
+@@ -1358,6 +1360,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+ bind_vcpu.vcpu = tcpu;
+
+ /*
++ * Mask the event while changing the VCPU binding to prevent
++ * it being delivered on an unexpected VCPU.
++ */
++ masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
++
++ /*
+ * If this fails, it usually just indicates that we're dealing with a
+ * virq or IPI channel, which don't actually need to be rebound. Ignore
+ * it, but don't do the xenlinux-level rebind in that case.
+@@ -1365,6 +1373,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
+ bind_evtchn_to_cpu(evtchn, tcpu);
+
++ if (!masked)
++ unmask_evtchn(evtchn);
++
+ return 0;
+ }
+
+diff --git a/fs/bio.c b/fs/bio.c
+index 4fc4dbb..b84d851 100644
+--- a/fs/bio.c
++++ b/fs/bio.c
+@@ -734,7 +734,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
+ int iov_idx = 0;
+ unsigned int iov_off = 0;
+
+- __bio_for_each_segment(bvec, bio, i, 0) {
++ bio_for_each_segment_all(bvec, bio, i) {
+ char *bv_addr = page_address(bvec->bv_page);
+ unsigned int bv_len = iovecs[i].bv_len;
+
+@@ -787,12 +787,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
+ int bio_uncopy_user(struct bio *bio)
+ {
+ struct bio_map_data *bmd = bio->bi_private;
+- int ret = 0;
++ struct bio_vec *bvec;
++ int ret = 0, i;
+
+- if (!bio_flagged(bio, BIO_NULL_MAPPED))
+- ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
+- bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+- 0, bmd->is_our_pages);
++ if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
++ /*
++ * if we're in a workqueue, the request is orphaned, so
++ * don't copy into a random user address space, just free.
++ */
++ if (current->mm)
++ ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
++ bmd->nr_sgvecs, bio_data_dir(bio) == READ,
++ 0, bmd->is_our_pages);
++ else if (bmd->is_our_pages)
++ bio_for_each_segment_all(bvec, bio, i)
++ __free_page(bvec->bv_page);
++ }
+ bio_free_map_data(bmd);
+ bio_put(bio);
+ return ret;
+@@ -916,7 +926,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
+ return bio;
+ cleanup:
+ if (!map_data)
+- bio_for_each_segment(bvec, bio, i)
++ bio_for_each_segment_all(bvec, bio, i)
+ __free_page(bvec->bv_page);
+
+ bio_put(bio);
+@@ -1130,7 +1140,7 @@ static void __bio_unmap_user(struct bio *bio)
+ /*
+ * make sure we dirty pages we wrote to
+ */
+- __bio_for_each_segment(bvec, bio, i, 0) {
++ bio_for_each_segment_all(bvec, bio, i) {
+ if (bio_data_dir(bio) == READ)
+ set_page_dirty_lock(bvec->bv_page);
+
+@@ -1236,7 +1246,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
+ int i;
+ char *p = bmd->sgvecs[0].iov_base;
+
+- __bio_for_each_segment(bvec, bio, i, 0) {
++ bio_for_each_segment_all(bvec, bio, i) {
+ char *addr = page_address(bvec->bv_page);
+ int len = bmd->iovecs[i].bv_len;
+
+@@ -1276,7 +1286,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
+ if (!reading) {
+ void *p = data;
+
+- bio_for_each_segment(bvec, bio, i) {
++ bio_for_each_segment_all(bvec, bio, i) {
+ char *addr = page_address(bvec->bv_page);
+
+ memcpy(addr, p, bvec->bv_len);
+@@ -1556,7 +1566,7 @@ sector_t bio_sector_offset(struct bio *bio, unsigned short index,
+ if (index >= bio->bi_idx)
+ index = bio->bi_vcnt - 1;
+
+- __bio_for_each_segment(bv, bio, i, 0) {
++ bio_for_each_segment_all(bv, bio, i) {
+ if (i == index) {
+ if (offset > bv->bv_offset)
+ sectors += (offset - bv->bv_offset) / sector_sz;
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index cdcd665..b4675bd 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -369,7 +369,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ if (blobptr + attrsize > blobend)
+ break;
+ if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
+- if (!attrsize)
++ if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
+ break;
+ if (!ses->domainName) {
+ ses->domainName =
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 2f3ff59..7b68088 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -38,6 +38,7 @@
+ #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
+ #define MAX_SERVER_SIZE 15
+ #define MAX_SHARE_SIZE 80
++#define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */
+ #define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */
+ #define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */
+
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index 4c37ed4..52a820a 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -96,6 +96,14 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
+ dput(dentry);
+ }
+
++ /*
++ * If we know that the inode will need to be revalidated immediately,
++ * then don't create a new dentry for it. We'll end up doing an on
++ * the wire call either way and this spares us an invalidation.
++ */
++ if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
++ return NULL;
++
+ dentry = d_alloc(parent, name);
+ if (dentry == NULL)
+ return NULL;
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index 2504809..d362626 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -198,7 +198,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
+ bytes_ret = 0;
+ } else
+ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->domainName,
+- 256, nls_cp);
++ CIFS_MAX_DOMAINNAME_LEN, nls_cp);
+ bcc_ptr += 2 * bytes_ret;
+ bcc_ptr += 2; /* account for null terminator */
+
+@@ -256,8 +256,8 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
+
+ /* copy domain */
+ if (ses->domainName != NULL) {
+- strncpy(bcc_ptr, ses->domainName, 256);
+- bcc_ptr += strnlen(ses->domainName, 256);
++ strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
++ bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ } /* else we will send a null domain name
+ so the server will default to its own domain */
+ *bcc_ptr = 0;
+diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
+index 1585db1..26bb512 100644
+--- a/fs/exofs/ore.c
++++ b/fs/exofs/ore.c
+@@ -401,7 +401,7 @@ static void _clear_bio(struct bio *bio)
+ struct bio_vec *bv;
+ unsigned i;
+
+- __bio_for_each_segment(bv, bio, i, 0) {
++ bio_for_each_segment_all(bv, bio, i) {
+ unsigned this_count = bv->bv_len;
+
+ if (likely(PAGE_SIZE == this_count))
+diff --git a/fs/exofs/ore_raid.c b/fs/exofs/ore_raid.c
+index fff2070..2c64826 100644
+--- a/fs/exofs/ore_raid.c
++++ b/fs/exofs/ore_raid.c
+@@ -432,7 +432,7 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
+ if (!bio)
+ continue;
+
+- __bio_for_each_segment(bv, bio, i, 0) {
++ bio_for_each_segment_all(bv, bio, i) {
+ struct page *page = bv->bv_page;
+
+ SetPageUptodate(page);
+diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
+index aca1790..d0b8f98 100644
+--- a/fs/ext4/ext4_jbd2.c
++++ b/fs/ext4/ext4_jbd2.c
+@@ -109,10 +109,10 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
+
+ if (ext4_handle_valid(handle)) {
+ err = jbd2_journal_dirty_metadata(handle, bh);
+- if (err) {
+- /* Errors can only happen if there is a bug */
+- handle->h_err = err;
+- __ext4_journal_stop(where, line, handle);
++ /* Errors can only happen if there is a bug */
++ if (WARN_ON_ONCE(err)) {
++ ext4_journal_abort_handle(where, line, __func__, bh,
++ handle, err);
+ }
+ } else {
+ if (inode)
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 259e950..84f84bf 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3372,7 +3372,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ }
+ if (test_opt(sb, DIOREAD_NOLOCK)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+- "both data=journal and delalloc");
++ "both data=journal and dioread_nolock");
+ goto failed_mount;
+ }
+ if (test_opt(sb, DELALLOC))
+@@ -4539,6 +4539,21 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
+ goto restore_opts;
+ }
+
++ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
++ if (test_opt2(sb, EXPLICIT_DELALLOC)) {
++ ext4_msg(sb, KERN_ERR, "can't mount with "
++ "both data=journal and delalloc");
++ err = -EINVAL;
++ goto restore_opts;
++ }
++ if (test_opt(sb, DIOREAD_NOLOCK)) {
++ ext4_msg(sb, KERN_ERR, "can't mount with "
++ "both data=journal and dioread_nolock");
++ err = -EINVAL;
++ goto restore_opts;
++ }
++ }
++
+ if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
+ ext4_abort(sb, "Abort forced by user");
+
+diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
+index 9197a1b..9f7c758 100644
+--- a/fs/jfs/jfs_dtree.c
++++ b/fs/jfs/jfs_dtree.c
+@@ -3047,6 +3047,14 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+
+ dir_index = (u32) filp->f_pos;
+
++ /*
++ * NFSv4 reserves cookies 1 and 2 for . and .. so the value
++ * we return to the vfs is one greater than the one we use
++ * internally.
++ */
++ if (dir_index)
++ dir_index--;
++
+ if (dir_index > 1) {
+ struct dir_table_slot dirtab_slot;
+
+@@ -3086,7 +3094,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ if (p->header.flag & BT_INTERNAL) {
+ jfs_err("jfs_readdir: bad index table");
+ DT_PUTPAGE(mp);
+- filp->f_pos = -1;
++ filp->f_pos = DIREND;
+ return 0;
+ }
+ } else {
+@@ -3094,7 +3102,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ /*
+ * self "."
+ */
+- filp->f_pos = 0;
++ filp->f_pos = 1;
+ if (filldir(dirent, ".", 1, 0, ip->i_ino,
+ DT_DIR))
+ return 0;
+@@ -3102,7 +3110,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ /*
+ * parent ".."
+ */
+- filp->f_pos = 1;
++ filp->f_pos = 2;
+ if (filldir(dirent, "..", 2, 1, PARENT(ip), DT_DIR))
+ return 0;
+
+@@ -3123,24 +3131,25 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ /*
+ * Legacy filesystem - OS/2 & Linux JFS < 0.3.6
+ *
+- * pn = index = 0: First entry "."
+- * pn = 0; index = 1: Second entry ".."
++ * pn = 0; index = 1: First entry "."
++ * pn = 0; index = 2: Second entry ".."
+ * pn > 0: Real entries, pn=1 -> leftmost page
+ * pn = index = -1: No more entries
+ */
+ dtpos = filp->f_pos;
+- if (dtpos == 0) {
++ if (dtpos < 2) {
+ /* build "." entry */
+
++ filp->f_pos = 1;
+ if (filldir(dirent, ".", 1, filp->f_pos, ip->i_ino,
+ DT_DIR))
+ return 0;
+- dtoffset->index = 1;
++ dtoffset->index = 2;
+ filp->f_pos = dtpos;
+ }
+
+ if (dtoffset->pn == 0) {
+- if (dtoffset->index == 1) {
++ if (dtoffset->index == 2) {
+ /* build ".." entry */
+
+ if (filldir(dirent, "..", 2, filp->f_pos,
+@@ -3233,6 +3242,12 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+ }
+ jfs_dirent->position = unique_pos++;
+ }
++ /*
++ * We add 1 to the index because we may
++ * use a value of 2 internally, and NFSv4
++ * doesn't like that.
++ */
++ jfs_dirent->position++;
+ } else {
+ jfs_dirent->position = dtpos;
+ len = min(d_namleft, DTLHDRDATALEN_LEGACY);
+diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
+index 168cb93..3fde055 100644
+--- a/fs/nfs/callback_xdr.c
++++ b/fs/nfs/callback_xdr.c
+@@ -451,9 +451,9 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
+ args->csa_nrclists = ntohl(*p++);
+ args->csa_rclists = NULL;
+ if (args->csa_nrclists) {
+- args->csa_rclists = kmalloc(args->csa_nrclists *
+- sizeof(*args->csa_rclists),
+- GFP_KERNEL);
++ args->csa_rclists = kmalloc_array(args->csa_nrclists,
++ sizeof(*args->csa_rclists),
++ GFP_KERNEL);
+ if (unlikely(args->csa_rclists == NULL))
+ goto out;
+
+diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
+index 850a7c0..07a666a 100644
+--- a/fs/nilfs2/segbuf.c
++++ b/fs/nilfs2/segbuf.c
+@@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
+
+ if (err == -EOPNOTSUPP) {
+ set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+- bio_put(bio);
+- /* to be detected by submit_seg_bio() */
++ /* to be detected by nilfs_segbuf_submit_bio() */
+ }
+
+ if (!uptodate)
+@@ -377,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
+ bio->bi_private = segbuf;
+ bio_get(bio);
+ submit_bio(mode, bio);
++ segbuf->sb_nbio++;
+ if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
+ bio_put(bio);
+ err = -EOPNOTSUPP;
+ goto failed;
+ }
+- segbuf->sb_nbio++;
+ bio_put(bio);
+
+ wi->bio = NULL;
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 3efa725..ef1740d 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -604,7 +604,7 @@ const struct file_operations proc_clear_refs_operations = {
+ };
+
+ struct pagemapread {
+- int pos, len;
++ int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
+ u64 *buffer;
+ };
+
+@@ -792,8 +792,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
+ if (!count)
+ goto out_task;
+
+- pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+- pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
++ pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
++ pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
+ ret = -ENOMEM;
+ if (!pm.buffer)
+ goto out_task;
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index 847994a..e868554 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -135,16 +135,27 @@ static inline int bio_has_allocated_vec(struct bio *bio)
+ #define bio_io_error(bio) bio_endio((bio), -EIO)
+
+ /*
+- * drivers should not use the __ version unless they _really_ want to
+- * run through the entire bio and not just pending pieces
++ * drivers should not use the __ version unless they _really_ know what
++ * they're doing
+ */
+ #define __bio_for_each_segment(bvl, bio, i, start_idx) \
+ for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx); \
+ i < (bio)->bi_vcnt; \
+ bvl++, i++)
+
++/*
++ * drivers should _never_ use the all version - the bio may have been split
++ * before it got to the driver and the driver won't own all of it
++ */
++#define bio_for_each_segment_all(bvl, bio, i) \
++ for (i = 0; \
++ bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \
++ i++)
++
+ #define bio_for_each_segment(bvl, bio, i) \
+- __bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
++ for (i = (bio)->bi_idx; \
++ bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \
++ i++)
+
+ /*
+ * get a reference to a bio, so it won't disappear. the intended use is
+diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
+index c3da42d..82924bf 100644
+--- a/include/linux/ftrace_event.h
++++ b/include/linux/ftrace_event.h
+@@ -71,6 +71,8 @@ struct trace_iterator {
+ /* trace_seq for __print_flags() and __print_symbolic() etc. */
+ struct trace_seq tmp_seq;
+
++ cpumask_var_t started;
++
+ /* The below is zeroed out in pipe_read */
+ struct trace_seq seq;
+ struct trace_entry *ent;
+@@ -83,7 +85,7 @@ struct trace_iterator {
+ loff_t pos;
+ long idx;
+
+- cpumask_var_t started;
++ /* All new field here will be zeroed out in pipe_read */
+ };
+
+
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 5b42f1b..de3a321 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -297,6 +297,7 @@ struct mm_struct {
+ void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
+ #endif
+ unsigned long mmap_base; /* base of mmap area */
++ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
+ unsigned long task_size; /* size of task vm space */
+ unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
+ unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */
+diff --git a/include/linux/slab.h b/include/linux/slab.h
+index 573c809..a595dce 100644
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -190,7 +190,7 @@ size_t ksize(const void *);
+ #endif
+
+ /**
+- * kcalloc - allocate memory for an array. The memory is set to zero.
++ * kmalloc_array - allocate memory for an array.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate.
+@@ -240,11 +240,22 @@ size_t ksize(const void *);
+ * for general use, and so are not documented here. For a full list of
+ * potential flags, always refer to linux/gfp.h.
+ */
+-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
++static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+ {
+ if (size != 0 && n > ULONG_MAX / size)
+ return NULL;
+- return __kmalloc(n * size, flags | __GFP_ZERO);
++ return __kmalloc(n * size, flags);
++}
++
++/**
++ * kcalloc - allocate memory for an array. The memory is set to zero.
++ * @n: number of elements.
++ * @size: element size.
++ * @flags: the type of memory to allocate (see kmalloc).
++ */
++static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
++{
++ return kmalloc_array(n, size, flags | __GFP_ZERO);
+ }
+
+ #if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index bea7ad5..e007f76 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -530,6 +530,63 @@ do { \
+ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
+
+
++#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
++ lock, ret) \
++do { \
++ DEFINE_WAIT(__wait); \
++ \
++ for (;;) { \
++ prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
++ if (condition) \
++ break; \
++ if (signal_pending(current)) { \
++ ret = -ERESTARTSYS; \
++ break; \
++ } \
++ spin_unlock_irq(&lock); \
++ ret = schedule_timeout(ret); \
++ spin_lock_irq(&lock); \
++ if (!ret) \
++ break; \
++ } \
++ finish_wait(&wq, &__wait); \
++} while (0)
++
++/**
++ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
++ * The condition is checked under the lock. This is expected
++ * to be called with the lock taken.
++ * @wq: the waitqueue to wait on
++ * @condition: a C expression for the event to wait for
++ * @lock: a locked spinlock_t, which will be released before schedule()
++ * and reacquired afterwards.
++ * @timeout: timeout, in jiffies
++ *
++ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
++ * @condition evaluates to true or signal is received. The @condition is
++ * checked each time the waitqueue @wq is woken up.
++ *
++ * wake_up() has to be called after changing any variable that could
++ * change the result of the wait condition.
++ *
++ * This is supposed to be called while holding the lock. The lock is
++ * dropped before going to sleep and is reacquired afterwards.
++ *
++ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
++ * was interrupted by a signal, and the remaining jiffies otherwise
++ * if the condition evaluated to true before the timeout elapsed.
++ */
++#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
++ timeout) \
++({ \
++ int __ret = timeout; \
++ \
++ if (!(condition)) \
++ __wait_event_interruptible_lock_irq_timeout( \
++ wq, condition, lock, __ret); \
++ __ret; \
++})
++
+
+ #define __wait_event_killable(wq, condition, ret) \
+ do { \
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 8be9b746..5bbe443 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -900,6 +900,15 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
+ }
+
+ /*
++ * Initialize event state based on the perf_event_attr::disabled.
++ */
++static inline void perf_event__state_init(struct perf_event *event)
++{
++ event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
++ PERF_EVENT_STATE_INACTIVE;
++}
++
++/*
+ * Called at perf_event creation and when events are attached/detached from a
+ * group.
+ */
+@@ -6050,8 +6059,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ event->overflow_handler = overflow_handler;
+ event->overflow_handler_context = context;
+
+- if (attr->disabled)
+- event->state = PERF_EVENT_STATE_OFF;
++ perf_event__state_init(event);
+
+ pmu = NULL;
+
+@@ -6433,9 +6441,17 @@ SYSCALL_DEFINE5(perf_event_open,
+
+ mutex_lock(&gctx->mutex);
+ perf_remove_from_context(group_leader);
++
++ /*
++ * Removing from the context ends up with disabled
++ * event. What we want here is event in the initial
++ * startup state, ready to be add into new context.
++ */
++ perf_event__state_init(group_leader);
+ list_for_each_entry(sibling, &group_leader->sibling_list,
+ group_entry) {
+ perf_remove_from_context(sibling);
++ perf_event__state_init(sibling);
+ put_ctx(gctx);
+ }
+ mutex_unlock(&gctx->mutex);
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index 66e4576..59474c5 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -5033,7 +5033,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
+ * idle runqueue:
+ */
+ if (rq->cfs.load.weight)
+- rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
++ rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
+
+ return rr_interval;
+ }
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index a584ad9..ce1067f 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3375,6 +3375,7 @@ waitagain:
+ memset(&iter->seq, 0,
+ sizeof(struct trace_iterator) -
+ offsetof(struct trace_iterator, seq));
++ cpumask_clear(iter->started);
+ iter->pos = -1;
+
+ trace_event_read_lock();
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 0ad2420..0bc9ff0 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1920,6 +1920,15 @@ __acquires(&gcwq->lock)
+ dump_stack();
+ }
+
++ /*
++ * The following prevents a kworker from hogging CPU on !PREEMPT
++ * kernels, where a requeueing work item waiting for something to
++ * happen could deadlock with stop_machine as such work item could
++ * indefinitely requeue itself while all other CPUs are trapped in
++ * stop_machine.
++ */
++ cond_resched();
++
+ spin_lock_irq(&gcwq->lock);
+
+ /* clear cpu intensive status */
+diff --git a/mm/bounce.c b/mm/bounce.c
+index 4e9ae72..f71a3b34 100644
+--- a/mm/bounce.c
++++ b/mm/bounce.c
+@@ -132,7 +132,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
+ /*
+ * free up bounce indirect pages used
+ */
+- __bio_for_each_segment(bvec, bio, i, 0) {
++ bio_for_each_segment_all(bvec, bio, i) {
+ org_vec = bio_orig->bi_io_vec + i;
+ if (bvec->bv_page == org_vec->bv_page)
+ continue;
+diff --git a/mm/nommu.c b/mm/nommu.c
+index f0cd7ab..1db7971 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -1825,6 +1825,16 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+ }
+ EXPORT_SYMBOL(remap_pfn_range);
+
++int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
++{
++ unsigned long pfn = start >> PAGE_SHIFT;
++ unsigned long vm_len = vma->vm_end - vma->vm_start;
++
++ pfn += vma->vm_pgoff;
++ return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
++}
++EXPORT_SYMBOL(vm_iomap_memory);
++
+ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+ unsigned long pgoff)
+ {
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 5c028e2..b5afea2 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5777,6 +5777,10 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
+ zone->free_area[order].nr_free--;
+ __mod_zone_page_state(zone, NR_FREE_PAGES,
+ - (1UL << order));
++#ifdef CONFIG_HIGHMEM
++ if (PageHighMem(page))
++ totalhigh_pages -= 1 << order;
++#endif
+ for (i = 0; i < (1 << order); i++)
+ SetPageReserved((page+i));
+ pfn += (1 << order);
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 5485077..739b073 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -32,6 +32,8 @@ static int tcp_adv_win_scale_min = -31;
+ static int tcp_adv_win_scale_max = 31;
+ static int ip_ttl_min = 1;
+ static int ip_ttl_max = 255;
++static int tcp_syn_retries_min = 1;
++static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
+ static int ip_ping_group_range_min[] = { 0, 0 };
+ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+
+@@ -231,7 +233,9 @@ static struct ctl_table ipv4_table[] = {
+ .data = &sysctl_tcp_syn_retries,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_dointvec
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &tcp_syn_retries_min,
++ .extra2 = &tcp_syn_retries_max
+ },
+ {
+ .procname = "tcp_synack_retries",
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 449a918..f5af259 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -257,10 +257,12 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
+ {
+ struct mr6_table *mrt, *next;
+
++ rtnl_lock();
+ list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
+ list_del(&mrt->list);
+ ip6mr_free_table(mrt);
+ }
++ rtnl_unlock();
+ fib_rules_unregister(net->ipv6.mr6_rules_ops);
+ }
+ #else
+@@ -287,7 +289,10 @@ static int __net_init ip6mr_rules_init(struct net *net)
+
+ static void __net_exit ip6mr_rules_exit(struct net *net)
+ {
++ rtnl_lock();
+ ip6mr_free_table(net->ipv6.mrt6);
++ net->ipv6.mrt6 = NULL;
++ rtnl_unlock();
+ }
+ #endif
+
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 6fefdfc..8dbdb8e 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -2073,6 +2073,7 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, const struct xfrm_policy *
+ pol->sadb_x_policy_type = IPSEC_POLICY_NONE;
+ }
+ pol->sadb_x_policy_dir = dir+1;
++ pol->sadb_x_policy_reserved = 0;
+ pol->sadb_x_policy_id = xp->index;
+ pol->sadb_x_policy_priority = xp->priority;
+
+@@ -2686,6 +2687,7 @@ static int key_notify_policy_flush(const struct km_event *c)
+ hdr->sadb_msg_pid = c->pid;
+ hdr->sadb_msg_version = PF_KEY_V2;
+ hdr->sadb_msg_errno = (uint8_t) 0;
++ hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
+ hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
+ hdr->sadb_msg_reserved = 0;
+ pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+@@ -3108,7 +3110,9 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
+ pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
+ pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
+ pol->sadb_x_policy_dir = dir+1;
++ pol->sadb_x_policy_reserved = 0;
+ pol->sadb_x_policy_id = xp->index;
++ pol->sadb_x_policy_priority = xp->priority;
+
+ /* Set sadb_comb's. */
+ if (x->id.proto == IPPROTO_AH)
+@@ -3496,6 +3500,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
+ pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
+ pol->sadb_x_policy_dir = dir + 1;
++ pol->sadb_x_policy_reserved = 0;
+ pol->sadb_x_policy_id = 0;
+ pol->sadb_x_policy_priority = 0;
+
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index cd6cbdb..7d882fc 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -821,8 +821,14 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+
+- /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
+- if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
++ /*
++ * Drop duplicate 802.11 retransmissions
++ * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
++ */
++ if (rx->skb->len >= 24 && rx->sta &&
++ !ieee80211_is_ctl(hdr->frame_control) &&
++ !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
++ !is_multicast_ether_addr(hdr->addr1)) {
+ if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
+ rx->sta->last_seq_ctrl[rx->seqno_idx] ==
+ hdr->seq_ctrl)) {
+diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
+index e25e490..6e38ef0 100644
+--- a/net/sched/sch_atm.c
++++ b/net/sched/sch_atm.c
+@@ -606,6 +606,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sockaddr_atmpvc pvc;
+ int state;
+
++ memset(&pvc, 0, sizeof(pvc));
+ pvc.sap_family = AF_ATMPVC;
+ pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
+ pvc.sap_addr.vpi = flow->vcc->vpi;
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+index b7cddb9..7f59944 100644
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -1467,6 +1467,7 @@ static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tc_cbq_wrropt opt;
+
++ memset(&opt, 0, sizeof(opt));
+ opt.flags = 0;
+ opt.allot = cl->allot;
+ opt.priority = cl->priority + 1;
+diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
+index 96eb168..3dd7207 100644
+--- a/net/sctp/outqueue.c
++++ b/net/sctp/outqueue.c
+@@ -205,6 +205,8 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary,
+ */
+ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
+ {
++ memset(q, 0, sizeof(struct sctp_outq));
++
+ q->asoc = asoc;
+ INIT_LIST_HEAD(&q->out_chunk_list);
+ INIT_LIST_HEAD(&q->control_chunk_list);
+@@ -212,13 +214,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
+ INIT_LIST_HEAD(&q->sacked);
+ INIT_LIST_HEAD(&q->abandoned);
+
+- q->fast_rtx = 0;
+- q->outstanding_bytes = 0;
+ q->empty = 1;
+- q->cork = 0;
+-
+- q->malloced = 0;
+- q->out_qlen = 0;
+ }
+
+ /* Free the outqueue structure and any related pending chunks.
+diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
+index 2763e3e..38f388c 100644
+--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
++++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
+@@ -82,9 +82,9 @@ gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
+ >>PAGE_CACHE_SHIFT;
+ unsigned int offset = (buf->page_base + len - 1)
+ & (PAGE_CACHE_SIZE - 1);
+- ptr = kmap_atomic(buf->pages[last], KM_USER0);
++ ptr = kmap_atomic(buf->pages[last]);
+ pad = *(ptr + offset);
+- kunmap_atomic(ptr, KM_USER0);
++ kunmap_atomic(ptr);
+ goto out;
+ } else
+ len -= buf->page_len;
+diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
+index 145e6784..0a648c5 100644
+--- a/net/sunrpc/socklib.c
++++ b/net/sunrpc/socklib.c
+@@ -114,7 +114,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
+ }
+
+ len = PAGE_CACHE_SIZE;
+- kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
++ kaddr = kmap_atomic(*ppage);
+ if (base) {
+ len -= base;
+ if (pglen < len)
+@@ -127,7 +127,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
+ ret = copy_actor(desc, kaddr, len);
+ }
+ flush_dcache_page(*ppage);
+- kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
++ kunmap_atomic(kaddr);
+ copied += ret;
+ if (ret != len || !desc->count)
+ goto out;
+diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
+index 593f4c6..6997cdd 100644
+--- a/net/sunrpc/xdr.c
++++ b/net/sunrpc/xdr.c
+@@ -122,9 +122,9 @@ xdr_terminate_string(struct xdr_buf *buf, const u32 len)
+ {
+ char *kaddr;
+
+- kaddr = kmap_atomic(buf->pages[0], KM_USER0);
++ kaddr = kmap_atomic(buf->pages[0]);
+ kaddr[buf->page_base + len] = '\0';
+- kunmap_atomic(kaddr, KM_USER0);
++ kunmap_atomic(kaddr);
+ }
+ EXPORT_SYMBOL_GPL(xdr_terminate_string);
+
+@@ -232,12 +232,15 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
+ pgto_base -= copy;
+ pgfrom_base -= copy;
+
+- vto = kmap_atomic(*pgto, KM_USER0);
+- vfrom = kmap_atomic(*pgfrom, KM_USER1);
+- memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
++ vto = kmap_atomic(*pgto);
++ if (*pgto != *pgfrom) {
++ vfrom = kmap_atomic(*pgfrom);
++ memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
++ kunmap_atomic(vfrom);
++ } else
++ memmove(vto + pgto_base, vto + pgfrom_base, copy);
+ flush_dcache_page(*pgto);
+- kunmap_atomic(vfrom, KM_USER1);
+- kunmap_atomic(vto, KM_USER0);
++ kunmap_atomic(vto);
+
+ } while ((len -= copy) != 0);
+ }
+@@ -267,9 +270,9 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
+ if (copy > len)
+ copy = len;
+
+- vto = kmap_atomic(*pgto, KM_USER0);
++ vto = kmap_atomic(*pgto);
+ memcpy(vto + pgbase, p, copy);
+- kunmap_atomic(vto, KM_USER0);
++ kunmap_atomic(vto);
+
+ len -= copy;
+ if (len == 0)
+@@ -311,9 +314,9 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
+ if (copy > len)
+ copy = len;
+
+- vfrom = kmap_atomic(*pgfrom, KM_USER0);
++ vfrom = kmap_atomic(*pgfrom);
+ memcpy(p, vfrom + pgbase, copy);
+- kunmap_atomic(vfrom, KM_USER0);
++ kunmap_atomic(vfrom);
+
+ pgbase += copy;
+ if (pgbase == PAGE_CACHE_SIZE) {
+diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
+index 554d081..1776e57 100644
+--- a/net/sunrpc/xprtrdma/rpc_rdma.c
++++ b/net/sunrpc/xprtrdma/rpc_rdma.c
+@@ -338,9 +338,9 @@ rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
+ curlen = copy_len;
+ dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
+ __func__, i, destp, copy_len, curlen);
+- srcp = kmap_atomic(ppages[i], KM_SKB_SUNRPC_DATA);
++ srcp = kmap_atomic(ppages[i]);
+ memcpy(destp, srcp+page_base, curlen);
+- kunmap_atomic(srcp, KM_SKB_SUNRPC_DATA);
++ kunmap_atomic(srcp);
+ rqst->rq_svec[0].iov_len += curlen;
+ destp += curlen;
+ copy_len -= curlen;
+@@ -639,10 +639,10 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
+ dprintk("RPC: %s: page %d"
+ " srcp 0x%p len %d curlen %d\n",
+ __func__, i, srcp, copy_len, curlen);
+- destp = kmap_atomic(ppages[i], KM_SKB_SUNRPC_DATA);
++ destp = kmap_atomic(ppages[i]);
+ memcpy(destp + page_base, srcp, curlen);
+ flush_dcache_page(ppages[i]);
+- kunmap_atomic(destp, KM_SKB_SUNRPC_DATA);
++ kunmap_atomic(destp);
+ srcp += curlen;
+ copy_len -= curlen;
+ if (copy_len == 0)
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index c06c365..6d4d263 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -4826,12 +4826,14 @@ EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb);
+
+ void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
+ {
++ struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
+ void *hdr = ((void **)skb->cb)[1];
+ struct nlattr *data = ((void **)skb->cb)[2];
+
+ nla_nest_end(skb, data);
+ genlmsg_end(skb, hdr);
+- genlmsg_multicast(skb, 0, nl80211_testmode_mcgrp.id, gfp);
++ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0,
++ nl80211_testmode_mcgrp.id, gfp);
+ }
+ EXPORT_SYMBOL(cfg80211_testmode_event);
+ #endif
+@@ -7282,7 +7284,8 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
+ return;
+ }
+
+- genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
++ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
++ nl80211_mlme_mcgrp.id, gfp);
+ return;
+
+ nla_put_failure:
+diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c
+index cef813d..ed726d1 100644
+--- a/sound/i2c/other/ak4xxx-adda.c
++++ b/sound/i2c/other/ak4xxx-adda.c
+@@ -571,7 +571,7 @@ static int ak4xxx_capture_source_info(struct snd_kcontrol *kcontrol,
+ struct snd_akm4xxx *ak = snd_kcontrol_chip(kcontrol);
+ int mixer_ch = AK_GET_SHIFT(kcontrol->private_value);
+ const char **input_names;
+- int num_names, idx;
++ unsigned int num_names, idx;
+
+ num_names = ak4xxx_capture_num_inputs(ak, mixer_ch);
+ if (!num_names)
+diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
+index 97871be..cba84ef 100644
+--- a/sound/isa/opti9xx/opti92x-ad1848.c
++++ b/sound/isa/opti9xx/opti92x-ad1848.c
+@@ -173,11 +173,7 @@ MODULE_DEVICE_TABLE(pnp_card, snd_opti9xx_pnpids);
+
+ #endif /* CONFIG_PNP */
+
+-#ifdef OPTi93X
+-#define DEV_NAME "opti93x"
+-#else
+-#define DEV_NAME "opti92x"
+-#endif
++#define DEV_NAME KBUILD_MODNAME
+
+ static char * snd_opti9xx_names[] = {
+ "unknown",
+@@ -1126,7 +1122,7 @@ static void __devexit snd_opti9xx_pnp_remove(struct pnp_card_link * pcard)
+
+ static struct pnp_card_driver opti9xx_pnpc_driver = {
+ .flags = PNP_DRIVER_RES_DISABLE,
+- .name = "opti9xx",
++ .name = DEV_NAME,
+ .id_table = snd_opti9xx_pnpids,
+ .probe = snd_opti9xx_pnp_probe,
+ .remove = __devexit_p(snd_opti9xx_pnp_remove),
+diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
+index 6c9e8e8..1fc28f5 100644
+--- a/sound/oss/Kconfig
++++ b/sound/oss/Kconfig
+@@ -250,6 +250,7 @@ config MSND_FIFOSIZE
+ menuconfig SOUND_OSS
+ tristate "OSS sound modules"
+ depends on ISA_DMA_API && VIRT_TO_BUS
++ depends on !GENERIC_ISA_DMA_SUPPORT_BROKEN
+ help
+ OSS is the Open Sound System suite of sound card drivers. They make
+ sound programming easier since they provide a common API. Say Y or
+diff --git a/sound/usb/6fire/comm.c b/sound/usb/6fire/comm.c
+index c994daa..af6ec8d 100644
+--- a/sound/usb/6fire/comm.c
++++ b/sound/usb/6fire/comm.c
+@@ -111,19 +111,37 @@ static int usb6fire_comm_send_buffer(u8 *buffer, struct usb_device *dev)
+ static int usb6fire_comm_write8(struct comm_runtime *rt, u8 request,
+ u8 reg, u8 value)
+ {
+- u8 buffer[13]; /* 13: maximum length of message */
++ u8 *buffer;
++ int ret;
++
++ /* 13: maximum length of message */
++ buffer = kmalloc(13, GFP_KERNEL);
++ if (!buffer)
++ return -ENOMEM;
+
+ usb6fire_comm_init_buffer(buffer, 0x00, request, reg, value, 0x00);
+- return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
++ ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
++
++ kfree(buffer);
++ return ret;
+ }
+
+ static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request,
+ u8 reg, u8 vl, u8 vh)
+ {
+- u8 buffer[13]; /* 13: maximum length of message */
++ u8 *buffer;
++ int ret;
++
++ /* 13: maximum length of message */
++ buffer = kmalloc(13, GFP_KERNEL);
++ if (!buffer)
++ return -ENOMEM;
+
+ usb6fire_comm_init_buffer(buffer, 0x00, request, reg, vl, vh);
+- return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
++ ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
++
++ kfree(buffer);
++ return ret;
+ }
+
+ int __devinit usb6fire_comm_init(struct sfire_chip *chip)
+@@ -136,6 +154,12 @@ int __devinit usb6fire_comm_init(struct sfire_chip *chip)
+ if (!rt)
+ return -ENOMEM;
+
++ rt->receiver_buffer = kzalloc(COMM_RECEIVER_BUFSIZE, GFP_KERNEL);
++ if (!rt->receiver_buffer) {
++ kfree(rt);
++ return -ENOMEM;
++ }
++
+ rt->serial = 1;
+ rt->chip = chip;
+ usb_init_urb(urb);
+@@ -153,6 +177,7 @@ int __devinit usb6fire_comm_init(struct sfire_chip *chip)
+ urb->interval = 1;
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret < 0) {
++ kfree(rt->receiver_buffer);
+ kfree(rt);
+ snd_printk(KERN_ERR PREFIX "cannot create comm data receiver.");
+ return ret;
+@@ -171,6 +196,9 @@ void usb6fire_comm_abort(struct sfire_chip *chip)
+
+ void usb6fire_comm_destroy(struct sfire_chip *chip)
+ {
+- kfree(chip->comm);
++ struct comm_runtime *rt = chip->comm;
++
++ kfree(rt->receiver_buffer);
++ kfree(rt);
+ chip->comm = NULL;
+ }
+diff --git a/sound/usb/6fire/comm.h b/sound/usb/6fire/comm.h
+index edc5dc8..19e2f92 100644
+--- a/sound/usb/6fire/comm.h
++++ b/sound/usb/6fire/comm.h
+@@ -25,7 +25,7 @@ struct comm_runtime {
+ struct sfire_chip *chip;
+
+ struct urb receiver;
+- u8 receiver_buffer[COMM_RECEIVER_BUFSIZE];
++ u8 *receiver_buffer;
+
+ u8 serial; /* urb serial */
+
+diff --git a/sound/usb/6fire/midi.c b/sound/usb/6fire/midi.c
+index 13f4509..8283c5d 100644
+--- a/sound/usb/6fire/midi.c
++++ b/sound/usb/6fire/midi.c
+@@ -20,6 +20,10 @@
+ #include "chip.h"
+ #include "comm.h"
+
++enum {
++ MIDI_BUFSIZE = 64
++};
++
+ static void usb6fire_midi_out_handler(struct urb *urb)
+ {
+ struct midi_runtime *rt = urb->context;
+@@ -157,6 +161,12 @@ int __devinit usb6fire_midi_init(struct sfire_chip *chip)
+ if (!rt)
+ return -ENOMEM;
+
++ rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL);
++ if (!rt->out_buffer) {
++ kfree(rt);
++ return -ENOMEM;
++ }
++
+ rt->chip = chip;
+ rt->in_received = usb6fire_midi_in_received;
+ rt->out_buffer[0] = 0x80; /* 'send midi' command */
+@@ -170,6 +180,7 @@ int __devinit usb6fire_midi_init(struct sfire_chip *chip)
+
+ ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance);
+ if (ret < 0) {
++ kfree(rt->out_buffer);
+ kfree(rt);
+ snd_printk(KERN_ERR PREFIX "unable to create midi.\n");
+ return ret;
+@@ -198,6 +209,9 @@ void usb6fire_midi_abort(struct sfire_chip *chip)
+
+ void usb6fire_midi_destroy(struct sfire_chip *chip)
+ {
+- kfree(chip->midi);
++ struct midi_runtime *rt = chip->midi;
++
++ kfree(rt->out_buffer);
++ kfree(rt);
+ chip->midi = NULL;
+ }
+diff --git a/sound/usb/6fire/midi.h b/sound/usb/6fire/midi.h
+index 97a7bf6..7f8f448 100644
+--- a/sound/usb/6fire/midi.h
++++ b/sound/usb/6fire/midi.h
+@@ -17,10 +17,6 @@
+
+ #include "common.h"
+
+-enum {
+- MIDI_BUFSIZE = 64
+-};
+-
+ struct midi_runtime {
+ struct sfire_chip *chip;
+ struct snd_rawmidi *instance;
+@@ -33,7 +29,7 @@ struct midi_runtime {
+ struct snd_rawmidi_substream *out;
+ struct urb out_urb;
+ u8 out_serial; /* serial number of out packet */
+- u8 out_buffer[MIDI_BUFSIZE];
++ u8 *out_buffer;
+ int buffer_offset;
+
+ void (*in_received)(struct midi_runtime *rt, u8 *data, int length);
+diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
+index 888a7c7..8609c74 100644
+--- a/sound/usb/6fire/pcm.c
++++ b/sound/usb/6fire/pcm.c
+@@ -579,6 +579,33 @@ static void __devinit usb6fire_pcm_init_urb(struct pcm_urb *urb,
+ urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB;
+ }
+
++static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt)
++{
++ int i;
++
++ for (i = 0; i < PCM_N_URBS; i++) {
++ rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
++ * PCM_MAX_PACKET_SIZE, GFP_KERNEL);
++ if (!rt->out_urbs[i].buffer)
++ return -ENOMEM;
++ rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
++ * PCM_MAX_PACKET_SIZE, GFP_KERNEL);
++ if (!rt->in_urbs[i].buffer)
++ return -ENOMEM;
++ }
++ return 0;
++}
++
++static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt)
++{
++ int i;
++
++ for (i = 0; i < PCM_N_URBS; i++) {
++ kfree(rt->out_urbs[i].buffer);
++ kfree(rt->in_urbs[i].buffer);
++ }
++}
++
+ int __devinit usb6fire_pcm_init(struct sfire_chip *chip)
+ {
+ int i;
+@@ -590,6 +617,13 @@ int __devinit usb6fire_pcm_init(struct sfire_chip *chip)
+ if (!rt)
+ return -ENOMEM;
+
++ ret = usb6fire_pcm_buffers_init(rt);
++ if (ret) {
++ usb6fire_pcm_buffers_destroy(rt);
++ kfree(rt);
++ return ret;
++ }
++
+ rt->chip = chip;
+ rt->stream_state = STREAM_DISABLED;
+ rt->rate = ARRAY_SIZE(rates);
+@@ -611,6 +645,7 @@ int __devinit usb6fire_pcm_init(struct sfire_chip *chip)
+
+ ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm);
+ if (ret < 0) {
++ usb6fire_pcm_buffers_destroy(rt);
+ kfree(rt);
+ snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n");
+ return ret;
+@@ -626,6 +661,7 @@ int __devinit usb6fire_pcm_init(struct sfire_chip *chip)
+ snd_dma_continuous_data(GFP_KERNEL),
+ MAX_BUFSIZE, MAX_BUFSIZE);
+ if (ret) {
++ usb6fire_pcm_buffers_destroy(rt);
+ kfree(rt);
+ snd_printk(KERN_ERR PREFIX
+ "error preallocating pcm buffers.\n");
+@@ -670,6 +706,9 @@ void usb6fire_pcm_abort(struct sfire_chip *chip)
+
+ void usb6fire_pcm_destroy(struct sfire_chip *chip)
+ {
+- kfree(chip->pcm);
++ struct pcm_runtime *rt = chip->pcm;
++
++ usb6fire_pcm_buffers_destroy(rt);
++ kfree(rt);
+ chip->pcm = NULL;
+ }
+diff --git a/sound/usb/6fire/pcm.h b/sound/usb/6fire/pcm.h
+index 2bee813..a8e8899 100644
+--- a/sound/usb/6fire/pcm.h
++++ b/sound/usb/6fire/pcm.h
+@@ -33,7 +33,7 @@ struct pcm_urb {
+ struct urb instance;
+ struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB];
+ /* END DO NOT SEPARATE */
+- u8 buffer[PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE];
++ u8 *buffer;
+
+ struct pcm_urb *peer;
+ };
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index aeb26eb..41b9fe0 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -720,8 +720,20 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_
+ return 0;
+ }
+ case UAC1_PROCESSING_UNIT:
+- case UAC1_EXTENSION_UNIT: {
++ case UAC1_EXTENSION_UNIT:
++ /* UAC2_PROCESSING_UNIT_V2 */
++ /* UAC2_EFFECT_UNIT */
++ case UAC2_EXTENSION_UNIT_V2: {
+ struct uac_processing_unit_descriptor *d = p1;
++
++ if (state->mixer->protocol == UAC_VERSION_2 &&
++ hdr[2] == UAC2_EFFECT_UNIT) {
++ /* UAC2/UAC1 unit IDs overlap here in an
++ * uncompatible way. Ignore this unit for now.
++ */
++ return 0;
++ }
++
+ if (d->bNrInPins) {
+ id = d->baSourceID[0];
+ break; /* continue to parse */
+@@ -1956,6 +1968,8 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
+ return parse_audio_extension_unit(state, unitid, p1);
+ else /* UAC_VERSION_2 */
+ return parse_audio_processing_unit(state, unitid, p1);
++ case UAC2_EXTENSION_UNIT_V2:
++ return parse_audio_extension_unit(state, unitid, p1);
+ default:
+ snd_printk(KERN_ERR "usbaudio: unit %u: unexpected type 0x%02x\n", unitid, p1[2]);
+ return -EINVAL;
+diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
+index 78284b1..42dffa0 100644
+--- a/tools/perf/util/map.c
++++ b/tools/perf/util/map.c
+@@ -15,7 +15,8 @@ const char *map_type__name[MAP__NR_TYPES] = {
+
+ static inline int is_anon_memory(const char *filename)
+ {
+- return strcmp(filename, "//anon") == 0;
++ return !strcmp(filename, "//anon") ||
++ !strcmp(filename, "/anon_hugepage (deleted)");
+ }
+
+ static inline int is_no_dso_memory(const char *filename)
diff --git a/1051_linux-3.2.52.patch b/1051_linux-3.2.52.patch
new file mode 100644
index 00000000..d9c21e4f
--- /dev/null
+++ b/1051_linux-3.2.52.patch
@@ -0,0 +1,5221 @@
+diff --git a/Makefile b/Makefile
+index 0f1193666ddb..1dd2c094a3d3 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 51
++SUBLEVEL = 52
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
+index c898deb3ada0..189ed0069dee 100644
+--- a/arch/arm/mach-versatile/pci.c
++++ b/arch/arm/mach-versatile/pci.c
+@@ -43,9 +43,9 @@
+ #define PCI_IMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x0)
+ #define PCI_IMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x4)
+ #define PCI_IMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x8)
+-#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x10)
+-#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
+-#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
++#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
++#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
++#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x1c)
+ #define PCI_SELFID __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0xc)
+
+ #define DEVICE_ID_OFFSET 0x00
+diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
+index fbdd12ea3a58..cc3f35dc102a 100644
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -98,6 +98,9 @@ void show_mem(unsigned int filter)
+ printk("Mem-info:\n");
+ show_free_areas(filter);
+
++ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
++ return;
++
+ for_each_bank (i, mi) {
+ struct membank *bank = &mi->bank[i];
+ unsigned int pfn1, pfn2;
+diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
+index f114a3b14c6a..ce6e7a80945e 100644
+--- a/arch/ia64/mm/contig.c
++++ b/arch/ia64/mm/contig.c
+@@ -46,6 +46,8 @@ void show_mem(unsigned int filter)
+ printk(KERN_INFO "Mem-info:\n");
+ show_free_areas(filter);
+ printk(KERN_INFO "Node memory in pages:\n");
++ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
++ return;
+ for_each_online_pgdat(pgdat) {
+ unsigned long present;
+ unsigned long flags;
+diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
+index c641333cd997..2230817b4e93 100644
+--- a/arch/ia64/mm/discontig.c
++++ b/arch/ia64/mm/discontig.c
+@@ -623,6 +623,8 @@ void show_mem(unsigned int filter)
+
+ printk(KERN_INFO "Mem-info:\n");
+ show_free_areas(filter);
++ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
++ return;
+ printk(KERN_INFO "Node memory in pages:\n");
+ for_each_online_pgdat(pgdat) {
+ unsigned long present;
+diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
+new file mode 100644
+index 000000000000..40e02d9c38b4
+--- /dev/null
++++ b/arch/m68k/kernel/vmlinux-nommu.lds
+@@ -0,0 +1,93 @@
++/*
++ * vmlinux.lds.S -- master linker script for m68knommu arch
++ *
++ * (C) Copyright 2002-2012, Greg Ungerer <gerg@snapgear.com>
++ *
++ * This linker script is equipped to build either ROM loaded or RAM
++ * run kernels.
++ */
++
++#if defined(CONFIG_RAMKERNEL)
++#define KTEXT_ADDR CONFIG_KERNELBASE
++#endif
++#if defined(CONFIG_ROMKERNEL)
++#define KTEXT_ADDR CONFIG_ROMSTART
++#define KDATA_ADDR CONFIG_KERNELBASE
++#define LOAD_OFFSET KDATA_ADDR + (ADDR(.text) + SIZEOF(.text))
++#endif
++
++#include <asm/page.h>
++#include <asm/thread_info.h>
++#include <asm-generic/vmlinux.lds.h>
++
++OUTPUT_ARCH(m68k)
++ENTRY(_start)
++
++jiffies = jiffies_64 + 4;
++
++SECTIONS {
++
++#ifdef CONFIG_ROMVEC
++ . = CONFIG_ROMVEC;
++ .romvec : {
++ __rom_start = .;
++ _romvec = .;
++ *(.romvec)
++ *(.data..initvect)
++ }
++#endif
++
++ . = KTEXT_ADDR;
++
++ _text = .;
++ _stext = .;
++ .text : {
++ HEAD_TEXT
++ TEXT_TEXT
++ SCHED_TEXT
++ LOCK_TEXT
++ *(.fixup)
++ . = ALIGN(16);
++ }
++ _etext = .;
++
++#ifdef KDATA_ADDR
++ . = KDATA_ADDR;
++#endif
++
++ _sdata = .;
++ RO_DATA_SECTION(PAGE_SIZE)
++ RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE)
++ _edata = .;
++
++ EXCEPTION_TABLE(16)
++ NOTES
++
++ . = ALIGN(PAGE_SIZE);
++ __init_begin = .;
++ INIT_TEXT_SECTION(PAGE_SIZE)
++ INIT_DATA_SECTION(16)
++ PERCPU_SECTION(16)
++ .m68k_fixup : {
++ __start_fixup = .;
++ *(.m68k_fixup)
++ __stop_fixup = .;
++ }
++ .init.data : {
++ . = ALIGN(PAGE_SIZE);
++ __init_end = .;
++ }
++
++ _sbss = .;
++ BSS_SECTION(0, 0, 0)
++ _ebss = .;
++
++ _end = .;
++
++ STABS_DEBUG
++ .comment 0 : { *(.comment) }
++
++ /* Sections to be discarded */
++ DISCARDS
++}
++
+diff --git a/arch/m68k/kernel/vmlinux.lds.S b/arch/m68k/kernel/vmlinux.lds.S
+index 030dabf0bc53..69ec79638870 100644
+--- a/arch/m68k/kernel/vmlinux.lds.S
++++ b/arch/m68k/kernel/vmlinux.lds.S
+@@ -1,5 +1,14 @@
+-#ifdef CONFIG_MMU
+-#include "vmlinux.lds_mm.S"
++#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
++PHDRS
++{
++ text PT_LOAD FILEHDR PHDRS FLAGS (7);
++ data PT_LOAD FLAGS (7);
++}
++#ifdef CONFIG_SUN3
++#include "vmlinux-sun3.lds"
+ #else
+-#include "vmlinux.lds_no.S"
++#include "vmlinux-std.lds"
++#endif
++#else
++#include "vmlinux-nommu.lds"
+ #endif
+diff --git a/arch/m68k/kernel/vmlinux.lds_mm.S b/arch/m68k/kernel/vmlinux.lds_mm.S
+deleted file mode 100644
+index 99ba315bd0a8..000000000000
+--- a/arch/m68k/kernel/vmlinux.lds_mm.S
++++ /dev/null
+@@ -1,10 +0,0 @@
+-PHDRS
+-{
+- text PT_LOAD FILEHDR PHDRS FLAGS (7);
+- data PT_LOAD FLAGS (7);
+-}
+-#ifdef CONFIG_SUN3
+-#include "vmlinux-sun3.lds"
+-#else
+-#include "vmlinux-std.lds"
+-#endif
+diff --git a/arch/m68k/kernel/vmlinux.lds_no.S b/arch/m68k/kernel/vmlinux.lds_no.S
+deleted file mode 100644
+index 4e2389340837..000000000000
+--- a/arch/m68k/kernel/vmlinux.lds_no.S
++++ /dev/null
+@@ -1,187 +0,0 @@
+-/*
+- * vmlinux.lds.S -- master linker script for m68knommu arch
+- *
+- * (C) Copyright 2002-2006, Greg Ungerer <gerg@snapgear.com>
+- *
+- * This linker script is equipped to build either ROM loaded or RAM
+- * run kernels.
+- */
+-
+-#include <asm-generic/vmlinux.lds.h>
+-#include <asm/page.h>
+-#include <asm/thread_info.h>
+-
+-#if defined(CONFIG_RAMKERNEL)
+-#define RAM_START CONFIG_KERNELBASE
+-#define RAM_LENGTH (CONFIG_RAMBASE + CONFIG_RAMSIZE - CONFIG_KERNELBASE)
+-#define TEXT ram
+-#define DATA ram
+-#define INIT ram
+-#define BSSS ram
+-#endif
+-#if defined(CONFIG_ROMKERNEL) || defined(CONFIG_HIMEMKERNEL)
+-#define RAM_START CONFIG_RAMBASE
+-#define RAM_LENGTH CONFIG_RAMSIZE
+-#define ROMVEC_START CONFIG_ROMVEC
+-#define ROMVEC_LENGTH CONFIG_ROMVECSIZE
+-#define ROM_START CONFIG_ROMSTART
+-#define ROM_LENGTH CONFIG_ROMSIZE
+-#define TEXT rom
+-#define DATA ram
+-#define INIT ram
+-#define BSSS ram
+-#endif
+-
+-#ifndef DATA_ADDR
+-#define DATA_ADDR
+-#endif
+-
+-
+-OUTPUT_ARCH(m68k)
+-ENTRY(_start)
+-
+-MEMORY {
+- ram : ORIGIN = RAM_START, LENGTH = RAM_LENGTH
+-#ifdef ROM_START
+- romvec : ORIGIN = ROMVEC_START, LENGTH = ROMVEC_LENGTH
+- rom : ORIGIN = ROM_START, LENGTH = ROM_LENGTH
+-#endif
+-}
+-
+-jiffies = jiffies_64 + 4;
+-
+-SECTIONS {
+-
+-#ifdef ROMVEC_START
+- . = ROMVEC_START ;
+- .romvec : {
+- __rom_start = . ;
+- _romvec = .;
+- *(.data..initvect)
+- } > romvec
+-#endif
+-
+- .text : {
+- _text = .;
+- _stext = . ;
+- HEAD_TEXT
+- TEXT_TEXT
+- SCHED_TEXT
+- LOCK_TEXT
+- *(.text..lock)
+-
+- . = ALIGN(16); /* Exception table */
+- __start___ex_table = .;
+- *(__ex_table)
+- __stop___ex_table = .;
+-
+- *(.rodata) *(.rodata.*)
+- *(__vermagic) /* Kernel version magic */
+- *(.rodata1)
+- *(.rodata.str1.1)
+-
+- /* Kernel symbol table: Normal symbols */
+- . = ALIGN(4);
+- __start___ksymtab = .;
+- *(SORT(___ksymtab+*))
+- __stop___ksymtab = .;
+-
+- /* Kernel symbol table: GPL-only symbols */
+- __start___ksymtab_gpl = .;
+- *(SORT(___ksymtab_gpl+*))
+- __stop___ksymtab_gpl = .;
+-
+- /* Kernel symbol table: Normal unused symbols */
+- __start___ksymtab_unused = .;
+- *(SORT(___ksymtab_unused+*))
+- __stop___ksymtab_unused = .;
+-
+- /* Kernel symbol table: GPL-only unused symbols */
+- __start___ksymtab_unused_gpl = .;
+- *(SORT(___ksymtab_unused_gpl+*))
+- __stop___ksymtab_unused_gpl = .;
+-
+- /* Kernel symbol table: GPL-future symbols */
+- __start___ksymtab_gpl_future = .;
+- *(SORT(___ksymtab_gpl_future+*))
+- __stop___ksymtab_gpl_future = .;
+-
+- /* Kernel symbol table: Normal symbols */
+- __start___kcrctab = .;
+- *(SORT(___kcrctab+*))
+- __stop___kcrctab = .;
+-
+- /* Kernel symbol table: GPL-only symbols */
+- __start___kcrctab_gpl = .;
+- *(SORT(___kcrctab_gpl+*))
+- __stop___kcrctab_gpl = .;
+-
+- /* Kernel symbol table: Normal unused symbols */
+- __start___kcrctab_unused = .;
+- *(SORT(___kcrctab_unused+*))
+- __stop___kcrctab_unused = .;
+-
+- /* Kernel symbol table: GPL-only unused symbols */
+- __start___kcrctab_unused_gpl = .;
+- *(SORT(___kcrctab_unused_gpl+*))
+- __stop___kcrctab_unused_gpl = .;
+-
+- /* Kernel symbol table: GPL-future symbols */
+- __start___kcrctab_gpl_future = .;
+- *(SORT(___kcrctab_gpl_future+*))
+- __stop___kcrctab_gpl_future = .;
+-
+- /* Kernel symbol table: strings */
+- *(__ksymtab_strings)
+-
+- /* Built-in module parameters */
+- . = ALIGN(4) ;
+- __start___param = .;
+- *(__param)
+- __stop___param = .;
+-
+- /* Built-in module versions */
+- . = ALIGN(4) ;
+- __start___modver = .;
+- *(__modver)
+- __stop___modver = .;
+-
+- . = ALIGN(4) ;
+- _etext = . ;
+- } > TEXT
+-
+- .data DATA_ADDR : {
+- . = ALIGN(4);
+- _sdata = . ;
+- DATA_DATA
+- CACHELINE_ALIGNED_DATA(32)
+- PAGE_ALIGNED_DATA(PAGE_SIZE)
+- *(.data..shared_aligned)
+- INIT_TASK_DATA(THREAD_SIZE)
+- _edata = . ;
+- } > DATA
+-
+- .init.text : {
+- . = ALIGN(PAGE_SIZE);
+- __init_begin = .;
+- } > INIT
+- INIT_TEXT_SECTION(PAGE_SIZE) > INIT
+- INIT_DATA_SECTION(16) > INIT
+- .init.data : {
+- . = ALIGN(PAGE_SIZE);
+- __init_end = .;
+- } > INIT
+-
+- .bss : {
+- . = ALIGN(4);
+- _sbss = . ;
+- *(.bss)
+- *(COMMON)
+- . = ALIGN(4) ;
+- _ebss = . ;
+- _end = . ;
+- } > BSSS
+-
+- DISCARDS
+-}
+-
+diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
+index 82f364e209fc..0b621625a6fb 100644
+--- a/arch/parisc/mm/init.c
++++ b/arch/parisc/mm/init.c
+@@ -685,6 +685,8 @@ void show_mem(unsigned int filter)
+
+ printk(KERN_INFO "Mem-info:\n");
+ show_free_areas(filter);
++ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
++ return;
+ #ifndef CONFIG_DISCONTIGMEM
+ i = max_mapnr;
+ while (i-- > 0) {
+diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
+index 8184ee97e484..3fcbae0fa4a0 100644
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -764,6 +764,16 @@ int fix_alignment(struct pt_regs *regs)
+ nb = aligninfo[instr].len;
+ flags = aligninfo[instr].flags;
+
++ /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
++ if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
++ nb = 8;
++ flags = LD+SW;
++ } else if (IS_XFORM(instruction) &&
++ ((instruction >> 1) & 0x3ff) == 660) {
++ nb = 8;
++ flags = ST+SW;
++ }
++
+ /* Byteswap little endian loads and stores */
+ swiz = 0;
+ if (regs->msr & MSR_LE) {
+diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
+index 0cfcf98aafca..d0b205cd1126 100644
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -495,7 +495,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
+ /* number of bytes needed for the bitmap */
+ sz = (tbl->it_size + 7) >> 3;
+
+- page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
++ page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
+ if (!page)
+ panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
+ tbl->it_map = page_address(page);
+diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
+index 826681dc342a..26af24bbaed4 100644
+--- a/arch/powerpc/kernel/lparcfg.c
++++ b/arch/powerpc/kernel/lparcfg.c
+@@ -375,6 +375,7 @@ static void parse_system_parameter_string(struct seq_file *m)
+ __pa(rtas_data_buf),
+ RTAS_DATA_BUF_SIZE);
+ memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
++ local_buffer[SPLPAR_MAXLENGTH - 1] = '\0';
+ spin_unlock(&rtas_data_buf_lock);
+
+ if (call_status != 0) {
+diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
+index 55be64db0ae0..ca683a18943b 100644
+--- a/arch/powerpc/kernel/sysfs.c
++++ b/arch/powerpc/kernel/sysfs.c
+@@ -18,6 +18,7 @@
+ #include <asm/machdep.h>
+ #include <asm/smp.h>
+ #include <asm/pmc.h>
++#include <asm/firmware.h>
+
+ #include "cacheinfo.h"
+
+@@ -178,14 +179,24 @@ SYSFS_PMCSETUP(purr, SPRN_PURR);
+ SYSFS_PMCSETUP(spurr, SPRN_SPURR);
+ SYSFS_PMCSETUP(dscr, SPRN_DSCR);
+
++/*
++ Lets only enable read for phyp resources and
++ enable write when needed with a separate function.
++ Lets be conservative and default to pseries.
++*/
+ static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
+ static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL);
+ static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr);
+-static SYSDEV_ATTR(purr, 0600, show_purr, store_purr);
++static SYSDEV_ATTR(purr, 0400, show_purr, store_purr);
+
+ unsigned long dscr_default = 0;
+ EXPORT_SYMBOL(dscr_default);
+
++static void add_write_permission_dev_attr(struct sysdev_attribute *attr)
++{
++ attr->attr.mode |= 0200;
++}
++
+ static ssize_t show_dscr_default(struct sysdev_class *class,
+ struct sysdev_class_attribute *attr, char *buf)
+ {
+@@ -394,8 +405,11 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
+ if (cpu_has_feature(CPU_FTR_MMCRA))
+ sysdev_create_file(s, &attr_mmcra);
+
+- if (cpu_has_feature(CPU_FTR_PURR))
++ if (cpu_has_feature(CPU_FTR_PURR)) {
++ if (!firmware_has_feature(FW_FEATURE_LPAR))
++ add_write_permission_dev_attr(&attr_purr);
+ sysdev_create_file(s, &attr_purr);
++ }
+
+ if (cpu_has_feature(CPU_FTR_SPURR))
+ sysdev_create_file(s, &attr_spurr);
+diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
+index 18245af38aea..3cdbc648d979 100644
+--- a/arch/powerpc/lib/checksum_64.S
++++ b/arch/powerpc/lib/checksum_64.S
+@@ -229,19 +229,35 @@ _GLOBAL(csum_partial)
+ blr
+
+
+- .macro source
++ .macro srcnr
+ 100:
+ .section __ex_table,"a"
+ .align 3
+- .llong 100b,.Lsrc_error
++ .llong 100b,.Lsrc_error_nr
+ .previous
+ .endm
+
+- .macro dest
++ .macro source
++150:
++ .section __ex_table,"a"
++ .align 3
++ .llong 150b,.Lsrc_error
++ .previous
++ .endm
++
++ .macro dstnr
+ 200:
+ .section __ex_table,"a"
+ .align 3
+- .llong 200b,.Ldest_error
++ .llong 200b,.Ldest_error_nr
++ .previous
++ .endm
++
++ .macro dest
++250:
++ .section __ex_table,"a"
++ .align 3
++ .llong 250b,.Ldest_error
+ .previous
+ .endm
+
+@@ -272,16 +288,16 @@ _GLOBAL(csum_partial_copy_generic)
+ rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */
+ beq .Lcopy_aligned
+
+- li r7,4
+- sub r6,r7,r6
++ li r9,4
++ sub r6,r9,r6
+ mtctr r6
+
+ 1:
+-source; lhz r6,0(r3) /* align to doubleword */
++srcnr; lhz r6,0(r3) /* align to doubleword */
+ subi r5,r5,2
+ addi r3,r3,2
+ adde r0,r0,r6
+-dest; sth r6,0(r4)
++dstnr; sth r6,0(r4)
+ addi r4,r4,2
+ bdnz 1b
+
+@@ -395,10 +411,10 @@ dest; std r16,56(r4)
+
+ mtctr r6
+ 3:
+-source; ld r6,0(r3)
++srcnr; ld r6,0(r3)
+ addi r3,r3,8
+ adde r0,r0,r6
+-dest; std r6,0(r4)
++dstnr; std r6,0(r4)
+ addi r4,r4,8
+ bdnz 3b
+
+@@ -408,10 +424,10 @@ dest; std r6,0(r4)
+ srdi. r6,r5,2
+ beq .Lcopy_tail_halfword
+
+-source; lwz r6,0(r3)
++srcnr; lwz r6,0(r3)
+ addi r3,r3,4
+ adde r0,r0,r6
+-dest; stw r6,0(r4)
++dstnr; stw r6,0(r4)
+ addi r4,r4,4
+ subi r5,r5,4
+
+@@ -419,10 +435,10 @@ dest; stw r6,0(r4)
+ srdi. r6,r5,1
+ beq .Lcopy_tail_byte
+
+-source; lhz r6,0(r3)
++srcnr; lhz r6,0(r3)
+ addi r3,r3,2
+ adde r0,r0,r6
+-dest; sth r6,0(r4)
++dstnr; sth r6,0(r4)
+ addi r4,r4,2
+ subi r5,r5,2
+
+@@ -430,10 +446,10 @@ dest; sth r6,0(r4)
+ andi. r6,r5,1
+ beq .Lcopy_finish
+
+-source; lbz r6,0(r3)
++srcnr; lbz r6,0(r3)
+ sldi r9,r6,8 /* Pad the byte out to 16 bits */
+ adde r0,r0,r9
+-dest; stb r6,0(r4)
++dstnr; stb r6,0(r4)
+
+ .Lcopy_finish:
+ addze r0,r0 /* add in final carry */
+@@ -443,6 +459,11 @@ dest; stb r6,0(r4)
+ blr
+
+ .Lsrc_error:
++ ld r14,STK_REG(r14)(r1)
++ ld r15,STK_REG(r15)(r1)
++ ld r16,STK_REG(r16)(r1)
++ addi r1,r1,STACKFRAMESIZE
++.Lsrc_error_nr:
+ cmpdi 0,r7,0
+ beqlr
+ li r6,-EFAULT
+@@ -450,6 +471,11 @@ dest; stb r6,0(r4)
+ blr
+
+ .Ldest_error:
++ ld r14,STK_REG(r14)(r1)
++ ld r15,STK_REG(r15)(r1)
++ ld r16,STK_REG(r16)(r1)
++ addi r1,r1,STACKFRAMESIZE
++.Ldest_error_nr:
+ cmpdi 0,r8,0
+ beqlr
+ li r6,-EFAULT
+diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
+index f445e98463e6..cfabc3d8821e 100644
+--- a/arch/sparc/kernel/entry.S
++++ b/arch/sparc/kernel/entry.S
+@@ -1177,7 +1177,7 @@ sys_sigreturn:
+ nop
+
+ call syscall_trace
+- nop
++ mov 1, %o1
+
+ 1:
+ /* We don't want to muck with user registers like a
+diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
+index 79f310364849..7c0073500173 100644
+--- a/arch/sparc/kernel/ktlb.S
++++ b/arch/sparc/kernel/ktlb.S
+@@ -25,11 +25,10 @@ kvmap_itlb:
+ */
+ kvmap_itlb_4v:
+
+-kvmap_itlb_nonlinear:
+ /* Catch kernel NULL pointer calls. */
+ sethi %hi(PAGE_SIZE), %g5
+ cmp %g4, %g5
+- bleu,pn %xcc, kvmap_dtlb_longpath
++ blu,pn %xcc, kvmap_itlb_longpath
+ nop
+
+ KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index 7f5f65d0b3fd..817187d42777 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -147,7 +147,7 @@ linux_syscall_trace32:
+ srl %i4, 0, %o4
+ srl %i1, 0, %o1
+ srl %i2, 0, %o2
+- ba,pt %xcc, 2f
++ ba,pt %xcc, 5f
+ srl %i3, 0, %o3
+
+ linux_syscall_trace:
+@@ -177,13 +177,13 @@ linux_sparc_syscall32:
+ srl %i1, 0, %o1 ! IEU0 Group
+ ldx [%g6 + TI_FLAGS], %l0 ! Load
+
+- srl %i5, 0, %o5 ! IEU1
++ srl %i3, 0, %o3 ! IEU0
+ srl %i2, 0, %o2 ! IEU0 Group
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
+ bne,pn %icc, linux_syscall_trace32 ! CTI
+ mov %i0, %l5 ! IEU1
+- call %l7 ! CTI Group brk forced
+- srl %i3, 0, %o3 ! IEU0
++5: call %l7 ! CTI Group brk forced
++ srl %i5, 0, %o5 ! IEU1
+ ba,a,pt %xcc, 3f
+
+ /* Linux native system calls enter here... */
+diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
+index da1b781b5e65..8fa84a3897cb 100644
+--- a/arch/sparc/kernel/trampoline_64.S
++++ b/arch/sparc/kernel/trampoline_64.S
+@@ -131,7 +131,6 @@ startup_continue:
+ clr %l5
+ sethi %hi(num_kernel_image_mappings), %l6
+ lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
+- add %l6, 1, %l6
+
+ mov 15, %l7
+ BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
+@@ -224,7 +223,6 @@ niagara_lock_tlb:
+ clr %l5
+ sethi %hi(num_kernel_image_mappings), %l6
+ lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
+- add %l6, 1, %l6
+
+ 1:
+ mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
+diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
+index 1b30bb3bfdb1..fbb800533032 100644
+--- a/arch/sparc/lib/ksyms.c
++++ b/arch/sparc/lib/ksyms.c
+@@ -131,15 +131,6 @@ EXPORT_SYMBOL(___copy_from_user);
+ EXPORT_SYMBOL(___copy_in_user);
+ EXPORT_SYMBOL(__clear_user);
+
+-/* RW semaphores */
+-EXPORT_SYMBOL(__down_read);
+-EXPORT_SYMBOL(__down_read_trylock);
+-EXPORT_SYMBOL(__down_write);
+-EXPORT_SYMBOL(__down_write_trylock);
+-EXPORT_SYMBOL(__up_read);
+-EXPORT_SYMBOL(__up_write);
+-EXPORT_SYMBOL(__downgrade_write);
+-
+ /* Atomic counter implementation. */
+ EXPORT_SYMBOL(atomic_add);
+ EXPORT_SYMBOL(atomic_add_ret);
+diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
+index 3b379cddbc64..d1af4ed5694d 100644
+--- a/arch/unicore32/mm/init.c
++++ b/arch/unicore32/mm/init.c
+@@ -65,6 +65,9 @@ void show_mem(unsigned int filter)
+ printk(KERN_DEFAULT "Mem-info:\n");
+ show_free_areas(filter);
+
++ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
++ return;
++
+ for_each_bank(i, mi) {
+ struct membank *bank = &mi->bank[i];
+ unsigned int pfn1, pfn2;
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 47f4e5ff55ac..a4e1b4bd4953 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -468,6 +468,22 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
+ },
+ },
++ { /* Handle problems with rebooting on the Dell PowerEdge C6100. */
++ .callback = set_pci_reboot,
++ .ident = "Dell PowerEdge C6100",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
++ },
++ },
++ { /* Some C6100 machines were shipped with vendor being 'Dell'. */
++ .callback = set_pci_reboot,
++ .ident = "Dell PowerEdge C6100",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
++ },
++ },
+ { }
+ };
+
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index f9537e3044ff..a18d20d52740 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -703,10 +703,13 @@ void __init efi_enter_virtual_mode(void)
+
+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ md = p;
+- if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
+- md->type != EFI_BOOT_SERVICES_CODE &&
+- md->type != EFI_BOOT_SERVICES_DATA)
+- continue;
++ if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
++#ifdef CONFIG_X86_64
++ if (md->type != EFI_BOOT_SERVICES_CODE &&
++ md->type != EFI_BOOT_SERVICES_DATA)
++#endif
++ continue;
++ }
+
+ size = md->num_pages << EFI_PAGE_SHIFT;
+ end = md->phys_addr + size;
+diff --git a/crypto/api.c b/crypto/api.c
+index 033a7147e5eb..cea3cf6c9897 100644
+--- a/crypto/api.c
++++ b/crypto/api.c
+@@ -34,6 +34,8 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
+ BLOCKING_NOTIFIER_HEAD(crypto_chain);
+ EXPORT_SYMBOL_GPL(crypto_chain);
+
++static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
++
+ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
+ {
+ atomic_inc(&alg->cra_refcnt);
+@@ -150,8 +152,11 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
+ }
+ up_write(&crypto_alg_sem);
+
+- if (alg != &larval->alg)
++ if (alg != &larval->alg) {
+ kfree(larval);
++ if (crypto_is_larval(alg))
++ alg = crypto_larval_wait(alg);
++ }
+
+ return alg;
+ }
+diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
+index f40acef80269..a6977e12d574 100644
+--- a/drivers/acpi/acpi_ipmi.c
++++ b/drivers/acpi/acpi_ipmi.c
+@@ -39,6 +39,7 @@
+ #include <linux/ipmi.h>
+ #include <linux/device.h>
+ #include <linux/pnp.h>
++#include <linux/spinlock.h>
+
+ MODULE_AUTHOR("Zhao Yakui");
+ MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
+@@ -57,7 +58,7 @@ struct acpi_ipmi_device {
+ struct list_head head;
+ /* the IPMI request message list */
+ struct list_head tx_msg_list;
+- struct mutex tx_msg_lock;
++ spinlock_t tx_msg_lock;
+ acpi_handle handle;
+ struct pnp_dev *pnp_dev;
+ ipmi_user_t user_interface;
+@@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
+ struct kernel_ipmi_msg *msg;
+ struct acpi_ipmi_buffer *buffer;
+ struct acpi_ipmi_device *device;
++ unsigned long flags;
+
+ msg = &tx_msg->tx_message;
+ /*
+@@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
+
+ /* Get the msgid */
+ device = tx_msg->device;
+- mutex_lock(&device->tx_msg_lock);
++ spin_lock_irqsave(&device->tx_msg_lock, flags);
+ device->curr_msgid++;
+ tx_msg->tx_msgid = device->curr_msgid;
+- mutex_unlock(&device->tx_msg_lock);
++ spin_unlock_irqrestore(&device->tx_msg_lock, flags);
+ }
+
+ static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
+@@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
+ int msg_found = 0;
+ struct acpi_ipmi_msg *tx_msg;
+ struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
++ unsigned long flags;
+
+ if (msg->user != ipmi_device->user_interface) {
+ dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
+@@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
+ ipmi_free_recv_msg(msg);
+ return;
+ }
+- mutex_lock(&ipmi_device->tx_msg_lock);
++ spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
+ list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
+ if (msg->msgid == tx_msg->tx_msgid) {
+ msg_found = 1;
+@@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
+ }
+ }
+
+- mutex_unlock(&ipmi_device->tx_msg_lock);
++ spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+ if (!msg_found) {
+ dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
+ "returned.\n", msg->msgid);
+@@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
+ struct acpi_ipmi_device *ipmi_device = handler_context;
+ int err, rem_time;
+ acpi_status status;
++ unsigned long flags;
+ /*
+ * IPMI opregion message.
+ * IPMI message is firstly written to the BMC and system software
+@@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
+ return AE_NO_MEMORY;
+
+ acpi_format_ipmi_msg(tx_msg, address, value);
+- mutex_lock(&ipmi_device->tx_msg_lock);
++ spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
+ list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
+- mutex_unlock(&ipmi_device->tx_msg_lock);
++ spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+ err = ipmi_request_settime(ipmi_device->user_interface,
+ &tx_msg->addr,
+ tx_msg->tx_msgid,
+@@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
+ status = AE_OK;
+
+ end_label:
+- mutex_lock(&ipmi_device->tx_msg_lock);
++ spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
+ list_del(&tx_msg->head);
+- mutex_unlock(&ipmi_device->tx_msg_lock);
++ spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+ kfree(tx_msg);
+ return status;
+ }
+@@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
+
+ INIT_LIST_HEAD(&ipmi_device->head);
+
+- mutex_init(&ipmi_device->tx_msg_lock);
++ spin_lock_init(&ipmi_device->tx_msg_lock);
+ INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
+ ipmi_install_space_handler(ipmi_device);
+
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 51de186a8bfd..8176b82b2e4c 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -964,6 +964,10 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
+ ec_enlarge_storm_threshold, "CLEVO hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
++ {
++ ec_validate_ecdt, "ASUS hardware", {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
+ {},
+ };
+
+diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
+index d3446f628d54..d7ad86536b3e 100644
+--- a/drivers/block/cciss.c
++++ b/drivers/block/cciss.c
+@@ -1186,6 +1186,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
+ int err;
+ u32 cp;
+
++ memset(&arg64, 0, sizeof(arg64));
+ err = 0;
+ err |=
+ copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
+diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
+index 9125bbeacd4d..504bc16e4506 100644
+--- a/drivers/block/cpqarray.c
++++ b/drivers/block/cpqarray.c
+@@ -1195,6 +1195,7 @@ out_passthru:
+ ida_pci_info_struct pciinfo;
+
+ if (!arg) return -EINVAL;
++ memset(&pciinfo, 0, sizeof(pciinfo));
+ pciinfo.bus = host->pci_dev->bus->number;
+ pciinfo.dev_fn = host->pci_dev->devfn;
+ pciinfo.board_id = host->board_id;
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index bde72f714789..3539f9b1e002 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -84,6 +84,7 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x04CA, 0x3008) },
+ { USB_DEVICE(0x13d3, 0x3362) },
+ { USB_DEVICE(0x0CF3, 0xE004) },
++ { USB_DEVICE(0x0CF3, 0xE005) },
+ { USB_DEVICE(0x0930, 0x0219) },
+ { USB_DEVICE(0x0489, 0xe057) },
+ { USB_DEVICE(0x13d3, 0x3393) },
+@@ -125,6 +126,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 1bd3924c332a..f18b5a28f10c 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -108,6 +108,7 @@ static struct usb_device_id btusb_table[] = {
+
+ /* Broadcom BCM20702A0 */
+ { USB_DEVICE(0x0b05, 0x17b5) },
++ { USB_DEVICE(0x0b05, 0x17cb) },
+ { USB_DEVICE(0x04ca, 0x2003) },
+ { USB_DEVICE(0x0489, 0xe042) },
+ { USB_DEVICE(0x413c, 0x8197) },
+@@ -154,6 +155,7 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 7211f67d5bf4..72f460e22b76 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -125,6 +125,9 @@ static struct edid_quirk {
+
+ /* ViewSonic VA2026w */
+ { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
++
++ /* Medion MD 30217 PG */
++ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+ };
+
+ /*** DDC fetch and block validation ***/
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index a07ccab34016..72163e87f0da 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -621,7 +621,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+ DRM_DEBUG_KMS("aux_ch native nack\n");
+ return -EREMOTEIO;
+ case AUX_NATIVE_REPLY_DEFER:
+- udelay(100);
++ /*
++ * For now, just give more slack to branch devices. We
++ * could check the DPCD for I2C bit rate capabilities,
++ * and if available, adjust the interval. We could also
++ * be more careful with DP-to-Legacy adapters where a
++ * long legacy cable may force very low I2C bit rates.
++ */
++ if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
++ DP_DWN_STRM_PORT_PRESENT)
++ usleep_range(500, 600);
++ else
++ usleep_range(300, 400);
+ continue;
+ default:
+ DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
+index cffb0071f877..356a252e6861 100644
+--- a/drivers/gpu/drm/i915/intel_opregion.c
++++ b/drivers/gpu/drm/i915/intel_opregion.c
+@@ -161,7 +161,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+
+ max = intel_panel_get_max_backlight(dev);
+ intel_panel_set_backlight(dev, bclp * max / 255);
+- asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
++ asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index f0dc04b3c70b..317129406451 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1385,8 +1385,12 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+- /* some early dce3.2 boards have a bug in their transmitter control table */
+- if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
++ /* some dce3.x boards have a bug in their transmitter control table.
++ * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
++ * does the same thing and more.
++ */
++ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
++ (rdev->family != CHIP_RS880))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ }
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index f5962a0832f9..a68057a03c6d 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -501,7 +501,8 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *other_mode)
+ {
+- u32 tmp;
++ u32 tmp, buffer_alloc, i;
++ u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
+ /*
+ * Line Buffer Setup
+ * There are 3 line buffers, each one shared by 2 display controllers.
+@@ -524,18 +525,34 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
+ * non-linked crtcs for maximum line buffer allocation.
+ */
+ if (radeon_crtc->base.enabled && mode) {
+- if (other_mode)
++ if (other_mode) {
+ tmp = 0; /* 1/2 */
+- else
++ buffer_alloc = 1;
++ } else {
+ tmp = 2; /* whole */
+- } else
++ buffer_alloc = 2;
++ }
++ } else {
+ tmp = 0;
++ buffer_alloc = 0;
++ }
+
+ /* second controller of the pair uses second half of the lb */
+ if (radeon_crtc->crtc_id % 2)
+ tmp += 4;
+ WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
+
++ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
++ WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
++ DMIF_BUFFERS_ALLOCATED(buffer_alloc));
++ for (i = 0; i < rdev->usec_timeout; i++) {
++ if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
++ DMIF_BUFFERS_ALLOCATED_COMPLETED)
++ break;
++ udelay(1);
++ }
++ }
++
+ if (radeon_crtc->base.enabled && mode) {
+ switch (tmp) {
+ case 0:
+diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
+index fe44a953bac9..47f3bd2753f7 100644
+--- a/drivers/gpu/drm/radeon/evergreend.h
++++ b/drivers/gpu/drm/radeon/evergreend.h
+@@ -459,6 +459,10 @@
+ # define LATENCY_LOW_WATERMARK(x) ((x) << 0)
+ # define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
+
++#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
++# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
++# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
++
+ #define IH_RB_CNTL 0x3e00
+ # define IH_RB_ENABLE (1 << 0)
+ # define IH_IB_SIZE(x) ((x) << 1) /* log2 */
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 383b38ebb048..cda89c6b74f8 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -709,13 +709,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
+ (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
++ u8 *num_dst_objs = (u8 *)
++ ((u8 *)router_src_dst_table + 1 +
++ (router_src_dst_table->ucNumberOfSrc * 2));
++ u16 *dst_objs = (u16 *)(num_dst_objs + 1);
+ int enum_id;
+
+ router.router_id = router_obj_id;
+- for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst;
+- enum_id++) {
++ for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
+ if (le16_to_cpu(path->usConnObjectId) ==
+- le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id]))
++ le16_to_cpu(dst_objs[enum_id]))
+ break;
+ }
+
+@@ -1616,7 +1619,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
+ kfree(edid);
+ }
+ }
+- record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
++ record += fake_edid_record->ucFakeEDIDLength ?
++ fake_edid_record->ucFakeEDIDLength + 2 :
++ sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
+ break;
+ case LCD_PANEL_RESOLUTION_RECORD_TYPE:
+ panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 6fd53b644dfc..b101843bff73 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1387,6 +1387,24 @@ struct drm_connector_funcs radeon_dp_connector_funcs = {
+ .force = radeon_dvi_force,
+ };
+
++static const struct drm_connector_funcs radeon_edp_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .detect = radeon_dp_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = radeon_lvds_set_property,
++ .destroy = radeon_dp_connector_destroy,
++ .force = radeon_dvi_force,
++};
++
++static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
++ .dpms = drm_helper_connector_dpms,
++ .detect = radeon_dp_detect,
++ .fill_modes = drm_helper_probe_single_connector_modes,
++ .set_property = radeon_lvds_set_property,
++ .destroy = radeon_dp_connector_destroy,
++ .force = radeon_dvi_force,
++};
++
+ void
+ radeon_add_atom_connector(struct drm_device *dev,
+ uint32_t connector_id,
+@@ -1478,8 +1496,6 @@ radeon_add_atom_connector(struct drm_device *dev,
+ goto failed;
+ radeon_dig_connector->igp_lane_info = igp_lane_info;
+ radeon_connector->con_priv = radeon_dig_connector;
+- drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+- drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ /* add DP i2c bus */
+ if (connector_type == DRM_MODE_CONNECTOR_eDP)
+@@ -1496,6 +1512,10 @@ radeon_add_atom_connector(struct drm_device *dev,
+ case DRM_MODE_CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_DVIA:
+ default:
++ drm_connector_init(dev, &radeon_connector->base,
++ &radeon_dp_connector_funcs, connector_type);
++ drm_connector_helper_add(&radeon_connector->base,
++ &radeon_dp_connector_helper_funcs);
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+ radeon_connector->dac_load_detect = true;
+@@ -1508,6 +1528,10 @@ radeon_add_atom_connector(struct drm_device *dev,
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ case DRM_MODE_CONNECTOR_DisplayPort:
++ drm_connector_init(dev, &radeon_connector->base,
++ &radeon_dp_connector_funcs, connector_type);
++ drm_connector_helper_add(&radeon_connector->base,
++ &radeon_dp_connector_helper_funcs);
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_property,
+ UNDERSCAN_OFF);
+@@ -1532,6 +1556,10 @@ radeon_add_atom_connector(struct drm_device *dev,
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
++ drm_connector_init(dev, &radeon_connector->base,
++ &radeon_lvds_bridge_connector_funcs, connector_type);
++ drm_connector_helper_add(&radeon_connector->base,
++ &radeon_dp_connector_helper_funcs);
+ drm_connector_attach_property(&radeon_connector->base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+@@ -1695,7 +1723,7 @@ radeon_add_atom_connector(struct drm_device *dev,
+ goto failed;
+ radeon_dig_connector->igp_lane_info = igp_lane_info;
+ radeon_connector->con_priv = radeon_dig_connector;
+- drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
++ drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
+ drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
+ if (i2c_bus->valid) {
+ /* add DP i2c bus */
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index cd94abb1d776..8cde84b666d5 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -818,10 +818,16 @@ int radeon_device_init(struct radeon_device *rdev,
+ return r;
+ }
+ if (radeon_testing) {
+- radeon_test_moves(rdev);
++ if (rdev->accel_working)
++ radeon_test_moves(rdev);
++ else
++ DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
+ }
+ if (radeon_benchmarking) {
+- radeon_benchmark(rdev, radeon_benchmarking);
++ if (rdev->accel_working)
++ radeon_benchmark(rdev, radeon_benchmarking);
++ else
++ DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
+ }
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
+index 4dd9512fcc5d..c0874341b1a0 100644
+--- a/drivers/gpu/drm/radeon/rs400.c
++++ b/drivers/gpu/drm/radeon/rs400.c
+@@ -174,10 +174,13 @@ int rs400_gart_enable(struct radeon_device *rdev)
+ /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
+ * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
+ if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
+- WREG32_MC(RS480_MC_MISC_CNTL,
+- (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
++ tmp = RREG32_MC(RS480_MC_MISC_CNTL);
++ tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
++ WREG32_MC(RS480_MC_MISC_CNTL, tmp);
+ } else {
+- WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
++ tmp = RREG32_MC(RS480_MC_MISC_CNTL);
++ tmp |= RS480_GART_INDEX_REG_EN;
++ WREG32_MC(RS480_MC_MISC_CNTL, tmp);
+ }
+ /* Enable gart */
+ WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 611aafc20f90..9ac4389ecfa2 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -59,6 +59,8 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type,
+ struct hid_report_enum *report_enum = device->report_enum + type;
+ struct hid_report *report;
+
++ if (id >= HID_MAX_IDS)
++ return NULL;
+ if (report_enum->report_id_hash[id])
+ return report_enum->report_id_hash[id];
+
+@@ -216,9 +218,9 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
+ {
+ struct hid_report *report;
+ struct hid_field *field;
+- int usages;
++ unsigned usages;
+ unsigned offset;
+- int i;
++ unsigned i;
+
+ report = hid_register_report(parser->device, report_type, parser->global.report_id);
+ if (!report) {
+@@ -237,7 +239,8 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
+ if (!parser->local.usage_index) /* Ignore padding fields */
+ return 0;
+
+- usages = max_t(int, parser->local.usage_index, parser->global.report_count);
++ usages = max_t(unsigned, parser->local.usage_index,
++ parser->global.report_count);
+
+ field = hid_register_field(report, usages, parser->global.report_count);
+ if (!field)
+@@ -248,7 +251,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
+ field->application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
+
+ for (i = 0; i < usages; i++) {
+- int j = i;
++ unsigned j = i;
+ /* Duplicate the last usage we parsed if we have excess values */
+ if (i >= parser->local.usage_index)
+ j = parser->local.usage_index - 1;
+@@ -380,8 +383,10 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
+
+ case HID_GLOBAL_ITEM_TAG_REPORT_ID:
+ parser->global.report_id = item_udata(item);
+- if (parser->global.report_id == 0) {
+- dbg_hid("report_id 0 is invalid\n");
++ if (parser->global.report_id == 0 ||
++ parser->global.report_id >= HID_MAX_IDS) {
++ dbg_hid("report_id %u is invalid\n",
++ parser->global.report_id);
+ return -1;
+ }
+ return 0;
+@@ -552,7 +557,7 @@ static void hid_device_release(struct device *dev)
+ for (i = 0; i < HID_REPORT_TYPES; i++) {
+ struct hid_report_enum *report_enum = device->report_enum + i;
+
+- for (j = 0; j < 256; j++) {
++ for (j = 0; j < HID_MAX_IDS; j++) {
+ struct hid_report *report = report_enum->report_id_hash[j];
+ if (report)
+ hid_free_report(report);
+@@ -710,6 +715,64 @@ err:
+ }
+ EXPORT_SYMBOL_GPL(hid_parse_report);
+
++static const char * const hid_report_names[] = {
++ "HID_INPUT_REPORT",
++ "HID_OUTPUT_REPORT",
++ "HID_FEATURE_REPORT",
++};
++/**
++ * hid_validate_values - validate existing device report's value indexes
++ *
++ * @device: hid device
++ * @type: which report type to examine
++ * @id: which report ID to examine (0 for first)
++ * @field_index: which report field to examine
++ * @report_counts: expected number of values
++ *
++ * Validate the number of values in a given field of a given report, after
++ * parsing.
++ */
++struct hid_report *hid_validate_values(struct hid_device *hid,
++ unsigned int type, unsigned int id,
++ unsigned int field_index,
++ unsigned int report_counts)
++{
++ struct hid_report *report;
++
++ if (type > HID_FEATURE_REPORT) {
++ hid_err(hid, "invalid HID report type %u\n", type);
++ return NULL;
++ }
++
++ if (id >= HID_MAX_IDS) {
++ hid_err(hid, "invalid HID report id %u\n", id);
++ return NULL;
++ }
++
++ /*
++ * Explicitly not using hid_get_report() here since it depends on
++ * ->numbered being checked, which may not always be the case when
++ * drivers go to access report values.
++ */
++ report = hid->report_enum[type].report_id_hash[id];
++ if (!report) {
++ hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
++ return NULL;
++ }
++ if (report->maxfield <= field_index) {
++ hid_err(hid, "not enough fields in %s %u\n",
++ hid_report_names[type], id);
++ return NULL;
++ }
++ if (report->field[field_index]->report_count < report_counts) {
++ hid_err(hid, "not enough values in %s %u field %u\n",
++ hid_report_names[type], id, field_index);
++ return NULL;
++ }
++ return report;
++}
++EXPORT_SYMBOL_GPL(hid_validate_values);
++
+ /*
+ * Convert a signed n-bit integer to signed 32-bit integer. Common
+ * cases are done through the compiler, the screwed things has to be
+@@ -990,7 +1053,12 @@ EXPORT_SYMBOL_GPL(hid_output_report);
+
+ int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
+ {
+- unsigned size = field->report_size;
++ unsigned size;
++
++ if (!field)
++ return -1;
++
++ size = field->report_size;
+
+ hid_dump_input(field->report->device, field->usage + offset, value);
+
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 08075f274339..ca2b3e6c4e9f 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -581,6 +581,7 @@
+ #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16 0x0012
+ #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17 0x0013
+ #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18 0x0014
++#define USB_DEVICE_ID_NTRIG_DUOSENSE 0x1500
+
+ #define USB_VENDOR_ID_ONTRAK 0x0a07
+ #define USB_DEVICE_ID_ONTRAK_ADU100 0x0064
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index f333139d1a48..95c79a365c98 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -284,6 +284,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ if (field->flags & HID_MAIN_ITEM_CONSTANT)
+ goto ignore;
+
++ /* Ignore if report count is out of bounds. */
++ if (field->report_count < 1)
++ goto ignore;
++
+ /* only LED usages are supported in output fields */
+ if (field->report_type == HID_OUTPUT_REPORT &&
+ (usage->hid & HID_USAGE_PAGE) != HID_UP_LED) {
+@@ -887,10 +891,15 @@ static void report_features(struct hid_device *hid)
+
+ rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
+ list_for_each_entry(rep, &rep_enum->report_list, list)
+- for (i = 0; i < rep->maxfield; i++)
++ for (i = 0; i < rep->maxfield; i++) {
++ /* Ignore if report count is out of bounds. */
++ if (rep->field[i]->report_count < 1)
++ continue;
++
+ for (j = 0; j < rep->field[i]->maxusage; j++)
+ drv->feature_mapping(hid, rep->field[i],
+ rep->field[i]->usage + j);
++ }
+ }
+
+ /*
+diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
+index 3c31bc650e5d..128f0117c7a7 100644
+--- a/drivers/hid/hid-lg2ff.c
++++ b/drivers/hid/hid-lg2ff.c
+@@ -66,26 +66,13 @@ int lg2ff_init(struct hid_device *hid)
+ struct hid_report *report;
+ struct hid_input *hidinput = list_entry(hid->inputs.next,
+ struct hid_input, list);
+- struct list_head *report_list =
+- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+ int error;
+
+- if (list_empty(report_list)) {
+- hid_err(hid, "no output report found\n");
++ /* Check that the report looks ok */
++ report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
++ if (!report)
+ return -ENODEV;
+- }
+-
+- report = list_entry(report_list->next, struct hid_report, list);
+-
+- if (report->maxfield < 1) {
+- hid_err(hid, "output report is empty\n");
+- return -ENODEV;
+- }
+- if (report->field[0]->report_count < 7) {
+- hid_err(hid, "not enough values in the field\n");
+- return -ENODEV;
+- }
+
+ lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL);
+ if (!lg2ff)
+diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
+index f98644c26c1d..91f981ff5601 100644
+--- a/drivers/hid/hid-lg3ff.c
++++ b/drivers/hid/hid-lg3ff.c
+@@ -68,10 +68,11 @@ static int hid_lg3ff_play(struct input_dev *dev, void *data,
+ int x, y;
+
+ /*
+- * Maxusage should always be 63 (maximum fields)
+- * likely a better way to ensure this data is clean
++ * Available values in the field should always be 63, but we only use up to
++ * 35. Instead, clear the entire area, however big it is.
+ */
+- memset(report->field[0]->value, 0, sizeof(__s32)*report->field[0]->maxusage);
++ memset(report->field[0]->value, 0,
++ sizeof(__s32) * report->field[0]->report_count);
+
+ switch (effect->type) {
+ case FF_CONSTANT:
+@@ -131,32 +132,14 @@ static const signed short ff3_joystick_ac[] = {
+ int lg3ff_init(struct hid_device *hid)
+ {
+ struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+- struct hid_report *report;
+- struct hid_field *field;
+ const signed short *ff_bits = ff3_joystick_ac;
+ int error;
+ int i;
+
+- /* Find the report to use */
+- if (list_empty(report_list)) {
+- hid_err(hid, "No output report found\n");
+- return -1;
+- }
+-
+ /* Check that the report looks ok */
+- report = list_entry(report_list->next, struct hid_report, list);
+- if (!report) {
+- hid_err(hid, "NULL output report\n");
+- return -1;
+- }
+-
+- field = report->field[0];
+- if (!field) {
+- hid_err(hid, "NULL field\n");
+- return -1;
+- }
++ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
++ return -ENODEV;
+
+ /* Assume single fixed device G940 */
+ for (i = 0; ff_bits[i] >= 0; i++)
+diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
+index 103f30d93f76..5c6bf4b5c533 100644
+--- a/drivers/hid/hid-lg4ff.c
++++ b/drivers/hid/hid-lg4ff.c
+@@ -339,33 +339,15 @@ static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *at
+ int lg4ff_init(struct hid_device *hid)
+ {
+ struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+- struct hid_report *report;
+- struct hid_field *field;
+ struct lg4ff_device_entry *entry;
+ struct usb_device_descriptor *udesc;
+ int error, i, j;
+ __u16 bcdDevice, rev_maj, rev_min;
+
+- /* Find the report to use */
+- if (list_empty(report_list)) {
+- hid_err(hid, "No output report found\n");
+- return -1;
+- }
+-
+ /* Check that the report looks ok */
+- report = list_entry(report_list->next, struct hid_report, list);
+- if (!report) {
+- hid_err(hid, "NULL output report\n");
++ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
+ return -1;
+- }
+-
+- field = report->field[0];
+- if (!field) {
+- hid_err(hid, "NULL field\n");
+- return -1;
+- }
+
+ /* Check what wheel has been connected */
+ for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
+diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
+index 27bc54f92f44..1d978daa70d7 100644
+--- a/drivers/hid/hid-lgff.c
++++ b/drivers/hid/hid-lgff.c
+@@ -130,27 +130,14 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
+ int lgff_init(struct hid_device* hid)
+ {
+ struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+- struct hid_report *report;
+- struct hid_field *field;
+ const signed short *ff_bits = ff_joystick;
+ int error;
+ int i;
+
+- /* Find the report to use */
+- if (list_empty(report_list)) {
+- hid_err(hid, "No output report found\n");
+- return -1;
+- }
+-
+ /* Check that the report looks ok */
+- report = list_entry(report_list->next, struct hid_report, list);
+- field = report->field[0];
+- if (!field) {
+- hid_err(hid, "NULL field\n");
+- return -1;
+- }
++ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
++ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(devices); i++) {
+ if (dev->id.vendor == devices[i].idVendor &&
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index 8821ecc9751f..828a0dd2a5ab 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -791,6 +791,12 @@ static int logi_dj_probe(struct hid_device *hdev,
+ goto hid_parse_fail;
+ }
+
++ if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, REPORT_ID_DJ_SHORT,
++ 0, DJREPORT_SHORT_LENGTH - 1)) {
++ retval = -ENODEV;
++ goto hid_parse_fail;
++ }
++
+ /* Starts the usb device and connects to upper interfaces hiddev and
+ * hidraw */
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
+index 9fae2ebdd758..48cba8570469 100644
+--- a/drivers/hid/hid-ntrig.c
++++ b/drivers/hid/hid-ntrig.c
+@@ -115,7 +115,8 @@ static inline int ntrig_get_mode(struct hid_device *hdev)
+ struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT].
+ report_id_hash[0x0d];
+
+- if (!report)
++ if (!report || report->maxfield < 1 ||
++ report->field[0]->report_count < 1)
+ return -EINVAL;
+
+ usbhid_submit_report(hdev, report, USB_DIR_IN);
+diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
+index 01e7d2cd7c26..1daeacabc832 100644
+--- a/drivers/hid/hid-picolcd.c
++++ b/drivers/hid/hid-picolcd.c
+@@ -1424,7 +1424,7 @@ static ssize_t picolcd_operation_mode_store(struct device *dev,
+ buf += 10;
+ cnt -= 10;
+ }
+- if (!report)
++ if (!report || report->maxfield != 1)
+ return -EINVAL;
+
+ while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
+diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
+index 070f93a5c11b..12786cdcbedd 100644
+--- a/drivers/hid/hid-pl.c
++++ b/drivers/hid/hid-pl.c
+@@ -129,8 +129,14 @@ static int plff_init(struct hid_device *hid)
+ strong = &report->field[0]->value[2];
+ weak = &report->field[0]->value[3];
+ debug("detected single-field device");
+- } else if (report->maxfield >= 4 && report->field[0]->maxusage == 1 &&
+- report->field[0]->usage[0].hid == (HID_UP_LED | 0x43)) {
++ } else if (report->field[0]->maxusage == 1 &&
++ report->field[0]->usage[0].hid ==
++ (HID_UP_LED | 0x43) &&
++ report->maxfield >= 4 &&
++ report->field[0]->report_count >= 1 &&
++ report->field[1]->report_count >= 1 &&
++ report->field[2]->report_count >= 1 &&
++ report->field[3]->report_count >= 1) {
+ report->field[0]->value[0] = 0x00;
+ report->field[1]->value[0] = 0x00;
+ strong = &report->field[2]->value[0];
+diff --git a/drivers/hid/hid-speedlink.c b/drivers/hid/hid-speedlink.c
+index 602013741718..2b03c9baac35 100644
+--- a/drivers/hid/hid-speedlink.c
++++ b/drivers/hid/hid-speedlink.c
+@@ -3,7 +3,7 @@
+ * Fixes "jumpy" cursor and removes nonexistent keyboard LEDS from
+ * the HID descriptor.
+ *
+- * Copyright (c) 2011 Stefan Kriwanek <mail@stefankriwanek.de>
++ * Copyright (c) 2011, 2013 Stefan Kriwanek <dev@stefankriwanek.de>
+ */
+
+ /*
+@@ -48,8 +48,13 @@ static int speedlink_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+ {
+ /* No other conditions due to usage_table. */
+- /* Fix "jumpy" cursor (invalid events sent by device). */
+- if (value == 256)
++
++ /* This fixes the "jumpy" cursor occuring due to invalid events sent
++ * by the device. Some devices only send them with value==+256, others
++ * don't. However, catching abs(value)>=256 is restrictive enough not
++ * to interfere with devices that were bug-free (has been tested).
++ */
++ if (abs(value) >= 256)
+ return 1;
+ /* Drop useless distance 0 events (on button clicks etc.) as well */
+ if (value == 0)
+diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
+index f6ba81df71bd..f348f7f6526b 100644
+--- a/drivers/hid/hid-zpff.c
++++ b/drivers/hid/hid-zpff.c
+@@ -70,21 +70,13 @@ static int zpff_init(struct hid_device *hid)
+ struct hid_report *report;
+ struct hid_input *hidinput = list_entry(hid->inputs.next,
+ struct hid_input, list);
+- struct list_head *report_list =
+- &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+- int error;
++ int i, error;
+
+- if (list_empty(report_list)) {
+- hid_err(hid, "no output report found\n");
+- return -ENODEV;
+- }
+-
+- report = list_entry(report_list->next, struct hid_report, list);
+-
+- if (report->maxfield < 4) {
+- hid_err(hid, "not enough fields in report\n");
+- return -ENODEV;
++ for (i = 0; i < 4; i++) {
++ report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
++ if (!report)
++ return -ENODEV;
+ }
+
+ zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL);
+diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
+index 17d15bb610d1..9e50f61933c4 100644
+--- a/drivers/hid/hidraw.c
++++ b/drivers/hid/hidraw.c
+@@ -42,7 +42,6 @@ static struct cdev hidraw_cdev;
+ static struct class *hidraw_class;
+ static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES];
+ static DEFINE_MUTEX(minors_lock);
+-static void drop_ref(struct hidraw *hid, int exists_bit);
+
+ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
+ {
+@@ -296,14 +295,37 @@ out:
+
+ }
+
++static void drop_ref(struct hidraw *hidraw, int exists_bit)
++{
++ if (exists_bit) {
++ hid_hw_close(hidraw->hid);
++ hidraw->exist = 0;
++ if (hidraw->open)
++ wake_up_interruptible(&hidraw->wait);
++ } else {
++ --hidraw->open;
++ }
++
++ if (!hidraw->open && !hidraw->exist) {
++ device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
++ hidraw_table[hidraw->minor] = NULL;
++ kfree(hidraw);
++ }
++}
++
+ static int hidraw_release(struct inode * inode, struct file * file)
+ {
+ unsigned int minor = iminor(inode);
+ struct hidraw_list *list = file->private_data;
+
+- drop_ref(hidraw_table[minor], 0);
++ mutex_lock(&minors_lock);
++
+ list_del(&list->node);
+ kfree(list);
++
++ drop_ref(hidraw_table[minor], 0);
++
++ mutex_unlock(&minors_lock);
+ return 0;
+ }
+
+@@ -506,7 +528,12 @@ EXPORT_SYMBOL_GPL(hidraw_connect);
+ void hidraw_disconnect(struct hid_device *hid)
+ {
+ struct hidraw *hidraw = hid->hidraw;
++
++ mutex_lock(&minors_lock);
++
+ drop_ref(hidraw, 1);
++
++ mutex_unlock(&minors_lock);
+ }
+ EXPORT_SYMBOL_GPL(hidraw_disconnect);
+
+@@ -555,23 +582,3 @@ void hidraw_exit(void)
+ unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES);
+
+ }
+-
+-static void drop_ref(struct hidraw *hidraw, int exists_bit)
+-{
+- mutex_lock(&minors_lock);
+- if (exists_bit) {
+- hid_hw_close(hidraw->hid);
+- hidraw->exist = 0;
+- if (hidraw->open)
+- wake_up_interruptible(&hidraw->wait);
+- } else {
+- --hidraw->open;
+- }
+-
+- if (!hidraw->open && !hidraw->exist) {
+- device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
+- hidraw_table[hidraw->minor] = NULL;
+- kfree(hidraw);
+- }
+- mutex_unlock(&minors_lock);
+-}
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index 96a1e0f70724..f98fbad966e7 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -99,6 +99,8 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
++
+ { 0, 0 }
+ };
+
+diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
+index d99aa8484777..30cac58422b7 100644
+--- a/drivers/hwmon/applesmc.c
++++ b/drivers/hwmon/applesmc.c
+@@ -344,8 +344,10 @@ static int applesmc_get_lower_bound(unsigned int *lo, const char *key)
+ while (begin != end) {
+ int middle = begin + (end - begin) / 2;
+ entry = applesmc_get_entry_by_index(middle);
+- if (IS_ERR(entry))
++ if (IS_ERR(entry)) {
++ *lo = 0;
+ return PTR_ERR(entry);
++ }
+ if (strcmp(entry->key, key) < 0)
+ begin = middle + 1;
+ else
+@@ -364,8 +366,10 @@ static int applesmc_get_upper_bound(unsigned int *hi, const char *key)
+ while (begin != end) {
+ int middle = begin + (end - begin) / 2;
+ entry = applesmc_get_entry_by_index(middle);
+- if (IS_ERR(entry))
++ if (IS_ERR(entry)) {
++ *hi = smcreg.key_count;
+ return PTR_ERR(entry);
++ }
+ if (strcmp(key, entry->key) < 0)
+ end = middle;
+ else
+@@ -485,16 +489,25 @@ static int applesmc_init_smcreg_try(void)
+ {
+ struct applesmc_registers *s = &smcreg;
+ bool left_light_sensor, right_light_sensor;
++ unsigned int count;
+ u8 tmp[1];
+ int ret;
+
+ if (s->init_complete)
+ return 0;
+
+- ret = read_register_count(&s->key_count);
++ ret = read_register_count(&count);
+ if (ret)
+ return ret;
+
++ if (s->cache && s->key_count != count) {
++ pr_warn("key count changed from %d to %d\n",
++ s->key_count, count);
++ kfree(s->cache);
++ s->cache = NULL;
++ }
++ s->key_count = count;
++
+ if (!s->cache)
+ s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL);
+ if (!s->cache)
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index f44a06740a6e..b4a4aaf81793 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -861,56 +861,54 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
+ return order;
+ }
+
++static void dma_pte_free_level(struct dmar_domain *domain, int level,
++ struct dma_pte *pte, unsigned long pfn,
++ unsigned long start_pfn, unsigned long last_pfn)
++{
++ pfn = max(start_pfn, pfn);
++ pte = &pte[pfn_level_offset(pfn, level)];
++
++ do {
++ unsigned long level_pfn;
++ struct dma_pte *level_pte;
++
++ if (!dma_pte_present(pte) || dma_pte_superpage(pte))
++ goto next;
++
++ level_pfn = pfn & level_mask(level - 1);
++ level_pte = phys_to_virt(dma_pte_addr(pte));
++
++ if (level > 2)
++ dma_pte_free_level(domain, level - 1, level_pte,
++ level_pfn, start_pfn, last_pfn);
++
++ /* If range covers entire pagetable, free it */
++ if (!(start_pfn > level_pfn ||
++ last_pfn < level_pfn + level_size(level))) {
++ dma_clear_pte(pte);
++ domain_flush_cache(domain, pte, sizeof(*pte));
++ free_pgtable_page(level_pte);
++ }
++next:
++ pfn += level_size(level);
++ } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
++}
++
+ /* free page table pages. last level pte should already be cleared */
+ static void dma_pte_free_pagetable(struct dmar_domain *domain,
+ unsigned long start_pfn,
+ unsigned long last_pfn)
+ {
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+- struct dma_pte *first_pte, *pte;
+- int total = agaw_to_level(domain->agaw);
+- int level;
+- unsigned long tmp;
+- int large_page = 2;
+
+ BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
+ BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+ BUG_ON(start_pfn > last_pfn);
+
+ /* We don't need lock here; nobody else touches the iova range */
+- level = 2;
+- while (level <= total) {
+- tmp = align_to_level(start_pfn, level);
+-
+- /* If we can't even clear one PTE at this level, we're done */
+- if (tmp + level_size(level) - 1 > last_pfn)
+- return;
+-
+- do {
+- large_page = level;
+- first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
+- if (large_page > level)
+- level = large_page + 1;
+- if (!pte) {
+- tmp = align_to_level(tmp + 1, level + 1);
+- continue;
+- }
+- do {
+- if (dma_pte_present(pte)) {
+- free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
+- dma_clear_pte(pte);
+- }
+- pte++;
+- tmp += level_size(level);
+- } while (!first_pte_in_page(pte) &&
+- tmp + level_size(level) - 1 <= last_pfn);
++ dma_pte_free_level(domain, agaw_to_level(domain->agaw),
++ domain->pgd, 0, start_pfn, last_pfn);
+
+- domain_flush_cache(domain, first_pte,
+- (void *)pte - (void *)first_pte);
+-
+- } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
+- level++;
+- }
+ /* free pgd */
+ if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
+ free_pgtable_page(domain->pgd);
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index b4aaa7bf98a3..5c303168157a 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -721,17 +721,16 @@ static int calc_max_buckets(void)
+ */
+ static int init_hash_tables(struct dm_snapshot *s)
+ {
+- sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
++ sector_t hash_size, cow_dev_size, max_buckets;
+
+ /*
+ * Calculate based on the size of the original volume or
+ * the COW volume...
+ */
+ cow_dev_size = get_dev_size(s->cow->bdev);
+- origin_dev_size = get_dev_size(s->origin->bdev);
+ max_buckets = calc_max_buckets();
+
+- hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
++ hash_size = cow_dev_size >> s->store->chunk_shift;
+ hash_size = min(hash_size, max_buckets);
+
+ if (hash_size < 64)
+diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
+index 441dacf642bb..060353eac9bb 100644
+--- a/drivers/media/video/hdpvr/hdpvr-core.c
++++ b/drivers/media/video/hdpvr/hdpvr-core.c
+@@ -297,6 +297,11 @@ static int hdpvr_probe(struct usb_interface *interface,
+
+ dev->workqueue = 0;
+
++ /* init video transfer queues first of all */
++ /* to prevent oops in hdpvr_delete() on error paths */
++ INIT_LIST_HEAD(&dev->free_buff_list);
++ INIT_LIST_HEAD(&dev->rec_buff_list);
++
+ /* register v4l2_device early so it can be used for printks */
+ if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) {
+ err("v4l2_device_register failed");
+@@ -319,10 +324,6 @@ static int hdpvr_probe(struct usb_interface *interface,
+ if (!dev->workqueue)
+ goto error;
+
+- /* init video transfer queues */
+- INIT_LIST_HEAD(&dev->free_buff_list);
+- INIT_LIST_HEAD(&dev->rec_buff_list);
+-
+ dev->options = hdpvr_default_options;
+
+ if (default_video_input < HDPVR_VIDEO_INPUTS)
+@@ -373,12 +374,6 @@ static int hdpvr_probe(struct usb_interface *interface,
+ }
+ mutex_unlock(&dev->io_mutex);
+
+- if (hdpvr_register_videodev(dev, &interface->dev,
+- video_nr[atomic_inc_return(&dev_nr)])) {
+- v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
+- goto error;
+- }
+-
+ #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ retval = hdpvr_register_i2c_adapter(dev);
+ if (retval < 0) {
+@@ -399,6 +394,13 @@ static int hdpvr_probe(struct usb_interface *interface,
+ }
+ #endif
+
++ retval = hdpvr_register_videodev(dev, &interface->dev,
++ video_nr[atomic_inc_return(&dev_nr)]);
++ if (retval < 0) {
++ v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
++ goto reg_fail;
++ }
++
+ /* let the user know what node this device is now attached to */
+ v4l2_info(&dev->v4l2_dev, "device now attached to %s\n",
+ video_device_node_name(dev->video_dev));
+diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
+index 86f259cdfcbc..be9e74d687b9 100644
+--- a/drivers/mmc/host/tmio_mmc_dma.c
++++ b/drivers/mmc/host/tmio_mmc_dma.c
+@@ -92,6 +92,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
+ pio:
+ if (!desc) {
+ /* DMA failed, fall back to PIO */
++ tmio_mmc_enable_dma(host, false);
+ if (ret >= 0)
+ ret = -EIO;
+ host->chan_rx = NULL;
+@@ -104,7 +105,6 @@ pio:
+ }
+ dev_warn(&host->pdev->dev,
+ "DMA failed: %d, falling back to PIO\n", ret);
+- tmio_mmc_enable_dma(host, false);
+ }
+
+ dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
+@@ -173,6 +173,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
+ pio:
+ if (!desc) {
+ /* DMA failed, fall back to PIO */
++ tmio_mmc_enable_dma(host, false);
+ if (ret >= 0)
+ ret = -EIO;
+ host->chan_tx = NULL;
+@@ -185,7 +186,6 @@ pio:
+ }
+ dev_warn(&host->pdev->dev,
+ "DMA failed: %d, falling back to PIO\n", ret);
+- tmio_mmc_enable_dma(host, false);
+ }
+
+ dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index b436b84de123..1bf36ac44a10 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1911,6 +1911,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave, *oldcurrent;
+ struct sockaddr addr;
++ int old_flags = bond_dev->flags;
+ u32 old_features = bond_dev->features;
+
+ /* slave is not a slave or master is not master of this slave */
+@@ -2041,12 +2042,18 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
+ * already taken care of above when we detached the slave
+ */
+ if (!USES_PRIMARY(bond->params.mode)) {
+- /* unset promiscuity level from slave */
+- if (bond_dev->flags & IFF_PROMISC)
++ /* unset promiscuity level from slave
++ * NOTE: The NETDEV_CHANGEADDR call above may change the value
++ * of the IFF_PROMISC flag in the bond_dev, but we need the
++ * value of that flag before that change, as that was the value
++ * when this slave was attached, so we cache at the start of the
++ * function and use it here. Same goes for ALLMULTI below
++ */
++ if (old_flags & IFF_PROMISC)
+ dev_set_promiscuity(slave_dev, -1);
+
+ /* unset allmulti level from slave */
+- if (bond_dev->flags & IFF_ALLMULTI)
++ if (old_flags & IFF_ALLMULTI)
+ dev_set_allmulti(slave_dev, -1);
+
+ /* flush master's mc_list from slave */
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index e59d006e98e1..bb828c24ce6f 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -60,7 +60,7 @@
+ #define FLEXCAN_MCR_BCC BIT(16)
+ #define FLEXCAN_MCR_LPRIO_EN BIT(13)
+ #define FLEXCAN_MCR_AEN BIT(12)
+-#define FLEXCAN_MCR_MAXMB(x) ((x) & 0xf)
++#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x1f)
+ #define FLEXCAN_MCR_IDAM_A (0 << 8)
+ #define FLEXCAN_MCR_IDAM_B (1 << 8)
+ #define FLEXCAN_MCR_IDAM_C (2 << 8)
+@@ -666,7 +666,6 @@ static int flexcan_chip_start(struct net_device *dev)
+ {
+ struct flexcan_priv *priv = netdev_priv(dev);
+ struct flexcan_regs __iomem *regs = priv->base;
+- unsigned int i;
+ int err;
+ u32 reg_mcr, reg_ctrl;
+
+@@ -700,9 +699,11 @@ static int flexcan_chip_start(struct net_device *dev)
+ *
+ */
+ reg_mcr = flexcan_read(&regs->mcr);
++ reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
+ reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
+ FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
+- FLEXCAN_MCR_IDAM_C;
++ FLEXCAN_MCR_IDAM_C |
++ FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
+ dev_dbg(dev->dev.parent, "%s: writing mcr=0x%08x", __func__, reg_mcr);
+ flexcan_write(reg_mcr, &regs->mcr);
+
+@@ -732,16 +733,9 @@ static int flexcan_chip_start(struct net_device *dev)
+ dev_dbg(dev->dev.parent, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
+ flexcan_write(reg_ctrl, &regs->ctrl);
+
+- for (i = 0; i < ARRAY_SIZE(regs->cantxfg); i++) {
+- flexcan_write(0, &regs->cantxfg[i].can_ctrl);
+- flexcan_write(0, &regs->cantxfg[i].can_id);
+- flexcan_write(0, &regs->cantxfg[i].data[0]);
+- flexcan_write(0, &regs->cantxfg[i].data[1]);
+-
+- /* put MB into rx queue */
+- flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
+- &regs->cantxfg[i].can_ctrl);
+- }
++ /* Abort any pending TX, mark Mailbox as INACTIVE */
++ flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
++ &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
+
+ /* acceptance mask/acceptance code (accept everything) */
+ flexcan_write(0x0, &regs->rxgmask);
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index d0722a7d480b..fb9e7d34bcf2 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -397,7 +397,13 @@ static void gfar_init_mac(struct net_device *ndev)
+ if (ndev->features & NETIF_F_IP_CSUM)
+ tctrl |= TCTRL_INIT_CSUM;
+
+- tctrl |= TCTRL_TXSCHED_PRIO;
++ if (priv->prio_sched_en)
++ tctrl |= TCTRL_TXSCHED_PRIO;
++ else {
++ tctrl |= TCTRL_TXSCHED_WRRS;
++ gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
++ gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
++ }
+
+ gfar_write(&regs->tctrl, tctrl);
+
+@@ -1157,6 +1163,9 @@ static int gfar_probe(struct platform_device *ofdev)
+ priv->rx_filer_enable = 1;
+ /* Enable most messages by default */
+ priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
++ /* use pritority h/w tx queue scheduling for single queue devices */
++ if (priv->num_tx_queues == 1)
++ priv->prio_sched_en = 1;
+
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(dev);
+diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
+index 9aa43773e8e3..abeb79a67aee 100644
+--- a/drivers/net/ethernet/freescale/gianfar.h
++++ b/drivers/net/ethernet/freescale/gianfar.h
+@@ -304,8 +304,16 @@ extern const char gfar_driver_version[];
+ #define TCTRL_TFCPAUSE 0x00000008
+ #define TCTRL_TXSCHED_MASK 0x00000006
+ #define TCTRL_TXSCHED_INIT 0x00000000
++/* priority scheduling */
+ #define TCTRL_TXSCHED_PRIO 0x00000002
++/* weighted round-robin scheduling (WRRS) */
+ #define TCTRL_TXSCHED_WRRS 0x00000004
++/* default WRRS weight and policy setting,
++ * tailored to the tr03wt and tr47wt registers:
++ * equal weight for all Tx Qs, measured in 64byte units
++ */
++#define DEFAULT_WRRS_WEIGHT 0x18181818
++
+ #define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN)
+
+ #define IEVENT_INIT_CLEAR 0xffffffff
+@@ -1101,7 +1109,8 @@ struct gfar_private {
+ extended_hash:1,
+ bd_stash_en:1,
+ rx_filer_enable:1,
+- wol_en:1; /* Wake-on-LAN enabled */
++ wol_en:1, /* Wake-on-LAN enabled */
++ prio_sched_en:1; /* Enable priorty based Tx scheduling in Hw */
+ unsigned short padding;
+
+ /* PHY stuff */
+diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
+index 8f47907fd270..4236b82f9e35 100644
+--- a/drivers/net/ethernet/realtek/8139cp.c
++++ b/drivers/net/ethernet/realtek/8139cp.c
+@@ -478,7 +478,7 @@ rx_status_loop:
+
+ while (1) {
+ u32 status, len;
+- dma_addr_t mapping;
++ dma_addr_t mapping, new_mapping;
+ struct sk_buff *skb, *new_skb;
+ struct cp_desc *desc;
+ const unsigned buflen = cp->rx_buf_sz;
+@@ -520,6 +520,14 @@ rx_status_loop:
+ goto rx_next;
+ }
+
++ new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
++ PCI_DMA_FROMDEVICE);
++ if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
++ dev->stats.rx_dropped++;
++ kfree_skb(new_skb);
++ goto rx_next;
++ }
++
+ dma_unmap_single(&cp->pdev->dev, mapping,
+ buflen, PCI_DMA_FROMDEVICE);
+
+@@ -531,12 +539,11 @@ rx_status_loop:
+
+ skb_put(skb, len);
+
+- mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
+- PCI_DMA_FROMDEVICE);
+ cp->rx_skb[rx_tail] = new_skb;
+
+ cp_rx_skb(cp, skb, desc);
+ rx++;
++ mapping = new_mapping;
+
+ rx_next:
+ cp->rx_ring[rx_tail].opts2 = 0;
+@@ -704,6 +711,22 @@ static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
+ TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
+ }
+
++static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
++ int first, int entry_last)
++{
++ int frag, index;
++ struct cp_desc *txd;
++ skb_frag_t *this_frag;
++ for (frag = 0; frag+first < entry_last; frag++) {
++ index = first+frag;
++ cp->tx_skb[index] = NULL;
++ txd = &cp->tx_ring[index];
++ this_frag = &skb_shinfo(skb)->frags[frag];
++ dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
++ skb_frag_size(this_frag), PCI_DMA_TODEVICE);
++ }
++}
++
+ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
+ struct net_device *dev)
+ {
+@@ -737,6 +760,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
+
+ len = skb->len;
+ mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
++ if (dma_mapping_error(&cp->pdev->dev, mapping))
++ goto out_dma_error;
++
+ txd->opts2 = opts2;
+ txd->addr = cpu_to_le64(mapping);
+ wmb();
+@@ -774,6 +800,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
+ first_len = skb_headlen(skb);
+ first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
+ first_len, PCI_DMA_TODEVICE);
++ if (dma_mapping_error(&cp->pdev->dev, first_mapping))
++ goto out_dma_error;
++
+ cp->tx_skb[entry] = skb;
+ entry = NEXT_TX(entry);
+
+@@ -787,6 +816,11 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
+ mapping = dma_map_single(&cp->pdev->dev,
+ skb_frag_address(this_frag),
+ len, PCI_DMA_TODEVICE);
++ if (dma_mapping_error(&cp->pdev->dev, mapping)) {
++ unwind_tx_frag_mapping(cp, skb, first_entry, entry);
++ goto out_dma_error;
++ }
++
+ eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
+
+ ctrl = eor | len | DescOwn;
+@@ -845,11 +879,16 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
+ if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
+ netif_stop_queue(dev);
+
++out_unlock:
+ spin_unlock_irqrestore(&cp->lock, intr_flags);
+
+ cpw8(TxPoll, NormalTxPoll);
+
+ return NETDEV_TX_OK;
++out_dma_error:
++ kfree_skb(skb);
++ cp->dev->stats.tx_dropped++;
++ goto out_unlock;
+ }
+
+ /* Set or clear the multicast filter for this adaptor.
+@@ -1023,6 +1062,10 @@ static int cp_refill_rx(struct cp_private *cp)
+
+ mapping = dma_map_single(&cp->pdev->dev, skb->data,
+ cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
++ if (dma_mapping_error(&cp->pdev->dev, mapping)) {
++ kfree_skb(skb);
++ goto err_out;
++ }
+ cp->rx_skb[i] = skb;
+
+ cp->rx_ring[i].opts2 = 0;
+diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
+index 9ce8665b53bf..c231b3f67b2b 100644
+--- a/drivers/net/ethernet/sfc/rx.c
++++ b/drivers/net/ethernet/sfc/rx.c
+@@ -312,8 +312,9 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
+
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ new_buf = efx_rx_buffer(rx_queue, index);
+- new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
+ new_buf->u.page = rx_buf->u.page;
++ new_buf->page_offset = rx_buf->page_offset ^ (PAGE_SIZE >> 1);
++ new_buf->dma_addr = state->dma_addr + new_buf->page_offset;
+ new_buf->len = rx_buf->len;
+ new_buf->is_page = true;
+ ++rx_queue->added_count;
+diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
+index f34dd99fe579..f37e0aee27c6 100644
+--- a/drivers/net/ethernet/via/via-rhine.c
++++ b/drivers/net/ethernet/via/via-rhine.c
+@@ -32,7 +32,7 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ #define DRV_NAME "via-rhine"
+-#define DRV_VERSION "1.5.0"
++#define DRV_VERSION "1.5.1"
+ #define DRV_RELDATE "2010-10-09"
+
+
+@@ -1518,7 +1518,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
+ cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+
+ if (unlikely(vlan_tx_tag_present(skb))) {
+- rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
++ u16 vid_pcp = vlan_tx_tag_get(skb);
++
++ /* drop CFI/DEI bit, register needs VID and PCP */
++ vid_pcp = (vid_pcp & VLAN_VID_MASK) |
++ ((vid_pcp & VLAN_PRIO_MASK) >> 1);
++ rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
+ /* request tagging */
+ rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
+ }
+diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
+index 2681b53820ee..e26945d9dea4 100644
+--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
+@@ -308,6 +308,12 @@ static int temac_dma_bd_init(struct net_device *ndev)
+ lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+ lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+
++ /* Init descriptor indexes */
++ lp->tx_bd_ci = 0;
++ lp->tx_bd_next = 0;
++ lp->tx_bd_tail = 0;
++ lp->rx_bd_ci = 0;
++
+ return 0;
+
+ out:
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 96b9e3c93ce1..b0f901518b76 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -641,6 +641,28 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+ return 0;
+ }
+
++static unsigned long iov_pages(const struct iovec *iv, int offset,
++ unsigned long nr_segs)
++{
++ unsigned long seg, base;
++ int pages = 0, len, size;
++
++ while (nr_segs && (offset >= iv->iov_len)) {
++ offset -= iv->iov_len;
++ ++iv;
++ --nr_segs;
++ }
++
++ for (seg = 0; seg < nr_segs; seg++) {
++ base = (unsigned long)iv[seg].iov_base + offset;
++ len = iv[seg].iov_len - offset;
++ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
++ pages += size;
++ offset = 0;
++ }
++
++ return pages;
++}
+
+ /* Get packet from user space buffer */
+ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+@@ -687,31 +709,15 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ if (unlikely(count > UIO_MAXIOV))
+ goto err;
+
+- if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
+- zerocopy = true;
+-
+- if (zerocopy) {
+- /* Userspace may produce vectors with count greater than
+- * MAX_SKB_FRAGS, so we need to linearize parts of the skb
+- * to let the rest of data to be fit in the frags.
+- */
+- if (count > MAX_SKB_FRAGS) {
+- copylen = iov_length(iv, count - MAX_SKB_FRAGS);
+- if (copylen < vnet_hdr_len)
+- copylen = 0;
+- else
+- copylen -= vnet_hdr_len;
+- }
+- /* There are 256 bytes to be copied in skb, so there is enough
+- * room for skb expand head in case it is used.
+- * The rest buffer is mapped from userspace.
+- */
+- if (copylen < vnet_hdr.hdr_len)
+- copylen = vnet_hdr.hdr_len;
+- if (!copylen)
+- copylen = GOODCOPY_LEN;
++ if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
++ copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
+ linear = copylen;
+- } else {
++ if (iov_pages(iv, vnet_hdr_len + copylen, count)
++ <= MAX_SKB_FRAGS)
++ zerocopy = true;
++ }
++
++ if (!zerocopy) {
+ copylen = len;
+ linear = vnet_hdr.hdr_len;
+ }
+@@ -723,9 +729,15 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+
+ if (zerocopy)
+ err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
+- else
++ else {
+ err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
+ len);
++ if (!err && m && m->msg_control) {
++ struct ubuf_info *uarg = m->msg_control;
++ uarg->callback(uarg);
++ }
++ }
++
+ if (err)
+ goto err_kfree;
+
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index ad6a9d91dfdd..2b349d3355f4 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ nf_reset(skb);
+
+ skb->ip_summed = CHECKSUM_NONE;
+- ip_select_ident(iph, &rt->dst, NULL);
++ ip_select_ident(skb, &rt->dst, NULL);
+ ip_send_check(iph);
+
+ ip_local_out(skb);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index f4c5de6e3ed0..ee1aab0805d1 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -614,8 +614,9 @@ static ssize_t tun_get_user(struct tun_struct *tun,
+ int offset = 0;
+
+ if (!(tun->flags & TUN_NO_PI)) {
+- if ((len -= sizeof(pi)) > count)
++ if (len < sizeof(pi))
+ return -EINVAL;
++ len -= sizeof(pi);
+
+ if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
+ return -EFAULT;
+@@ -623,8 +624,9 @@ static ssize_t tun_get_user(struct tun_struct *tun,
+ }
+
+ if (tun->flags & TUN_VNET_HDR) {
+- if ((len -= tun->vnet_hdr_sz) > count)
++ if (len < tun->vnet_hdr_sz)
+ return -EINVAL;
++ len -= tun->vnet_hdr_sz;
+
+ if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
+ return -EFAULT;
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index 2ba40cf85251..43aa06b3f7c6 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -615,6 +615,11 @@ static const struct usb_device_id products [] = {
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ .driver_info = (unsigned long)&wwan_info,
+ }, {
++ /* Telit modules */
++ USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
++ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
++ .driver_info = (kernel_ulong_t) &wwan_info,
++}, {
+ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
+index fbc0e4def767..136ecf32a5d5 100644
+--- a/drivers/net/usb/dm9601.c
++++ b/drivers/net/usb/dm9601.c
+@@ -384,7 +384,7 @@ static void dm9601_set_multicast(struct net_device *net)
+ rx_ctl |= 0x02;
+ } else if (net->flags & IFF_ALLMULTI ||
+ netdev_mc_count(net) > DM_MAX_MCAST) {
+- rx_ctl |= 0x04;
++ rx_ctl |= 0x08;
+ } else if (!netdev_mc_empty(net)) {
+ struct netdev_hw_addr *ha;
+
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+index 73be7ff149f9..f146824e676d 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+@@ -1016,6 +1016,10 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
+ * is_on == 0 means MRC CCK is OFF (more noise imm)
+ */
+ bool is_on = param ? 1 : 0;
++
++ if (ah->caps.rx_chainmask == 1)
++ break;
++
+ REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
+ AR_PHY_MRC_CCK_ENABLE, is_on);
+ REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
+diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
+index 1c269f50822b..7c70cf2b5d1a 100644
+--- a/drivers/net/wireless/ath/ath9k/ath9k.h
++++ b/drivers/net/wireless/ath/ath9k/ath9k.h
+@@ -77,10 +77,6 @@ struct ath_config {
+ sizeof(struct ath_buf_state)); \
+ } while (0)
+
+-#define ATH_RXBUF_RESET(_bf) do { \
+- (_bf)->bf_stale = false; \
+- } while (0)
+-
+ /**
+ * enum buffer_type - Buffer type flags
+ *
+@@ -308,6 +304,7 @@ struct ath_rx {
+ struct ath_buf *rx_bufptr;
+ struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
+
++ struct ath_buf *buf_hold;
+ struct sk_buff *frag;
+ };
+
+diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
+index d171a7242e4a..8326c1402ea7 100644
+--- a/drivers/net/wireless/ath/ath9k/recv.c
++++ b/drivers/net/wireless/ath/ath9k/recv.c
+@@ -78,8 +78,6 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
+ struct ath_desc *ds;
+ struct sk_buff *skb;
+
+- ATH_RXBUF_RESET(bf);
+-
+ ds = bf->bf_desc;
+ ds->ds_link = 0; /* link to null */
+ ds->ds_data = bf->bf_buf_addr;
+@@ -106,6 +104,14 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
+ sc->rx.rxlink = &ds->ds_link;
+ }
+
++static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
++{
++ if (sc->rx.buf_hold)
++ ath_rx_buf_link(sc, sc->rx.buf_hold);
++
++ sc->rx.buf_hold = bf;
++}
++
+ static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
+ {
+ /* XXX block beacon interrupts */
+@@ -153,7 +159,6 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
+
+ skb = bf->bf_mpdu;
+
+- ATH_RXBUF_RESET(bf);
+ memset(skb->data, 0, ah->caps.rx_status_len);
+ dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+ ah->caps.rx_status_len, DMA_TO_DEVICE);
+@@ -492,6 +497,7 @@ int ath_startrecv(struct ath_softc *sc)
+ if (list_empty(&sc->rx.rxbuf))
+ goto start_recv;
+
++ sc->rx.buf_hold = NULL;
+ sc->rx.rxlink = NULL;
+ list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
+ ath_rx_buf_link(sc, bf);
+@@ -742,6 +748,9 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+ }
+
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
++ if (bf == sc->rx.buf_hold)
++ return NULL;
++
+ ds = bf->bf_desc;
+
+ /*
+@@ -1974,7 +1983,7 @@ requeue:
+ if (edma) {
+ ath_rx_edma_buf_link(sc, qtype);
+ } else {
+- ath_rx_buf_link(sc, bf);
++ ath_rx_buf_relink(sc, bf);
+ ath9k_hw_rxena(ah);
+ }
+ } while (1);
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 18da10047712..126ed31affe1 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -2423,6 +2423,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
+ for (acno = 0, ac = &an->ac[acno];
+ acno < WME_NUM_AC; acno++, ac++) {
+ ac->sched = false;
++ ac->clear_ps_filter = true;
+ ac->txq = sc->tx.txq_map[acno];
+ INIT_LIST_HEAD(&ac->tid_q);
+ }
+diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
+index 564218ca4853..078449385882 100644
+--- a/drivers/net/wireless/p54/p54usb.c
++++ b/drivers/net/wireless/p54/p54usb.c
+@@ -83,6 +83,7 @@ static struct usb_device_id p54u_table[] = {
+ {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */
+ {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
+ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
++ {USB_DEVICE(0x07aa, 0x0020)}, /* Corega WLUSB2GTST USB */
+ {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */
+ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
+ {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */
+diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
+index 67cbe5a1a928..8fb8c9e75688 100644
+--- a/drivers/net/wireless/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/rt2x00/rt2800lib.c
+@@ -2067,6 +2067,13 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
+ int i;
+
+ /*
++ * First check if temperature compensation is supported.
++ */
++ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
++ if (!rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC))
++ return 0;
++
++ /*
+ * Read TSSI boundaries for temperature compensation from
+ * the EEPROM.
+ *
+diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
+index deb87e959e02..82baaa25dc41 100644
+--- a/drivers/net/wireless/rtlwifi/wifi.h
++++ b/drivers/net/wireless/rtlwifi/wifi.h
+@@ -1630,7 +1630,7 @@ struct rtl_priv {
+ that it points to the data allocated
+ beyond this structure like:
+ rtl_pci_priv or rtl_usb_priv */
+- u8 priv[0];
++ u8 priv[0] __aligned(sizeof(void *));
+ };
+
+ #define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv))
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index 9b6588ef0673..37639a644563 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -1189,6 +1189,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
+ ap = dt_alloc(sizeof(*ap) + len + 1, 4);
+ if (!ap)
+ continue;
++ memset(ap, 0, sizeof(*ap) + len + 1);
+ ap->alias = start;
+ of_alias_add(ap, np, id, start, len);
+ }
+diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
+index 394ed9e79fd4..4aa30d86ca99 100644
+--- a/drivers/scsi/esp_scsi.c
++++ b/drivers/scsi/esp_scsi.c
+@@ -530,7 +530,7 @@ static int esp_need_to_nego_sync(struct esp_target_data *tp)
+ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
+ struct esp_lun_data *lp)
+ {
+- if (!ent->tag[0]) {
++ if (!ent->orig_tag[0]) {
+ /* Non-tagged, slot already taken? */
+ if (lp->non_tagged_cmd)
+ return -EBUSY;
+@@ -564,9 +564,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
+ return -EBUSY;
+ }
+
+- BUG_ON(lp->tagged_cmds[ent->tag[1]]);
++ BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);
+
+- lp->tagged_cmds[ent->tag[1]] = ent;
++ lp->tagged_cmds[ent->orig_tag[1]] = ent;
+ lp->num_tagged++;
+
+ return 0;
+@@ -575,9 +575,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
+ static void esp_free_lun_tag(struct esp_cmd_entry *ent,
+ struct esp_lun_data *lp)
+ {
+- if (ent->tag[0]) {
+- BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
+- lp->tagged_cmds[ent->tag[1]] = NULL;
++ if (ent->orig_tag[0]) {
++ BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
++ lp->tagged_cmds[ent->orig_tag[1]] = NULL;
+ lp->num_tagged--;
+ } else {
+ BUG_ON(lp->non_tagged_cmd != ent);
+@@ -667,6 +667,8 @@ static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
+ ent->tag[0] = 0;
+ ent->tag[1] = 0;
+ }
++ ent->orig_tag[0] = ent->tag[0];
++ ent->orig_tag[1] = ent->tag[1];
+
+ if (esp_alloc_lun_tag(ent, lp) < 0)
+ continue;
+diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
+index 28e22acf87ea..cd68805e8d78 100644
+--- a/drivers/scsi/esp_scsi.h
++++ b/drivers/scsi/esp_scsi.h
+@@ -271,6 +271,7 @@ struct esp_cmd_entry {
+ #define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */
+
+ u8 tag[2];
++ u8 orig_tag[2];
+
+ u8 status;
+ u8 message;
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 96029e6d027f..c8744588e6b7 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -2105,7 +2105,7 @@ iscsi_if_rx(struct sk_buff *skb)
+ break;
+ err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
+ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+- } while (err < 0 && err != -ECONNREFUSED);
++ } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
+ skb_pull(skb, rlen);
+ }
+ mutex_unlock(&rx_queue_mutex);
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 17603daf9b57..f6d2b62ec005 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2136,14 +2136,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
+ }
+ }
+
+- if (modepage == 0x3F) {
+- sd_printk(KERN_ERR, sdkp, "No Caching mode page "
+- "present\n");
+- goto defaults;
+- } else if ((buffer[offset] & 0x3f) != modepage) {
+- sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
+- goto defaults;
+- }
++ sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
++ goto defaults;
++
+ Page_found:
+ if (modepage == 8) {
+ sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
+diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
+index 95ebc267bb74..e3adb382d721 100644
+--- a/drivers/staging/comedi/drivers/dt282x.c
++++ b/drivers/staging/comedi/drivers/dt282x.c
+@@ -407,8 +407,9 @@ struct dt282x_private {
+ } \
+ udelay(5); \
+ } \
+- if (_i) \
++ if (_i) { \
+ b \
++ } \
+ } while (0)
+
+ static int dt282x_attach(struct comedi_device *dev,
+diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
+index 403fc0997d37..8b564ad17319 100644
+--- a/drivers/staging/comedi/drivers/ni_65xx.c
++++ b/drivers/staging/comedi/drivers/ni_65xx.c
+@@ -411,29 +411,25 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+ {
+- unsigned base_bitfield_channel;
+- const unsigned max_ports_per_bitfield = 5;
++ int base_bitfield_channel;
+ unsigned read_bits = 0;
+- unsigned j;
++ int last_port_offset = ni_65xx_port_by_channel(s->n_chan - 1);
++ int port_offset;
++
+ if (insn->n != 2)
+ return -EINVAL;
+ base_bitfield_channel = CR_CHAN(insn->chanspec);
+- for (j = 0; j < max_ports_per_bitfield; ++j) {
+- const unsigned port_offset =
+- ni_65xx_port_by_channel(base_bitfield_channel) + j;
+- const unsigned port =
+- sprivate(s)->base_port + port_offset;
+- unsigned base_port_channel;
++ for (port_offset = ni_65xx_port_by_channel(base_bitfield_channel);
++ port_offset <= last_port_offset; port_offset++) {
++ unsigned port = sprivate(s)->base_port + port_offset;
++ int base_port_channel = port_offset * ni_65xx_channels_per_port;
+ unsigned port_mask, port_data, port_read_bits;
+- int bitshift;
+- if (port >= ni_65xx_total_num_ports(board(dev)))
++ int bitshift = base_port_channel - base_bitfield_channel;
++
++ if (bitshift >= 32)
+ break;
+- base_port_channel = port_offset * ni_65xx_channels_per_port;
+ port_mask = data[0];
+ port_data = data[1];
+- bitshift = base_port_channel - base_bitfield_channel;
+- if (bitshift >= 32 || bitshift <= -32)
+- break;
+ if (bitshift > 0) {
+ port_mask >>= bitshift;
+ port_data >>= bitshift;
+diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
+index 754d54e8d4a8..f680766ae1e7 100644
+--- a/drivers/staging/vt6656/main_usb.c
++++ b/drivers/staging/vt6656/main_usb.c
+@@ -1221,6 +1221,8 @@ device_release_WPADEV(pDevice);
+ memset(pMgmt->abyCurrBSSID, 0, 6);
+ pMgmt->eCurrState = WMAC_STATE_IDLE;
+
++ pDevice->flags &= ~DEVICE_FLAGS_OPENED;
++
+ device_free_tx_bufs(pDevice);
+ device_free_rx_bufs(pDevice);
+ device_free_int_bufs(pDevice);
+@@ -1232,7 +1234,6 @@ device_release_WPADEV(pDevice);
+ usb_free_urb(pDevice->pInterruptURB);
+
+ BSSvClearNodeDBTable(pDevice, 0);
+- pDevice->flags &=(~DEVICE_FLAGS_OPENED);
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n");
+
+diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
+index 926d483139b8..d197b3ecfbb4 100644
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -709,9 +709,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
+ struct zram *zram;
+
+ zram = bdev->bd_disk->private_data;
+- down_write(&zram->lock);
+ zram_free_page(zram, index);
+- up_write(&zram->lock);
+ zram_stat64_inc(zram, &zram->stats.notify_free);
+ }
+
+diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
+index 87f2fec1bc94..e5cd2469b6a0 100644
+--- a/drivers/staging/zram/zram_drv.h
++++ b/drivers/staging/zram/zram_drv.h
+@@ -107,9 +107,8 @@ struct zram {
+ void *compress_buffer;
+ struct table *table;
+ spinlock_t stat64_lock; /* protect 64-bit stats */
+- struct rw_semaphore lock; /* protect compression buffers, table,
+- * 32bit stat counters against concurrent
+- * notifications, reads and writes */
++ struct rw_semaphore lock; /* protect compression buffers and table
++ * against concurrent read and writes */
+ struct request_queue *queue;
+ struct gendisk *disk;
+ int init_done;
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index c0b487284d9b..f5440a7da589 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -552,11 +552,12 @@ static int dma_push_rx(struct eg20t_port *priv, int size)
+ dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
+ size - room);
+ if (!room)
+- return room;
++ goto out;
+
+ tty_insert_flip_string(tty, sg_virt(&priv->sg_rx), size);
+
+ port->icount.rx += room;
++out:
+ tty_kref_put(tty);
+
+ return room;
+@@ -970,6 +971,8 @@ static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
+ if (tty == NULL) {
+ for (i = 0; error_msg[i] != NULL; i++)
+ dev_err(&priv->pdev->dev, error_msg[i]);
++ } else {
++ tty_kref_put(tty);
+ }
+ }
+
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index fe8c04b739be..06dfb4fb331f 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -187,6 +187,7 @@ skip_error:
+ static void wdm_int_callback(struct urb *urb)
+ {
+ int rv = 0;
++ int responding;
+ int status = urb->status;
+ struct wdm_device *desc;
+ struct usb_ctrlrequest *req;
+@@ -260,8 +261,8 @@ static void wdm_int_callback(struct urb *urb)
+ desc->response->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ spin_lock(&desc->iuspin);
+ clear_bit(WDM_READ, &desc->flags);
+- set_bit(WDM_RESPONDING, &desc->flags);
+- if (!test_bit(WDM_DISCONNECTING, &desc->flags)
++ responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
++ if (!responding && !test_bit(WDM_DISCONNECTING, &desc->flags)
+ && !test_bit(WDM_SUSPENDING, &desc->flags)) {
+ rv = usb_submit_urb(desc->response, GFP_ATOMIC);
+ dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d",
+@@ -658,16 +659,20 @@ static void wdm_rxwork(struct work_struct *work)
+ {
+ struct wdm_device *desc = container_of(work, struct wdm_device, rxwork);
+ unsigned long flags;
+- int rv;
++ int rv = 0;
++ int responding;
+
+ spin_lock_irqsave(&desc->iuspin, flags);
+ if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
+ spin_unlock_irqrestore(&desc->iuspin, flags);
+ } else {
++ responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
+ spin_unlock_irqrestore(&desc->iuspin, flags);
+- rv = usb_submit_urb(desc->response, GFP_KERNEL);
++ if (!responding)
++ rv = usb_submit_urb(desc->response, GFP_KERNEL);
+ if (rv < 0 && rv != -EPERM) {
+ spin_lock_irqsave(&desc->iuspin, flags);
++ clear_bit(WDM_RESPONDING, &desc->flags);
+ if (!test_bit(WDM_DISCONNECTING, &desc->flags))
+ schedule_work(&desc->rxwork);
+ spin_unlock_irqrestore(&desc->iuspin, flags);
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index f4bdd0ce8d56..78609d302625 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -424,7 +424,8 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+
+ memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
+ if (config->desc.bDescriptorType != USB_DT_CONFIG ||
+- config->desc.bLength < USB_DT_CONFIG_SIZE) {
++ config->desc.bLength < USB_DT_CONFIG_SIZE ||
++ config->desc.bLength > size) {
+ dev_err(ddev, "invalid descriptor for config index %d: "
+ "type = 0x%X, length = %d\n", cfgidx,
+ config->desc.bDescriptorType, config->desc.bLength);
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 22f770abc399..49257b3053da 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -646,6 +646,22 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
+ if ((index & ~USB_DIR_IN) == 0)
+ return 0;
+ ret = findintfep(ps->dev, index);
++ if (ret < 0) {
++ /*
++ * Some not fully compliant Win apps seem to get
++ * index wrong and have the endpoint number here
++ * rather than the endpoint address (with the
++ * correct direction). Win does let this through,
++ * so we'll not reject it here but leave it to
++ * the device to not break KVM. But we warn.
++ */
++ ret = findintfep(ps->dev, index ^ 0x80);
++ if (ret >= 0)
++ dev_info(&ps->dev->dev,
++ "%s: process %i (%s) requesting ep %02x but needs %02x\n",
++ __func__, task_pid_nr(current),
++ current->comm, index, index ^ 0x80);
++ }
+ if (ret >= 0)
+ ret = checkintf(ps, ret);
+ break;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 2768a7e7cf2e..a5ea85ff5c0d 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3749,7 +3749,8 @@ static void hub_events(void)
+ hub->hdev->children[i - 1];
+
+ dev_dbg(hub_dev, "warm reset port %d\n", i);
+- if (!udev) {
++ if (!udev || !(portstatus &
++ USB_PORT_STAT_CONNECTION)) {
+ status = hub_port_reset(hub, i,
+ NULL, HUB_BH_RESET_TIME,
+ true);
+@@ -3759,8 +3760,8 @@ static void hub_events(void)
+ usb_lock_device(udev);
+ status = usb_reset_device(udev);
+ usb_unlock_device(udev);
++ connect_change = 0;
+ }
+- connect_change = 0;
+ }
+
+ if (connect_change)
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index f77c00042685..9edc582b7679 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -45,6 +45,8 @@
+ /* FIXME define these in <linux/pci_ids.h> */
+ #define PCI_VENDOR_ID_SYNOPSYS 0x16c3
+ #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
++#define PCI_DEVICE_ID_INTEL_BYT 0x0f37
++#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e
+
+ #define DWC3_PCI_DEVS_POSSIBLE 32
+
+@@ -191,6 +193,8 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
+ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
+ PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
+ },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
+ { } /* Terminating Entry */
+ };
+ MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
+diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
+index 55978fcfa4b2..08744737c5ab 100644
+--- a/drivers/usb/host/ehci-mxc.c
++++ b/drivers/usb/host/ehci-mxc.c
+@@ -296,7 +296,7 @@ static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
+ if (pdata && pdata->exit)
+ pdata->exit(pdev);
+
+- if (pdata->otg)
++ if (pdata && pdata->otg)
+ otg_shutdown(pdata->otg);
+
+ usb_remove_hcd(hcd);
+diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
+index b71e22e73263..29c042111714 100644
+--- a/drivers/usb/host/ehci-pci.c
++++ b/drivers/usb/host/ehci-pci.c
+@@ -543,7 +543,7 @@ static struct pci_driver ehci_pci_driver = {
+ .remove = usb_hcd_pci_remove,
+ .shutdown = usb_hcd_pci_shutdown,
+
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_PM
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
+ },
+diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
+index bc01b064585a..839cb648d248 100644
+--- a/drivers/usb/host/ohci-pci.c
++++ b/drivers/usb/host/ohci-pci.c
+@@ -413,7 +413,7 @@ static struct pci_driver ohci_pci_driver = {
+ .remove = usb_hcd_pci_remove,
+ .shutdown = usb_hcd_pci_shutdown,
+
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_PM
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
+ },
+diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
+index c300bd2f7d1c..0f228c46eeda 100644
+--- a/drivers/usb/host/uhci-pci.c
++++ b/drivers/usb/host/uhci-pci.c
+@@ -293,7 +293,7 @@ static struct pci_driver uhci_pci_driver = {
+ .remove = usb_hcd_pci_remove,
+ .shutdown = uhci_shutdown,
+
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_PM
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
+ },
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 79d27209c563..61b06688c916 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -330,7 +330,7 @@ static struct pci_driver xhci_pci_driver = {
+ /* suspend and resume implemented later */
+
+ .shutdown = usb_hcd_pci_shutdown,
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_PM
+ .driver = {
+ .pm = &usb_hcd_pci_pm_ops
+ },
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 633476e25bbc..2b4f42bac98f 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -879,8 +879,12 @@ remove_finished_td:
+ /* Otherwise ring the doorbell(s) to restart queued transfers */
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ }
+- ep->stopped_td = NULL;
+- ep->stopped_trb = NULL;
++
++ /* Clear stopped_td and stopped_trb if endpoint is not halted */
++ if (!(ep->ep_state & EP_HALTED)) {
++ ep->stopped_td = NULL;
++ ep->stopped_trb = NULL;
++ }
+
+ /*
+ * Drop the lock and complete the URBs in the cancelled TD list.
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 6e1c92a11eb0..629aa743e45b 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -3484,10 +3484,21 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ {
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_virt_device *virt_dev;
++ struct device *dev = hcd->self.controller;
+ unsigned long flags;
+ u32 state;
+ int i, ret;
+
++#ifndef CONFIG_USB_DEFAULT_PERSIST
++ /*
++ * We called pm_runtime_get_noresume when the device was attached.
++ * Decrement the counter here to allow controller to runtime suspend
++ * if no devices remain.
++ */
++ if (xhci->quirks & XHCI_RESET_ON_RESUME)
++ pm_runtime_put_noidle(dev);
++#endif
++
+ ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
+ /* If the host is halted due to driver unload, we still need to free the
+ * device.
+@@ -3559,6 +3570,7 @@ static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
+ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ {
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
++ struct device *dev = hcd->self.controller;
+ unsigned long flags;
+ int timeleft;
+ int ret;
+@@ -3611,6 +3623,16 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ goto disable_slot;
+ }
+ udev->slot_id = xhci->slot_id;
++
++#ifndef CONFIG_USB_DEFAULT_PERSIST
++ /*
++ * If resetting upon resume, we can't put the controller into runtime
++ * suspend if there is a device attached.
++ */
++ if (xhci->quirks & XHCI_RESET_ON_RESUME)
++ pm_runtime_get_noresume(dev);
++#endif
++
+ /* Is this a LS or FS device under a HS hub? */
+ /* Hub or peripherial? */
+ return 1;
+diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
+index 9270d5c919cf..8e02ff2b7d7f 100644
+--- a/drivers/usb/serial/mos7720.c
++++ b/drivers/usb/serial/mos7720.c
+@@ -383,7 +383,7 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
+ kfree(urbtrack);
+ return -ENOMEM;
+ }
+- urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL);
++ urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_ATOMIC);
+ if (!urbtrack->setup) {
+ usb_free_urb(urbtrack->urb);
+ kfree(urbtrack);
+@@ -391,8 +391,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
+ }
+ urbtrack->setup->bRequestType = (__u8)0x40;
+ urbtrack->setup->bRequest = (__u8)0x0e;
+- urbtrack->setup->wValue = get_reg_value(reg, dummy);
+- urbtrack->setup->wIndex = get_reg_index(reg);
++ urbtrack->setup->wValue = cpu_to_le16(get_reg_value(reg, dummy));
++ urbtrack->setup->wIndex = cpu_to_le16(get_reg_index(reg));
+ urbtrack->setup->wLength = 0;
+ usb_fill_control_urb(urbtrack->urb, usbdev,
+ usb_sndctrlpipe(usbdev, 0),
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c2103f404b3c..536c4ad2f5b7 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -81,6 +81,7 @@ static void option_instat_callback(struct urb *urb);
+
+ #define HUAWEI_VENDOR_ID 0x12D1
+ #define HUAWEI_PRODUCT_E173 0x140C
++#define HUAWEI_PRODUCT_E1750 0x1406
+ #define HUAWEI_PRODUCT_K4505 0x1464
+ #define HUAWEI_PRODUCT_K3765 0x1465
+ #define HUAWEI_PRODUCT_K4605 0x14C6
+@@ -581,6 +582,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
+diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
+index bf1c094f4ebf..b657de614a06 100644
+--- a/drivers/xen/grant-table.c
++++ b/drivers/xen/grant-table.c
+@@ -355,9 +355,18 @@ void gnttab_request_free_callback(struct gnttab_free_callback *callback,
+ void (*fn)(void *), void *arg, u16 count)
+ {
+ unsigned long flags;
++ struct gnttab_free_callback *cb;
++
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+- if (callback->next)
+- goto out;
++
++ /* Check if the callback is already on the list */
++ cb = gnttab_free_callback_list;
++ while (cb) {
++ if (cb == callback)
++ goto out;
++ cb = cb->next;
++ }
++
+ callback->fn = fn;
+ callback->arg = arg;
+ callback->count = count;
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index f3a257d7a985..fb001cde0af3 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -380,8 +380,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove);
+ */
+ void debugfs_remove_recursive(struct dentry *dentry)
+ {
+- struct dentry *child;
+- struct dentry *parent;
++ struct dentry *child, *next, *parent;
+
+ if (!dentry)
+ return;
+@@ -391,61 +390,37 @@ void debugfs_remove_recursive(struct dentry *dentry)
+ return;
+
+ parent = dentry;
++ down:
+ mutex_lock(&parent->d_inode->i_mutex);
++ list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
++ if (!debugfs_positive(child))
++ continue;
+
+- while (1) {
+- /*
+- * When all dentries under "parent" has been removed,
+- * walk up the tree until we reach our starting point.
+- */
+- if (list_empty(&parent->d_subdirs)) {
+- mutex_unlock(&parent->d_inode->i_mutex);
+- if (parent == dentry)
+- break;
+- parent = parent->d_parent;
+- mutex_lock(&parent->d_inode->i_mutex);
+- }
+- child = list_entry(parent->d_subdirs.next, struct dentry,
+- d_u.d_child);
+- next_sibling:
+-
+- /*
+- * If "child" isn't empty, walk down the tree and
+- * remove all its descendants first.
+- */
++ /* perhaps simple_empty(child) makes more sense */
+ if (!list_empty(&child->d_subdirs)) {
+ mutex_unlock(&parent->d_inode->i_mutex);
+ parent = child;
+- mutex_lock(&parent->d_inode->i_mutex);
+- continue;
+- }
+- __debugfs_remove(child, parent);
+- if (parent->d_subdirs.next == &child->d_u.d_child) {
+- /*
+- * Try the next sibling.
+- */
+- if (child->d_u.d_child.next != &parent->d_subdirs) {
+- child = list_entry(child->d_u.d_child.next,
+- struct dentry,
+- d_u.d_child);
+- goto next_sibling;
+- }
+-
+- /*
+- * Avoid infinite loop if we fail to remove
+- * one dentry.
+- */
+- mutex_unlock(&parent->d_inode->i_mutex);
+- break;
++ goto down;
+ }
+- simple_release_fs(&debugfs_mount, &debugfs_mount_count);
++ up:
++ if (!__debugfs_remove(child, parent))
++ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+ }
+
+- parent = dentry->d_parent;
++ mutex_unlock(&parent->d_inode->i_mutex);
++ child = parent;
++ parent = parent->d_parent;
+ mutex_lock(&parent->d_inode->i_mutex);
+- __debugfs_remove(dentry, parent);
++
++ if (child != dentry) {
++ next = list_entry(child->d_u.d_child.next, struct dentry,
++ d_u.d_child);
++ goto up;
++ }
++
++ if (!__debugfs_remove(child, parent))
++ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+ mutex_unlock(&parent->d_inode->i_mutex);
+- simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+ }
+ EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
+
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 3ca3b7fabb1b..2e0e34f957cc 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2054,7 +2054,8 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
+ int err = 0;
+
+ /* ext4_handle_valid() assumes a valid handle_t pointer */
+- if (handle && !ext4_handle_valid(handle))
++ if (handle && !ext4_handle_valid(handle) &&
++ !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS))
+ return 0;
+
+ mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 5ef7afb9cf3e..06e2f73e64a8 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1063,6 +1063,8 @@ static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
+ return -EIO;
+ if (reclen > nbytes)
+ break;
++ if (memchr(dirent->name, '/', dirent->namelen) != NULL)
++ return -EIO;
+
+ over = filldir(dstbuf, dirent->name, dirent->namelen,
+ file->f_pos, dirent->ino, dirent->type);
+@@ -1282,6 +1284,7 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
+ {
+ struct inode *inode = entry->d_inode;
+ struct fuse_conn *fc = get_fuse_conn(inode);
++ struct fuse_inode *fi = get_fuse_inode(inode);
+ struct fuse_req *req;
+ struct fuse_setattr_in inarg;
+ struct fuse_attr_out outarg;
+@@ -1312,8 +1315,10 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+- if (is_truncate)
++ if (is_truncate) {
+ fuse_set_nowrite(inode);
++ set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
++ }
+
+ memset(&inarg, 0, sizeof(inarg));
+ memset(&outarg, 0, sizeof(outarg));
+@@ -1375,12 +1380,14 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
+ invalidate_inode_pages2(inode->i_mapping);
+ }
+
++ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+ return 0;
+
+ error:
+ if (is_truncate)
+ fuse_release_nowrite(inode);
+
++ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+ return err;
+ }
+
+@@ -1439,6 +1446,8 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
+ fc->no_setxattr = 1;
+ err = -EOPNOTSUPP;
+ }
++ if (!err)
++ fuse_invalidate_attr(inode);
+ return err;
+ }
+
+@@ -1568,6 +1577,8 @@ static int fuse_removexattr(struct dentry *entry, const char *name)
+ fc->no_removexattr = 1;
+ err = -EOPNOTSUPP;
+ }
++ if (!err)
++ fuse_invalidate_attr(inode);
+ return err;
+ }
+
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 5242006b9515..510d4aa26686 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -519,7 +519,8 @@ static void fuse_read_update_size(struct inode *inode, loff_t size,
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ spin_lock(&fc->lock);
+- if (attr_ver == fi->attr_version && size < inode->i_size) {
++ if (attr_ver == fi->attr_version && size < inode->i_size &&
++ !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
+ fi->attr_version = ++fc->attr_version;
+ i_size_write(inode, size);
+ }
+@@ -881,12 +882,16 @@ static ssize_t fuse_perform_write(struct file *file,
+ {
+ struct inode *inode = mapping->host;
+ struct fuse_conn *fc = get_fuse_conn(inode);
++ struct fuse_inode *fi = get_fuse_inode(inode);
+ int err = 0;
+ ssize_t res = 0;
+
+ if (is_bad_inode(inode))
+ return -EIO;
+
++ if (inode->i_size < pos + iov_iter_count(ii))
++ set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
++
+ do {
+ struct fuse_req *req;
+ ssize_t count;
+@@ -921,6 +926,7 @@ static ssize_t fuse_perform_write(struct file *file,
+ if (res > 0)
+ fuse_write_update_size(inode, pos);
+
++ clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+ fuse_invalidate_attr(inode);
+
+ return res > 0 ? res : err;
+@@ -1251,7 +1257,6 @@ static int fuse_writepage_locked(struct page *page)
+
+ inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
+ inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
+- end_page_writeback(page);
+
+ spin_lock(&fc->lock);
+ list_add(&req->writepages_entry, &fi->writepages);
+@@ -1259,6 +1264,8 @@ static int fuse_writepage_locked(struct page *page)
+ fuse_flush_writepages(inode);
+ spin_unlock(&fc->lock);
+
++ end_page_writeback(page);
++
+ return 0;
+
+ err_free:
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index 89c4a58bc364..52ffd2496fb5 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -103,6 +103,15 @@ struct fuse_inode {
+
+ /** List of writepage requestst (pending or sent) */
+ struct list_head writepages;
++
++ /** Miscellaneous bits describing inode state */
++ unsigned long state;
++};
++
++/** FUSE inode state bits */
++enum {
++ /** An operation changing file size is in progress */
++ FUSE_I_SIZE_UNSTABLE,
+ };
+
+ struct fuse_conn;
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 1f82d953305c..912c250b314a 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -92,6 +92,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
+ fi->attr_version = 0;
+ fi->writectr = 0;
+ fi->orig_ino = 0;
++ fi->state = 0;
+ INIT_LIST_HEAD(&fi->write_files);
+ INIT_LIST_HEAD(&fi->queued_writes);
+ INIT_LIST_HEAD(&fi->writepages);
+@@ -200,7 +201,8 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
+ loff_t oldsize;
+
+ spin_lock(&fc->lock);
+- if (attr_version != 0 && fi->attr_version > attr_version) {
++ if ((attr_version != 0 && fi->attr_version > attr_version) ||
++ test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
+ spin_unlock(&fc->lock);
+ return;
+ }
+diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
+index f950059525fc..a5f25a70c015 100644
+--- a/fs/isofs/inode.c
++++ b/fs/isofs/inode.c
+@@ -120,8 +120,8 @@ static void destroy_inodecache(void)
+
+ static int isofs_remount(struct super_block *sb, int *flags, char *data)
+ {
+- /* we probably want a lot more here */
+- *flags |= MS_RDONLY;
++ if (!(*flags & MS_RDONLY))
++ return -EROFS;
+ return 0;
+ }
+
+@@ -770,15 +770,6 @@ root_found:
+ */
+ s->s_maxbytes = 0x80000000000LL;
+
+- /*
+- * The CDROM is read-only, has no nodes (devices) on it, and since
+- * all of the files appear to be owned by root, we really do not want
+- * to allow suid. (suid or devices will not show up unless we have
+- * Rock Ridge extensions)
+- */
+-
+- s->s_flags |= MS_RDONLY /* | MS_NODEV | MS_NOSUID */;
+-
+ /* Set this for reference. Its not currently used except on write
+ which we don't have .. */
+
+@@ -1535,6 +1526,9 @@ struct inode *isofs_iget(struct super_block *sb,
+ static struct dentry *isofs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+ {
++ /* We don't support read-write mounts */
++ if (!(flags & MS_RDONLY))
++ return ERR_PTR(-EACCES);
+ return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
+ }
+
+diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
+index 65221a04c6f0..16eacece560f 100644
+--- a/fs/nilfs2/page.c
++++ b/fs/nilfs2/page.c
+@@ -94,6 +94,7 @@ void nilfs_forget_buffer(struct buffer_head *bh)
+ clear_buffer_nilfs_volatile(bh);
+ clear_buffer_nilfs_checked(bh);
+ clear_buffer_nilfs_redirected(bh);
++ clear_buffer_async_write(bh);
+ clear_buffer_dirty(bh);
+ if (nilfs_page_buffers_clean(page))
+ __nilfs_clear_page_dirty(page);
+@@ -390,6 +391,7 @@ void nilfs_clear_dirty_pages(struct address_space *mapping)
+ bh = head = page_buffers(page);
+ do {
+ lock_buffer(bh);
++ clear_buffer_async_write(bh);
+ clear_buffer_dirty(bh);
+ clear_buffer_nilfs_volatile(bh);
+ clear_buffer_nilfs_checked(bh);
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 6f24e67162c0..233d3ed264fa 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -662,7 +662,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
+
+ bh = head = page_buffers(page);
+ do {
+- if (!buffer_dirty(bh))
++ if (!buffer_dirty(bh) || buffer_async_write(bh))
+ continue;
+ get_bh(bh);
+ list_add_tail(&bh->b_assoc_buffers, listp);
+@@ -696,7 +696,8 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
+ for (i = 0; i < pagevec_count(&pvec); i++) {
+ bh = head = page_buffers(pvec.pages[i]);
+ do {
+- if (buffer_dirty(bh)) {
++ if (buffer_dirty(bh) &&
++ !buffer_async_write(bh)) {
+ get_bh(bh);
+ list_add_tail(&bh->b_assoc_buffers,
+ listp);
+@@ -1576,6 +1577,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+
+ list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
+ b_assoc_buffers) {
++ set_buffer_async_write(bh);
+ if (bh->b_page != bd_page) {
+ if (bd_page) {
+ lock_page(bd_page);
+@@ -1589,6 +1591,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
+
+ list_for_each_entry(bh, &segbuf->sb_payload_buffers,
+ b_assoc_buffers) {
++ set_buffer_async_write(bh);
+ if (bh == segbuf->sb_super_root) {
+ if (bh->b_page != bd_page) {
+ lock_page(bd_page);
+@@ -1674,6 +1677,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
+ list_for_each_entry(segbuf, logs, sb_list) {
+ list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
+ b_assoc_buffers) {
++ clear_buffer_async_write(bh);
+ if (bh->b_page != bd_page) {
+ if (bd_page)
+ end_page_writeback(bd_page);
+@@ -1683,6 +1687,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
+
+ list_for_each_entry(bh, &segbuf->sb_payload_buffers,
+ b_assoc_buffers) {
++ clear_buffer_async_write(bh);
+ if (bh == segbuf->sb_super_root) {
+ if (bh->b_page != bd_page) {
+ end_page_writeback(bd_page);
+@@ -1752,6 +1757,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
+ b_assoc_buffers) {
+ set_buffer_uptodate(bh);
+ clear_buffer_dirty(bh);
++ clear_buffer_async_write(bh);
+ if (bh->b_page != bd_page) {
+ if (bd_page)
+ end_page_writeback(bd_page);
+@@ -1773,6 +1779,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
+ b_assoc_buffers) {
+ set_buffer_uptodate(bh);
+ clear_buffer_dirty(bh);
++ clear_buffer_async_write(bh);
+ clear_buffer_delay(bh);
+ clear_buffer_nilfs_volatile(bh);
+ clear_buffer_nilfs_redirected(bh);
+diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
+index a50636025364..0c2f9122b262 100644
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -18,6 +18,12 @@ static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
+ old->tgid == new->tgid) {
+ switch (old->data_type) {
+ case (FSNOTIFY_EVENT_PATH):
++#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
++ /* dont merge two permission events */
++ if ((old->mask & FAN_ALL_PERM_EVENTS) &&
++ (new->mask & FAN_ALL_PERM_EVENTS))
++ return false;
++#endif
+ if ((old->path.mnt == new->path.mnt) &&
+ (old->path.dentry == new->path.dentry))
+ return true;
+diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
+index 7eb1c0c7c166..cf228479879d 100644
+--- a/fs/ocfs2/extent_map.c
++++ b/fs/ocfs2/extent_map.c
+@@ -782,7 +782,6 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ cpos = map_start >> osb->s_clustersize_bits;
+ mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
+ map_start + map_len);
+- mapping_end -= cpos;
+ is_last = 0;
+ while (cpos < mapping_end && !is_last) {
+ u32 fe_flags;
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 331e2ef7eaa5..19fe719ace3e 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -416,10 +416,12 @@ struct hid_report {
+ struct hid_device *device; /* associated device */
+ };
+
++#define HID_MAX_IDS 256
++
+ struct hid_report_enum {
+ unsigned numbered;
+ struct list_head report_list;
+- struct hid_report *report_id_hash[256];
++ struct hid_report *report_id_hash[HID_MAX_IDS];
+ };
+
+ #define HID_REPORT_TYPES 3
+@@ -716,6 +718,10 @@ void hid_output_report(struct hid_report *report, __u8 *data);
+ struct hid_device *hid_allocate_device(void);
+ struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
+ int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
++struct hid_report *hid_validate_values(struct hid_device *hid,
++ unsigned int type, unsigned int id,
++ unsigned int field_index,
++ unsigned int report_counts);
+ int hid_check_keys_pressed(struct hid_device *hid);
+ int hid_connect(struct hid_device *hid, unsigned int connect_mask);
+ void hid_disconnect(struct hid_device *hid);
+diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
+index ba45e6bc0764..f5a21d0a22ba 100644
+--- a/include/linux/icmpv6.h
++++ b/include/linux/icmpv6.h
+@@ -123,6 +123,8 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
+ #define ICMPV6_NOT_NEIGHBOUR 2
+ #define ICMPV6_ADDR_UNREACH 3
+ #define ICMPV6_PORT_UNREACH 4
++#define ICMPV6_POLICY_FAIL 5
++#define ICMPV6_REJECT_ROUTE 6
+
+ /*
+ * Codes for Time Exceeded
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index 0c997767429a..84b1447481b5 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -255,6 +255,7 @@ struct inet6_skb_parm {
+ #define IP6SKB_XFRM_TRANSFORMED 1
+ #define IP6SKB_FORWARDED 2
+ #define IP6SKB_REROUTED 4
++#define IP6SKB_FRAGMENTED 16
+ };
+
+ #define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index d0493f6064f3..305fd756fc4f 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -865,7 +865,8 @@ extern void pagefault_out_of_memory(void);
+ * Flags passed to show_mem() and show_free_areas() to suppress output in
+ * various contexts.
+ */
+-#define SHOW_MEM_FILTER_NODES (0x0001u) /* filter disallowed nodes */
++#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
++#define SHOW_MEM_FILTER_PAGE_COUNT (0x0002u) /* page type count */
+
+ extern void show_free_areas(unsigned int flags);
+ extern bool skip_free_areas_node(unsigned int flags, int nid);
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 3cfcfea6bfc9..eeb6a29ee1e1 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -927,7 +927,7 @@ struct perf_cpu_context {
+ int exclusive;
+ struct list_head rotation_list;
+ int jiffies_interval;
+- struct pmu *active_pmu;
++ struct pmu *unique_pmu;
+ struct perf_cgroup *cgrp;
+ };
+
+diff --git a/include/linux/rculist.h b/include/linux/rculist.h
+index 6f95e241ed68..3863352656e7 100644
+--- a/include/linux/rculist.h
++++ b/include/linux/rculist.h
+@@ -254,8 +254,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
+ */
+ #define list_first_or_null_rcu(ptr, type, member) \
+ ({struct list_head *__ptr = (ptr); \
+- struct list_head __rcu *__next = list_next_rcu(__ptr); \
+- likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
++ struct list_head *__next = ACCESS_ONCE(__ptr->next); \
++ likely(__ptr != __next) ? \
++ list_entry_rcu(__next, type, member) : NULL; \
+ })
+
+ /**
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index 03354d557b79..0daa46b82043 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -395,7 +395,7 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
+ extern void usb_hcd_pci_remove(struct pci_dev *dev);
+ extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
+
+-#ifdef CONFIG_PM_SLEEP
++#ifdef CONFIG_PM
+ extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
+ #endif
+ #endif /* CONFIG_PCI */
+diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
+index e9ff3fc5e688..34b06dadca05 100644
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -41,6 +41,10 @@ struct inet_peer {
+ u32 pmtu_orig;
+ u32 pmtu_learned;
+ struct inetpeer_addr_base redirect_learned;
++ union {
++ struct list_head gc_list;
++ struct rcu_head gc_rcu;
++ };
+ /*
+ * Once inet_peer is queued for deletion (refcnt == -1), following fields
+ * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
+@@ -96,6 +100,8 @@ static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr,
+ extern void inet_putpeer(struct inet_peer *p);
+ extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
+
++extern void inetpeer_invalidate_tree(int family);
++
+ /*
+ * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
+ * tcp_ts_stamp if no refcount is taken on inet_peer
+diff --git a/include/net/ip.h b/include/net/ip.h
+index eca0ef7a495e..06aed72023f7 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -266,9 +266,11 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
+
+ extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
+
+-static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk)
++static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
+ {
+- if (iph->frag_off & htons(IP_DF)) {
++ struct iphdr *iph = ip_hdr(skb);
++
++ if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
+ /* This is only to work around buggy Windows95/2000
+ * VJ compression implementations. If the ID field
+ * does not change, they drop every other packet in
+@@ -280,9 +282,11 @@ static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, str
+ __ip_select_ident(iph, dst, 0);
+ }
+
+-static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more)
++static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more)
+ {
+- if (iph->frag_off & htons(IP_DF)) {
++ struct iphdr *iph = ip_hdr(skb);
++
++ if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
+ if (sk && inet_sk(sk)->inet_daddr) {
+ iph->id = htons(inet_sk(sk)->inet_id);
+ inet_sk(sk)->inet_id += 1 + more;
+diff --git a/include/net/ipip.h b/include/net/ipip.h
+index a32654d52730..4dccfe3bf731 100644
+--- a/include/net/ipip.h
++++ b/include/net/ipip.h
+@@ -50,7 +50,7 @@ struct ip_tunnel_prl_entry {
+ int pkt_len = skb->len - skb_transport_offset(skb); \
+ \
+ skb->ip_summed = CHECKSUM_NONE; \
+- ip_select_ident(iph, &rt->dst, NULL); \
++ ip_select_ident(skb, &rt->dst, NULL); \
+ \
+ err = ip_local_out(skb); \
+ if (likely(net_xmit_eval(err) == 0)) { \
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index d2a01fee0d42..2a1ffb7b1915 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -3504,6 +3504,7 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
+ const char *buffer)
+ {
+ struct cgroup_event *event = NULL;
++ struct cgroup *cgrp_cfile;
+ unsigned int efd, cfd;
+ struct file *efile = NULL;
+ struct file *cfile = NULL;
+@@ -3559,6 +3560,16 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
+ goto fail;
+ }
+
++ /*
++ * The file to be monitored must be in the same cgroup as
++ * cgroup.event_control is.
++ */
++ cgrp_cfile = __d_cgrp(cfile->f_dentry->d_parent);
++ if (cgrp_cfile != cgrp) {
++ ret = -EINVAL;
++ goto fail;
++ }
++
+ if (!event->cft->register_event || !event->cft->unregister_event) {
+ ret = -EINVAL;
+ goto fail;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 5bbe443b0177..83d5621b7d90 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -242,9 +242,9 @@ perf_cgroup_match(struct perf_event *event)
+ return !event->cgrp || event->cgrp == cpuctx->cgrp;
+ }
+
+-static inline void perf_get_cgroup(struct perf_event *event)
++static inline bool perf_tryget_cgroup(struct perf_event *event)
+ {
+- css_get(&event->cgrp->css);
++ return css_tryget(&event->cgrp->css);
+ }
+
+ static inline void perf_put_cgroup(struct perf_event *event)
+@@ -360,6 +360,8 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
+
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
++ if (cpuctx->unique_pmu != pmu)
++ continue; /* ensure we process each cpuctx once */
+
+ /*
+ * perf_cgroup_events says at least one
+@@ -383,9 +385,10 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
+
+ if (mode & PERF_CGROUP_SWIN) {
+ WARN_ON_ONCE(cpuctx->cgrp);
+- /* set cgrp before ctxsw in to
+- * allow event_filter_match() to not
+- * have to pass task around
++ /*
++ * set cgrp before ctxsw in to allow
++ * event_filter_match() to not have to pass
++ * task around
+ */
+ cpuctx->cgrp = perf_cgroup_from_task(task);
+ cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
+@@ -473,7 +476,11 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
+ event->cgrp = cgrp;
+
+ /* must be done before we fput() the file */
+- perf_get_cgroup(event);
++ if (!perf_tryget_cgroup(event)) {
++ event->cgrp = NULL;
++ ret = -ENOENT;
++ goto out;
++ }
+
+ /*
+ * all events in a group must monitor
+@@ -4377,7 +4384,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
+ rcu_read_lock();
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+- if (cpuctx->active_pmu != pmu)
++ if (cpuctx->unique_pmu != pmu)
+ goto next;
+ perf_event_task_ctx(&cpuctx->ctx, task_event);
+
+@@ -4523,7 +4530,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
+ rcu_read_lock();
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+- if (cpuctx->active_pmu != pmu)
++ if (cpuctx->unique_pmu != pmu)
+ goto next;
+ perf_event_comm_ctx(&cpuctx->ctx, comm_event);
+
+@@ -4719,7 +4726,7 @@ got_name:
+ rcu_read_lock();
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+- if (cpuctx->active_pmu != pmu)
++ if (cpuctx->unique_pmu != pmu)
+ goto next;
+ perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
+ vma->vm_flags & VM_EXEC);
+@@ -5741,8 +5748,8 @@ static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
+
+ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+
+- if (cpuctx->active_pmu == old_pmu)
+- cpuctx->active_pmu = pmu;
++ if (cpuctx->unique_pmu == old_pmu)
++ cpuctx->unique_pmu = pmu;
+ }
+ }
+
+@@ -5877,7 +5884,7 @@ skip_type:
+ cpuctx->ctx.pmu = pmu;
+ cpuctx->jiffies_interval = 1;
+ INIT_LIST_HEAD(&cpuctx->rotation_list);
+- cpuctx->active_pmu = pmu;
++ cpuctx->unique_pmu = pmu;
+ }
+
+ got_cpu_context:
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index 59474c5ecf6c..c261da7ddc71 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -4890,11 +4890,15 @@ static void task_fork_fair(struct task_struct *p)
+
+ update_rq_clock(rq);
+
+- if (unlikely(task_cpu(p) != this_cpu)) {
+- rcu_read_lock();
+- __set_task_cpu(p, this_cpu);
+- rcu_read_unlock();
+- }
++ /*
++ * Not only the cpu but also the task_group of the parent might have
++ * been changed after parent->se.parent,cfs_rq were copied to
++ * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
++ * of child point to valid ones.
++ */
++ rcu_read_lock();
++ __set_task_cpu(p, this_cpu);
++ rcu_read_unlock();
+
+ update_curr(cfs_rq);
+
+diff --git a/lib/show_mem.c b/lib/show_mem.c
+index 4407f8c9b1f7..b7c72311ad0c 100644
+--- a/lib/show_mem.c
++++ b/lib/show_mem.c
+@@ -18,6 +18,9 @@ void show_mem(unsigned int filter)
+ printk("Mem-Info:\n");
+ show_free_areas(filter);
+
++ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
++ return;
++
+ for_each_online_pgdat(pgdat) {
+ unsigned long i, flags;
+
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index d80ac4bb0971..ed0ed8a41fba 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1882,6 +1882,8 @@ static void collapse_huge_page(struct mm_struct *mm,
+ goto out;
+
+ vma = find_vma(mm, address);
++ if (!vma)
++ goto out;
+ hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
+ hend = vma->vm_end & HPAGE_PMD_MASK;
+ if (address < hstart || address + HPAGE_PMD_SIZE > hend)
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index d027a2489a9f..204de6ab6292 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4385,7 +4385,13 @@ static int compare_thresholds(const void *a, const void *b)
+ const struct mem_cgroup_threshold *_a = a;
+ const struct mem_cgroup_threshold *_b = b;
+
+- return _a->threshold - _b->threshold;
++ if (_a->threshold > _b->threshold)
++ return 1;
++
++ if (_a->threshold < _b->threshold)
++ return -1;
++
++ return 0;
+ }
+
+ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index b5afea28cf83..d8762b234d2a 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1760,6 +1760,13 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
+ return;
+
+ /*
++ * Walking all memory to count page types is very expensive and should
++ * be inhibited in non-blockable contexts.
++ */
++ if (!(gfp_mask & __GFP_WAIT))
++ filter |= SHOW_MEM_FILTER_PAGE_COUNT;
++
++ /*
+ * This documents exceptions given to allocations in certain
+ * contexts that are allowed to allocate outside current's set
+ * of allowed nodes.
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index b81500c680af..a06deca97f0e 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1155,7 +1155,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+ mld2q = (struct mld2_query *)icmp6_hdr(skb);
+ if (!mld2q->mld2q_nsrcs)
+ group = &mld2q->mld2q_mca;
+- max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1;
++
++ max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL);
+ }
+
+ if (!group)
+diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
+index dd147d78a588..8ac946f32f9a 100644
+--- a/net/bridge/br_stp.c
++++ b/net/bridge/br_stp.c
+@@ -189,7 +189,7 @@ static void br_record_config_information(struct net_bridge_port *p,
+ p->designated_age = jiffies + bpdu->message_age;
+
+ mod_timer(&p->message_age_timer, jiffies
+- + (p->br->max_age - bpdu->message_age));
++ + (bpdu->max_age - bpdu->message_age));
+ }
+
+ /* called under bridge lock */
+diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
+index 5cf52225692e..84efbe4aa06a 100644
+--- a/net/caif/cfctrl.c
++++ b/net/caif/cfctrl.c
+@@ -288,9 +288,10 @@ int cfctrl_linkup_request(struct cflayer *layer,
+
+ count = cfctrl_cancel_req(&cfctrl->serv.layer,
+ user_layer);
+- if (count != 1)
++ if (count != 1) {
+ pr_err("Could not remove request (%d)", count);
+ return -ENODEV;
++ }
+ }
+ return 0;
+ }
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index f4f3f58f5234..a70f426c6d6f 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -1719,6 +1719,8 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
+ dout("osdc_start_request failed map, "
+ " will retry %lld\n", req->r_tid);
+ rc = 0;
++ } else {
++ __unregister_request(osdc, req);
+ }
+ goto out_unlock;
+ }
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index db4bb7a9ca78..9649cead0028 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -923,15 +923,14 @@ EXPORT_SYMBOL_GPL(__netpoll_cleanup);
+
+ void netpoll_cleanup(struct netpoll *np)
+ {
+- if (!np->dev)
+- return;
+-
+ rtnl_lock();
++ if (!np->dev)
++ goto out;
+ __netpoll_cleanup(np);
+- rtnl_unlock();
+-
+ dev_put(np->dev);
+ np->dev = NULL;
++out:
++ rtnl_unlock();
+ }
+ EXPORT_SYMBOL(netpoll_cleanup);
+
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index 77a65f031488..f0bdd36f207e 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -19,6 +19,9 @@
+ #include <net/sock.h>
+ #include <net/net_ratelimit.h>
+
++static int zero = 0;
++static int ushort_max = USHRT_MAX;
++
+ #ifdef CONFIG_RPS
+ static int rps_sock_flow_sysctl(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+@@ -192,7 +195,9 @@ static struct ctl_table netns_core_table[] = {
+ .data = &init_net.core.sysctl_somaxconn,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_dointvec
++ .extra1 = &zero,
++ .extra2 = &ushort_max,
++ .proc_handler = proc_dointvec_minmax
+ },
+ { }
+ };
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index cd2d639541bc..c7c6724c0d6e 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -72,7 +72,6 @@
+ #include <linux/init.h>
+ #include <linux/list.h>
+ #include <linux/slab.h>
+-#include <linux/prefetch.h>
+ #include <linux/export.h>
+ #include <net/net_namespace.h>
+ #include <net/ip.h>
+@@ -1773,10 +1772,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
+ if (!c)
+ continue;
+
+- if (IS_LEAF(c)) {
+- prefetch(rcu_dereference_rtnl(p->child[idx]));
++ if (IS_LEAF(c))
+ return (struct leaf *) c;
+- }
+
+ /* Rescan start scanning in new node */
+ p = (struct tnode *) c;
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index c8989a71bbd9..75b086029ec9 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -342,7 +342,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
+ pip->saddr = fl4.saddr;
+ pip->protocol = IPPROTO_IGMP;
+ pip->tot_len = 0; /* filled in later */
+- ip_select_ident(pip, &rt->dst, NULL);
++ ip_select_ident(skb, &rt->dst, NULL);
+ ((u8*)&pip[1])[0] = IPOPT_RA;
+ ((u8*)&pip[1])[1] = 4;
+ ((u8*)&pip[1])[2] = 0;
+@@ -683,7 +683,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
+ iph->daddr = dst;
+ iph->saddr = fl4.saddr;
+ iph->protocol = IPPROTO_IGMP;
+- ip_select_ident(iph, &rt->dst, NULL);
++ ip_select_ident(skb, &rt->dst, NULL);
+ ((u8*)&iph[1])[0] = IPOPT_RA;
+ ((u8*)&iph[1])[1] = 4;
+ ((u8*)&iph[1])[2] = 0;
+@@ -705,7 +705,7 @@ static void igmp_gq_timer_expire(unsigned long data)
+
+ in_dev->mr_gq_running = 0;
+ igmpv3_send_report(in_dev, NULL);
+- __in_dev_put(in_dev);
++ in_dev_put(in_dev);
+ }
+
+ static void igmp_ifc_timer_expire(unsigned long data)
+@@ -717,7 +717,7 @@ static void igmp_ifc_timer_expire(unsigned long data)
+ in_dev->mr_ifc_count--;
+ igmp_ifc_start_timer(in_dev, IGMP_Unsolicited_Report_Interval);
+ }
+- __in_dev_put(in_dev);
++ in_dev_put(in_dev);
+ }
+
+ static void igmp_ifc_event(struct in_device *in_dev)
+diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
+index 86f13c67ea85..58c4e696a6a7 100644
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -17,6 +17,7 @@
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+ #include <linux/net.h>
++#include <linux/workqueue.h>
+ #include <net/ip.h>
+ #include <net/inetpeer.h>
+ #include <net/secure_seq.h>
+@@ -31,8 +32,8 @@
+ * At the moment of writing this notes identifier of IP packets is generated
+ * to be unpredictable using this code only for packets subjected
+ * (actually or potentially) to defragmentation. I.e. DF packets less than
+- * PMTU in size uses a constant ID and do not use this code (see
+- * ip_select_ident() in include/net/ip.h).
++ * PMTU in size when local fragmentation is disabled use a constant ID and do
++ * not use this code (see ip_select_ident() in include/net/ip.h).
+ *
+ * Route cache entries hold references to our nodes.
+ * New cache entries get references via lookup by destination IP address in
+@@ -66,6 +67,11 @@
+
+ static struct kmem_cache *peer_cachep __read_mostly;
+
++static LIST_HEAD(gc_list);
++static const int gc_delay = 60 * HZ;
++static struct delayed_work gc_work;
++static DEFINE_SPINLOCK(gc_lock);
++
+ #define node_height(x) x->avl_height
+
+ #define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
+@@ -102,6 +108,50 @@ int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries m
+ int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */
+ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */
+
++static void inetpeer_gc_worker(struct work_struct *work)
++{
++ struct inet_peer *p, *n;
++ LIST_HEAD(list);
++
++ spin_lock_bh(&gc_lock);
++ list_replace_init(&gc_list, &list);
++ spin_unlock_bh(&gc_lock);
++
++ if (list_empty(&list))
++ return;
++
++ list_for_each_entry_safe(p, n, &list, gc_list) {
++
++ if(need_resched())
++ cond_resched();
++
++ if (p->avl_left != peer_avl_empty) {
++ list_add_tail(&p->avl_left->gc_list, &list);
++ p->avl_left = peer_avl_empty;
++ }
++
++ if (p->avl_right != peer_avl_empty) {
++ list_add_tail(&p->avl_right->gc_list, &list);
++ p->avl_right = peer_avl_empty;
++ }
++
++ n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
++
++ if (!atomic_read(&p->refcnt)) {
++ list_del(&p->gc_list);
++ kmem_cache_free(peer_cachep, p);
++ }
++ }
++
++ if (list_empty(&list))
++ return;
++
++ spin_lock_bh(&gc_lock);
++ list_splice(&list, &gc_list);
++ spin_unlock_bh(&gc_lock);
++
++ schedule_delayed_work(&gc_work, gc_delay);
++}
+
+ /* Called from ip_output.c:ip_init */
+ void __init inet_initpeers(void)
+@@ -126,6 +176,7 @@ void __init inet_initpeers(void)
+ 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
+ NULL);
+
++ INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
+ }
+
+ static int addr_compare(const struct inetpeer_addr *a,
+@@ -448,7 +499,7 @@ relookup:
+ p->pmtu_expires = 0;
+ p->pmtu_orig = 0;
+ memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
+-
++ INIT_LIST_HEAD(&p->gc_list);
+
+ /* Link the node. */
+ link_to_pool(p, base);
+@@ -508,3 +559,38 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
+ return rc;
+ }
+ EXPORT_SYMBOL(inet_peer_xrlim_allow);
++
++static void inetpeer_inval_rcu(struct rcu_head *head)
++{
++ struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);
++
++ spin_lock_bh(&gc_lock);
++ list_add_tail(&p->gc_list, &gc_list);
++ spin_unlock_bh(&gc_lock);
++
++ schedule_delayed_work(&gc_work, gc_delay);
++}
++
++void inetpeer_invalidate_tree(int family)
++{
++ struct inet_peer *old, *new, *prev;
++ struct inet_peer_base *base = family_to_base(family);
++
++ write_seqlock_bh(&base->lock);
++
++ old = base->root;
++ if (old == peer_avl_empty_rcu)
++ goto out;
++
++ new = peer_avl_empty_rcu;
++
++ prev = cmpxchg(&base->root, old, new);
++ if (prev == old) {
++ base->total = 0;
++ call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
++ }
++
++out:
++ write_sequnlock_bh(&base->lock);
++}
++EXPORT_SYMBOL(inetpeer_invalidate_tree);
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 0bc95f3977d2..daf408e8633c 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -162,7 +162,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
+ iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
+ iph->saddr = saddr;
+ iph->protocol = sk->sk_protocol;
+- ip_select_ident(iph, &rt->dst, sk);
++ ip_select_ident(skb, &rt->dst, sk);
+
+ if (opt && opt->opt.optlen) {
+ iph->ihl += opt->opt.optlen>>2;
+@@ -390,7 +390,7 @@ packet_routed:
+ ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
+ }
+
+- ip_select_ident_more(iph, &rt->dst, sk,
++ ip_select_ident_more(skb, &rt->dst, sk,
+ (skb_shinfo(skb)->gso_segs ?: 1) - 1);
+
+ skb->priority = sk->sk_priority;
+@@ -1334,7 +1334,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
+ iph->ihl = 5;
+ iph->tos = inet->tos;
+ iph->frag_off = df;
+- ip_select_ident(iph, &rt->dst, sk);
++ ip_select_ident(skb, &rt->dst, sk);
+ iph->ttl = ttl;
+ iph->protocol = sk->sk_protocol;
+ iph->saddr = fl4->saddr;
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 006439403213..b5e64e4c0646 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -1576,7 +1576,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
+ iph->protocol = IPPROTO_IPIP;
+ iph->ihl = 5;
+ iph->tot_len = htons(skb->len);
+- ip_select_ident(iph, skb_dst(skb), NULL);
++ ip_select_ident(skb, skb_dst(skb), NULL);
+ ip_send_check(iph);
+
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index e1d4f30bb718..28150145ce02 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -380,7 +380,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
+ iph->check = 0;
+ iph->tot_len = htons(length);
+ if (!iph->id)
+- ip_select_ident(iph, &rt->dst, NULL);
++ ip_select_ident(skb, &rt->dst, NULL);
+
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ }
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 94cdbc55ca7e..c45a155a3296 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -939,6 +939,7 @@ static void rt_cache_invalidate(struct net *net)
+ get_random_bytes(&shuffle, sizeof(shuffle));
+ atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
+ redirect_genid++;
++ inetpeer_invalidate_tree(AF_INET);
+ }
+
+ /*
+diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
+index f376b05cca81..b78eac2b7079 100644
+--- a/net/ipv4/tcp_cubic.c
++++ b/net/ipv4/tcp_cubic.c
+@@ -204,8 +204,8 @@ static u32 cubic_root(u64 a)
+ */
+ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
+ {
+- u64 offs;
+- u32 delta, t, bic_target, max_cnt;
++ u32 delta, bic_target, max_cnt;
++ u64 offs, t;
+
+ ca->ack_cnt++; /* count the number of ACKs */
+
+@@ -248,9 +248,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
+ * if the cwnd < 1 million packets !!!
+ */
+
++ t = (s32)(tcp_time_stamp - ca->epoch_start);
++ t += msecs_to_jiffies(ca->delay_min >> 3);
+ /* change the unit from HZ to bictcp_HZ */
+- t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3)
+- - ca->epoch_start) << BICTCP_HZ) / HZ;
++ t <<= BICTCP_HZ;
++ do_div(t, HZ);
+
+ if (t < ca->bic_K) /* t - K */
+ offs = ca->bic_K - t;
+@@ -412,7 +414,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
+ return;
+
+ /* Discard delay samples right after fast recovery */
+- if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
++ if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+ return;
+
+ delay = (rtt_us << 3) / USEC_PER_MSEC;
+diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
+index ed4bf11ef9f4..938553efbf8e 100644
+--- a/net/ipv4/xfrm4_mode_tunnel.c
++++ b/net/ipv4/xfrm4_mode_tunnel.c
+@@ -54,7 +54,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
+
+ top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
+ 0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
+- ip_select_ident(top_iph, dst->child, NULL);
++ ip_select_ident(skb, dst->child, NULL);
+
+ top_iph->ttl = ip4_dst_hoplimit(dst->child);
+
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 314bda227495..5d41293a6005 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -913,12 +913,10 @@ retry:
+ if (ifp->flags & IFA_F_OPTIMISTIC)
+ addr_flags |= IFA_F_OPTIMISTIC;
+
+- ift = !max_addresses ||
+- ipv6_count_addresses(idev) < max_addresses ?
+- ipv6_add_addr(idev, &addr, tmp_plen,
+- ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
+- addr_flags) : NULL;
+- if (!ift || IS_ERR(ift)) {
++ ift = ipv6_add_addr(idev, &addr, tmp_plen,
++ ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
++ addr_flags);
++ if (IS_ERR(ift)) {
+ in6_ifa_put(ifp);
+ in6_dev_put(idev);
+ printk(KERN_INFO
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index 90868fb42757..d50545371b55 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -911,6 +911,14 @@ static const struct icmp6_err {
+ .err = ECONNREFUSED,
+ .fatal = 1,
+ },
++ { /* POLICY_FAIL */
++ .err = EACCES,
++ .fatal = 1,
++ },
++ { /* REJECT_ROUTE */
++ .err = EACCES,
++ .fatal = 1,
++ },
+ };
+
+ int icmpv6_err_convert(u8 type, u8 code, int *err)
+@@ -922,7 +930,7 @@ int icmpv6_err_convert(u8 type, u8 code, int *err)
+ switch (type) {
+ case ICMPV6_DEST_UNREACH:
+ fatal = 1;
+- if (code <= ICMPV6_PORT_UNREACH) {
++ if (code < ARRAY_SIZE(tab_unreach)) {
+ *err = tab_unreach[code].err;
+ fatal = tab_unreach[code].fatal;
+ }
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 93718f3db79b..443724f54724 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -862,14 +862,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
+
+ if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
+ #ifdef CONFIG_IPV6_SUBTREES
+- if (fn->subtree)
+- fn = fib6_lookup_1(fn->subtree, args + 1);
++ if (fn->subtree) {
++ struct fib6_node *sfn;
++ sfn = fib6_lookup_1(fn->subtree,
++ args + 1);
++ if (!sfn)
++ goto backtrack;
++ fn = sfn;
++ }
+ #endif
+- if (!fn || fn->fn_flags & RTN_RTINFO)
++ if (fn->fn_flags & RTN_RTINFO)
+ return fn;
+ }
+ }
+-
++#ifdef CONFIG_IPV6_SUBTREES
++backtrack:
++#endif
+ if (fn->fn_flags & RTN_ROOT)
+ break;
+
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index db6004364313..91d0711caec6 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1125,6 +1125,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
+ * udp datagram
+ */
+ if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
++ struct frag_hdr fhdr;
++
+ skb = sock_alloc_send_skb(sk,
+ hh_len + fragheaderlen + transhdrlen + 20,
+ (flags & MSG_DONTWAIT), &err);
+@@ -1145,12 +1147,6 @@ static inline int ip6_ufo_append_data(struct sock *sk,
+
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum = 0;
+- }
+-
+- err = skb_append_datato_frags(sk,skb, getfrag, from,
+- (length - transhdrlen));
+- if (!err) {
+- struct frag_hdr fhdr;
+
+ /* Specify the length of each IPv6 datagram fragment.
+ * It has to be a multiple of 8.
+@@ -1161,15 +1157,10 @@ static inline int ip6_ufo_append_data(struct sock *sk,
+ ipv6_select_ident(&fhdr, rt);
+ skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
+ __skb_queue_tail(&sk->sk_write_queue, skb);
+-
+- return 0;
+ }
+- /* There is not enough support do UPD LSO,
+- * so follow normal path
+- */
+- kfree_skb(skb);
+
+- return err;
++ return skb_append_datato_frags(sk, skb, getfrag, from,
++ (length - transhdrlen));
+ }
+
+ static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
+@@ -1342,27 +1333,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ * --yoshfuji
+ */
+
+- cork->length += length;
+- if (length > mtu) {
+- int proto = sk->sk_protocol;
+- if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
+- ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
+- return -EMSGSIZE;
+- }
+-
+- if (proto == IPPROTO_UDP &&
+- (rt->dst.dev->features & NETIF_F_UFO)) {
++ if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
++ sk->sk_protocol == IPPROTO_RAW)) {
++ ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
++ return -EMSGSIZE;
++ }
+
+- err = ip6_ufo_append_data(sk, getfrag, from, length,
+- hh_len, fragheaderlen,
+- transhdrlen, mtu, flags, rt);
+- if (err)
+- goto error;
+- return 0;
+- }
++ skb = skb_peek_tail(&sk->sk_write_queue);
++ cork->length += length;
++ if (((length > mtu) ||
++ (skb && skb_is_gso(skb))) &&
++ (sk->sk_protocol == IPPROTO_UDP) &&
++ (rt->dst.dev->features & NETIF_F_UFO)) {
++ err = ip6_ufo_append_data(sk, getfrag, from, length,
++ hh_len, fragheaderlen,
++ transhdrlen, mtu, flags, rt);
++ if (err)
++ goto error;
++ return 0;
+ }
+
+- if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
++ if (!skb)
+ goto alloc_new_skb;
+
+ while (length > 0) {
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index c7ec4bb0c0f5..d20a9be8334e 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -2159,7 +2159,7 @@ static void mld_gq_timer_expire(unsigned long data)
+
+ idev->mc_gq_running = 0;
+ mld_send_report(idev, NULL);
+- __in6_dev_put(idev);
++ in6_dev_put(idev);
+ }
+
+ static void mld_ifc_timer_expire(unsigned long data)
+@@ -2172,7 +2172,7 @@ static void mld_ifc_timer_expire(unsigned long data)
+ if (idev->mc_ifc_count)
+ mld_ifc_start_timer(idev, idev->mc_maxdelay);
+ }
+- __in6_dev_put(idev);
++ in6_dev_put(idev);
+ }
+
+ static void mld_ifc_event(struct inet6_dev *idev)
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 9ffc37f600a4..bc55358fc789 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -447,7 +447,6 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
+ struct sk_buff *skb;
+ struct icmp6hdr *hdr;
+ int len;
+- int err;
+ u8 *opt;
+
+ if (!dev->addr_len)
+@@ -457,14 +456,12 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
+ if (llinfo)
+ len += ndisc_opt_addr_space(dev);
+
+- skb = sock_alloc_send_skb(sk,
+- (MAX_HEADER + sizeof(struct ipv6hdr) +
+- len + LL_ALLOCATED_SPACE(dev)),
+- 1, &err);
++ skb = alloc_skb((MAX_HEADER + sizeof(struct ipv6hdr) +
++ len + LL_ALLOCATED_SPACE(dev)), GFP_ATOMIC);
+ if (!skb) {
+ ND_PRINTK0(KERN_ERR
+- "ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n",
+- __func__, err);
++ "ICMPv6 ND: %s() failed to allocate an skb.\n",
++ __func__);
+ return NULL;
+ }
+
+@@ -492,6 +489,11 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev,
+ csum_partial(hdr,
+ len, 0));
+
++ /* Manually assign socket ownership as we avoid calling
++ * sock_alloc_send_pskb() to bypass wmem buffer limits
++ */
++ skb_set_owner_w(skb, sk);
++
+ return skb;
+ }
+
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index 411fe2ceb9bb..eba5deb79fe3 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -517,6 +517,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
+ head->tstamp = fq->q.stamp;
+ ipv6_hdr(head)->payload_len = htons(payload_len);
+ IP6CB(head)->nhoff = nhoff;
++ IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
+
+ /* Yes, and fold redundant checksum back. 8) */
+ if (head->ip_summed == CHECKSUM_COMPLETE)
+@@ -552,6 +553,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
+ const struct ipv6hdr *hdr = ipv6_hdr(skb);
+ struct net *net = dev_net(skb_dst(skb)->dev);
+
++ if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
++ goto fail_hdr;
++
+ IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
+
+ /* Jumbo payload inhibits frag. header */
+@@ -572,6 +576,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
+ ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
+
+ IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
++ IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
+ return 1;
+ }
+
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index aa2d7206ee8a..38c08136baa2 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -853,7 +853,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+ iph->daddr = cp->daddr.ip;
+ iph->saddr = saddr;
+ iph->ttl = old_iph->ttl;
+- ip_select_ident(iph, &rt->dst, NULL);
++ ip_select_ident(skb, &rt->dst, NULL);
+
+ /* Another hack: avoid icmp_send in ip_fragment */
+ skb->local_df = 1;
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index f08b9166119b..caa5affa4b29 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -86,7 +86,7 @@ struct htb_class {
+ unsigned int children;
+ struct htb_class *parent; /* parent class */
+
+- int prio; /* these two are used only by leaves... */
++ u32 prio; /* these two are used only by leaves... */
+ int quantum; /* but stored for parent-to-leaf return */
+
+ union {
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 810427833bcd..0b6a3913ef53 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -205,45 +205,24 @@ out:
+ in6_dev_put(idev);
+ }
+
+-/* Based on tcp_v6_xmit() in tcp_ipv6.c. */
+ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
+ {
+ struct sock *sk = skb->sk;
+ struct ipv6_pinfo *np = inet6_sk(sk);
+- struct flowi6 fl6;
+-
+- memset(&fl6, 0, sizeof(fl6));
+-
+- fl6.flowi6_proto = sk->sk_protocol;
+-
+- /* Fill in the dest address from the route entry passed with the skb
+- * and the source address from the transport.
+- */
+- ipv6_addr_copy(&fl6.daddr, &transport->ipaddr.v6.sin6_addr);
+- ipv6_addr_copy(&fl6.saddr, &transport->saddr.v6.sin6_addr);
+-
+- fl6.flowlabel = np->flow_label;
+- IP6_ECN_flow_xmit(sk, fl6.flowlabel);
+- if (ipv6_addr_type(&fl6.saddr) & IPV6_ADDR_LINKLOCAL)
+- fl6.flowi6_oif = transport->saddr.v6.sin6_scope_id;
+- else
+- fl6.flowi6_oif = sk->sk_bound_dev_if;
+-
+- if (np->opt && np->opt->srcrt) {
+- struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
+- ipv6_addr_copy(&fl6.daddr, rt0->addr);
+- }
++ struct flowi6 *fl6 = &transport->fl.u.ip6;
+
+ SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n",
+ __func__, skb, skb->len,
+- &fl6.saddr, &fl6.daddr);
++ &fl6->saddr, &fl6->daddr);
+
+- SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
++ IP6_ECN_flow_xmit(sk, fl6->flowlabel);
+
+ if (!(transport->param_flags & SPP_PMTUD_ENABLE))
+ skb->local_df = 1;
+
+- return ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
++ SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
++
++ return ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
+ }
+
+ /* Returns the dst cache entry for the given source and destination ip
+@@ -256,10 +235,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ struct dst_entry *dst = NULL;
+ struct flowi6 *fl6 = &fl->u.ip6;
+ struct sctp_bind_addr *bp;
++ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct sctp_sockaddr_entry *laddr;
+ union sctp_addr *baddr = NULL;
+ union sctp_addr *daddr = &t->ipaddr;
+ union sctp_addr dst_saddr;
++ struct in6_addr *final_p, final;
+ __u8 matchlen = 0;
+ __u8 bmatchlen;
+ sctp_scope_t scope;
+@@ -282,7 +263,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6->saddr);
+ }
+
+- dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
++ final_p = fl6_update_dst(fl6, np->opt, &final);
++ dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
+ if (!asoc || saddr)
+ goto out;
+
+@@ -333,10 +315,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ }
+ }
+ rcu_read_unlock();
++
+ if (baddr) {
+ ipv6_addr_copy(&fl6->saddr, &baddr->v6.sin6_addr);
+ fl6->fl6_sport = baddr->v6.sin6_port;
+- dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
++ final_p = fl6_update_dst(fl6, np->opt, &final);
++ dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
+ }
+
+ out:
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
+index 9032d5001a9e..76388b083f28 100644
+--- a/net/sctp/sm_sideeffect.c
++++ b/net/sctp/sm_sideeffect.c
+@@ -1604,9 +1604,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
+ asoc->outqueue.outstanding_bytes;
+ sackh.num_gap_ack_blocks = 0;
+ sackh.num_dup_tsns = 0;
+- chunk->subh.sack_hdr = &sackh;
+ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
+- SCTP_CHUNK(chunk));
++ SCTP_SACKH(&sackh));
+ break;
+
+ case SCTP_CMD_DISCARD_PACKET:
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index ba0108f8b833..c53d01e0a2cf 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -814,6 +814,9 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
+ goto skip_mkasconf;
+ }
+
++ if (laddr == NULL)
++ return -EINVAL;
++
+ /* We do not need RCU protection throughout this loop
+ * because this is done under a socket lock from the
+ * setsockopt call.
+diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
+index e728d4ce2a1b..a224a38f0218 100644
+--- a/net/tipc/eth_media.c
++++ b/net/tipc/eth_media.c
+@@ -53,6 +53,7 @@ struct eth_bearer {
+ struct tipc_bearer *bearer;
+ struct net_device *dev;
+ struct packet_type tipc_packet_type;
++ struct work_struct setup;
+ };
+
+ static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
+@@ -121,6 +122,17 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
+ }
+
+ /**
++ * setup_bearer - setup association between Ethernet bearer and interface
++ */
++static void setup_bearer(struct work_struct *work)
++{
++ struct eth_bearer *eb_ptr =
++ container_of(work, struct eth_bearer, setup);
++
++ dev_add_pack(&eb_ptr->tipc_packet_type);
++}
++
++/**
+ * enable_bearer - attach TIPC bearer to an Ethernet interface
+ */
+
+@@ -164,7 +176,8 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
+ eb_ptr->tipc_packet_type.func = recv_msg;
+ eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
+ INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
+- dev_add_pack(&eb_ptr->tipc_packet_type);
++ INIT_WORK(&eb_ptr->setup, setup_bearer);
++ schedule_work(&eb_ptr->setup);
+
+ /* Associate TIPC bearer with Ethernet bearer */
+
+diff --git a/scripts/kernel-doc b/scripts/kernel-doc
+index d793001929cf..ba3d9dfada52 100755
+--- a/scripts/kernel-doc
++++ b/scripts/kernel-doc
+@@ -2044,6 +2044,9 @@ sub process_file($) {
+
+ $section_counter = 0;
+ while (<IN>) {
++ while (s/\\\s*$//) {
++ $_ .= <IN>;
++ }
+ if ($state == 0) {
+ if (/$doc_start/o) {
+ $state = 1; # next line is always the function name
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index a166a857956a..7ebe4b772fe4 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2621,6 +2621,7 @@ static struct snd_pci_quirk msi_black_list[] __devinitdata = {
+ SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
+ SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
+ SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
++ SND_PCI_QUIRK(0x1179, 0xfb44, "Toshiba Satellite C870", 0), /* AMD Hudson */
+ SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */
+ SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */
+ {}
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 55d9b302ef41..05f097a67a69 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -512,6 +512,17 @@ static int hdmi_channel_allocation(struct hdmi_eld *eld, int channels)
+ }
+ }
+
++ if (!ca) {
++ /* if there was no match, select the regular ALSA channel
++ * allocation with the matching number of channels */
++ for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
++ if (channels == channel_allocations[i].channels) {
++ ca = channel_allocations[i].ca_index;
++ break;
++ }
++ }
++ }
++
+ snd_print_channel_allocation(eld->spk_alloc, buf, sizeof(buf));
+ snd_printdd("HDMI: select CA 0x%x for %d-channel allocation: %s\n",
+ ca, channels, buf);
+diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
+index 5ca122e51183..290f4d361ecc 100644
+--- a/sound/soc/codecs/88pm860x-codec.c
++++ b/sound/soc/codecs/88pm860x-codec.c
+@@ -351,6 +351,9 @@ static int snd_soc_put_volsw_2r_st(struct snd_kcontrol *kcontrol,
+ val = ucontrol->value.integer.value[0];
+ val2 = ucontrol->value.integer.value[1];
+
++ if (val >= ARRAY_SIZE(st_table) || val2 >= ARRAY_SIZE(st_table))
++ return -EINVAL;
++
+ err = snd_soc_update_bits(codec, reg, 0x3f, st_table[val].m);
+ if (err < 0)
+ return err;
+diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
+index 26d7b089fb9c..a52c15bcb778 100644
+--- a/sound/soc/codecs/max98095.c
++++ b/sound/soc/codecs/max98095.c
+@@ -1861,7 +1861,7 @@ static int max98095_put_eq_enum(struct snd_kcontrol *kcontrol,
+ struct max98095_pdata *pdata = max98095->pdata;
+ int channel = max98095_get_eq_channel(kcontrol->id.name);
+ struct max98095_cdata *cdata;
+- int sel = ucontrol->value.integer.value[0];
++ unsigned int sel = ucontrol->value.integer.value[0];
+ struct max98095_eq_cfg *coef_set;
+ int fs, best, best_val, i;
+ int regmask, regsave;
+@@ -2014,7 +2014,7 @@ static int max98095_put_bq_enum(struct snd_kcontrol *kcontrol,
+ struct max98095_pdata *pdata = max98095->pdata;
+ int channel = max98095_get_bq_channel(codec, kcontrol->id.name);
+ struct max98095_cdata *cdata;
+- int sel = ucontrol->value.integer.value[0];
++ unsigned int sel = ucontrol->value.integer.value[0];
+ struct max98095_biquad_cfg *coef_set;
+ int fs, best, best_val, i;
+ int regmask, regsave;
+diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
+index 2df253c18568..ef96ca6bfbe1 100644
+--- a/sound/soc/codecs/wm8960.c
++++ b/sound/soc/codecs/wm8960.c
+@@ -805,9 +805,9 @@ static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
+ if (pll_div.k) {
+ reg |= 0x20;
+
+- snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f);
+- snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff);
+- snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0x1ff);
++ snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 16) & 0xff);
++ snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 8) & 0xff);
++ snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0xff);
+ }
+ snd_soc_write(codec, WM8960_PLL1, reg);
+
+diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
+index 42dffa07f819..f7a7b9d1d4c0 100644
+--- a/tools/perf/util/map.c
++++ b/tools/perf/util/map.c
+@@ -16,6 +16,7 @@ const char *map_type__name[MAP__NR_TYPES] = {
+ static inline int is_anon_memory(const char *filename)
+ {
+ return !strcmp(filename, "//anon") ||
++ !strcmp(filename, "/dev/zero (deleted)") ||
+ !strcmp(filename, "/anon_hugepage (deleted)");
+ }
+
diff --git a/1052_linux-3.2.53.patch b/1052_linux-3.2.53.patch
new file mode 100644
index 00000000..85ac7768
--- /dev/null
+++ b/1052_linux-3.2.53.patch
@@ -0,0 +1,3357 @@
+diff --git a/Makefile b/Makefile
+index 1dd2c094a3d3..90f57dc45a60 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 52
++SUBLEVEL = 53
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
+index 1881b316ca45..b19559cf57a8 100644
+--- a/arch/mips/include/asm/jump_label.h
++++ b/arch/mips/include/asm/jump_label.h
+@@ -22,7 +22,7 @@
+
+ static __always_inline bool arch_static_branch(struct jump_label_key *key)
+ {
+- asm goto("1:\tnop\n\t"
++ asm_volatile_goto("1:\tnop\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ WORD_INSN " 1b, %l[l_yes], %0\n\t"
+diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
+index 37aabd772fbb..d2d58258aea6 100644
+--- a/arch/parisc/kernel/head.S
++++ b/arch/parisc/kernel/head.S
+@@ -195,6 +195,8 @@ common_stext:
+ ldw MEM_PDC_HI(%r0),%r6
+ depd %r6, 31, 32, %r3 /* move to upper word */
+
++ mfctl %cr30,%r6 /* PCX-W2 firmware bug */
++
+ ldo PDC_PSW(%r0),%arg0 /* 21 */
+ ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
+ ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
+@@ -203,6 +205,8 @@ common_stext:
+ copy %r0,%arg3
+
+ stext_pdc_ret:
++ mtctl %r6,%cr30 /* restore task thread info */
++
+ /* restore rfi target address*/
+ ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
+ tophys_r1 %r10
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index f19e6604026a..cd8b02f69bf5 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -811,14 +811,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+ else {
+
+ /*
+- * The kernel should never fault on its own address space.
++ * The kernel should never fault on its own address space,
++ * unless pagefault_disable() was called before.
+ */
+
+- if (fault_space == 0)
++ if (fault_space == 0 && !in_atomic())
+ {
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+ parisc_terminate("Kernel Fault", regs, code, fault_address);
+-
+ }
+ }
+
+diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
+index 938986e412f1..ee33888878c2 100644
+--- a/arch/powerpc/include/asm/jump_label.h
++++ b/arch/powerpc/include/asm/jump_label.h
+@@ -19,7 +19,7 @@
+
+ static __always_inline bool arch_static_branch(struct jump_label_key *key)
+ {
+- asm goto("1:\n\t"
++ asm_volatile_goto("1:\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 5e8dc082a2eb..e3b3cf922591 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -922,7 +922,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+ BEGIN_FTR_SECTION
+ mfspr r8, SPRN_DSCR
+ ld r7, HSTATE_DSCR(r13)
+- std r8, VCPU_DSCR(r7)
++ std r8, VCPU_DSCR(r9)
+ mtspr SPRN_DSCR, r7
+ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
+diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
+index 95a6cf2b5b67..8512d0a0fa56 100644
+--- a/arch/s390/include/asm/jump_label.h
++++ b/arch/s390/include/asm/jump_label.h
+@@ -15,7 +15,7 @@
+
+ static __always_inline bool arch_static_branch(struct jump_label_key *key)
+ {
+- asm goto("0: brcl 0,0\n"
++ asm_volatile_goto("0: brcl 0,0\n"
+ ".pushsection __jump_table, \"aw\"\n"
+ ASM_ALIGN "\n"
+ ASM_PTR " 0b, %l[label], %0\n"
+diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
+index fc73a82366f8..e17b65bf7673 100644
+--- a/arch/sparc/include/asm/jump_label.h
++++ b/arch/sparc/include/asm/jump_label.h
+@@ -9,7 +9,7 @@
+
+ static __always_inline bool arch_static_branch(struct jump_label_key *key)
+ {
+- asm goto("1:\n\t"
++ asm_volatile_goto("1:\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h
+index 63294f5a8efb..4f7ae39fa202 100644
+--- a/arch/tile/include/asm/percpu.h
++++ b/arch/tile/include/asm/percpu.h
+@@ -15,9 +15,37 @@
+ #ifndef _ASM_TILE_PERCPU_H
+ #define _ASM_TILE_PERCPU_H
+
+-register unsigned long __my_cpu_offset __asm__("tp");
+-#define __my_cpu_offset __my_cpu_offset
+-#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
++register unsigned long my_cpu_offset_reg asm("tp");
++
++#ifdef CONFIG_PREEMPT
++/*
++ * For full preemption, we can't just use the register variable
++ * directly, since we need barrier() to hazard against it, causing the
++ * compiler to reload anything computed from a previous "tp" value.
++ * But we also don't want to use volatile asm, since we'd like the
++ * compiler to be able to cache the value across multiple percpu reads.
++ * So we use a fake stack read as a hazard against barrier().
++ * The 'U' constraint is like 'm' but disallows postincrement.
++ */
++static inline unsigned long __my_cpu_offset(void)
++{
++ unsigned long tp;
++ register unsigned long *sp asm("sp");
++ asm("move %0, tp" : "=r" (tp) : "U" (*sp));
++ return tp;
++}
++#define __my_cpu_offset __my_cpu_offset()
++#else
++/*
++ * We don't need to hazard against barrier() since "tp" doesn't ever
++ * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
++ * changes at function call points, at which we are already re-reading
++ * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
++ */
++#define __my_cpu_offset my_cpu_offset_reg
++#endif
++
++#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
+
+ #include <asm-generic/percpu.h>
+
+diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
+index 829df49dee99..41ebbfebb333 100644
+--- a/arch/um/kernel/exitcode.c
++++ b/arch/um/kernel/exitcode.c
+@@ -40,9 +40,11 @@ static ssize_t exitcode_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+ {
+ char *end, buf[sizeof("nnnnn\0")];
++ size_t size;
+ int tmp;
+
+- if (copy_from_user(buf, buffer, count))
++ size = min(count, sizeof(buf));
++ if (copy_from_user(buf, buffer, size))
+ return -EFAULT;
+
+ tmp = simple_strtol(buf, &end, 0);
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index 0c3b775ab954..a315f1ccbf93 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -334,7 +334,7 @@ extern const char * const x86_power_flags[32];
+ static __always_inline __pure bool __static_cpu_has(u16 bit)
+ {
+ #if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
+- asm goto("1: jmp %l[t_no]\n"
++ asm_volatile_goto("1: jmp %l[t_no]\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n"
+diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
+index a32b18ce6ead..e12c1bc3426b 100644
+--- a/arch/x86/include/asm/jump_label.h
++++ b/arch/x86/include/asm/jump_label.h
+@@ -13,7 +13,7 @@
+
+ static __always_inline bool arch_static_branch(struct jump_label_key *key)
+ {
+- asm goto("1:"
++ asm_volatile_goto("1:"
+ JUMP_LABEL_INITIAL_NOP
+ ".pushsection __jump_table, \"aw\" \n\t"
+ _ASM_ALIGN "\n\t"
+diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
+index f2220b5bdce6..cf3e9cb91eb7 100644
+--- a/arch/xtensa/kernel/signal.c
++++ b/arch/xtensa/kernel/signal.c
+@@ -346,7 +346,7 @@ static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+
+ sp = regs->areg[1];
+
+- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) {
++ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
+ sp = current->sas_ss_sp + current->sas_ss_size;
+ }
+
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index aea627ee2d2d..7d1a478c7500 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -1286,14 +1286,14 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc)
+ * should be retried. To be used from EH.
+ *
+ * SCSI midlayer limits the number of retries to scmd->allowed.
+- * scmd->retries is decremented for commands which get retried
++ * scmd->allowed is incremented for commands which get retried
+ * due to unrelated failures (qc->err_mask is zero).
+ */
+ void ata_eh_qc_retry(struct ata_queued_cmd *qc)
+ {
+ struct scsi_cmnd *scmd = qc->scsicmd;
+- if (!qc->err_mask && scmd->retries)
+- scmd->retries--;
++ if (!qc->err_mask)
++ scmd->allowed++;
+ __ata_eh_qc_complete(qc);
+ }
+
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index b651733d7628..c244f0ed61d6 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -668,7 +668,7 @@ static void set_timer_rand_state(unsigned int irq,
+ */
+ void add_device_randomness(const void *buf, unsigned int size)
+ {
+- unsigned long time = get_cycles() ^ jiffies;
++ unsigned long time = random_get_entropy() ^ jiffies;
+
+ mix_pool_bytes(&input_pool, buf, size, NULL);
+ mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
+@@ -705,7 +705,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+ goto out;
+
+ sample.jiffies = jiffies;
+- sample.cycles = get_cycles();
++ sample.cycles = random_get_entropy();
+ sample.num = num;
+ mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
+
+@@ -772,7 +772,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
+ struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
+ unsigned long now = jiffies;
+- __u32 input[4], cycles = get_cycles();
++ __u32 input[4], cycles = random_get_entropy();
+
+ input[0] = cycles ^ jiffies;
+ input[1] = irq;
+@@ -1480,12 +1480,11 @@ ctl_table random_table[] = {
+
+ static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
+
+-static int __init random_int_secret_init(void)
++int random_int_secret_init(void)
+ {
+ get_random_bytes(random_int_secret, sizeof(random_int_secret));
+ return 0;
+ }
+-late_initcall(random_int_secret_init);
+
+ /*
+ * Get a random word for internal kernel use only. Similar to urandom but
+@@ -1504,7 +1503,7 @@ unsigned int get_random_int(void)
+
+ hash = get_cpu_var(get_random_int_hash);
+
+- hash[0] += current->pid + jiffies + get_cycles();
++ hash[0] += current->pid + jiffies + random_get_entropy();
+ md5_transform(hash, random_int_secret);
+ ret = hash[0];
+ put_cpu_var(get_random_int_hash);
+diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
+index 46bbf4317dae..66d53841679a 100644
+--- a/drivers/connector/cn_proc.c
++++ b/drivers/connector/cn_proc.c
+@@ -64,6 +64,7 @@ void proc_fork_connector(struct task_struct *task)
+
+ msg = (struct cn_msg*)buffer;
+ ev = (struct proc_event*)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -79,6 +80,7 @@ void proc_fork_connector(struct task_struct *task)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ /* If cn_netlink_send() failed, the data is not sent */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+@@ -95,6 +97,7 @@ void proc_exec_connector(struct task_struct *task)
+
+ msg = (struct cn_msg*)buffer;
+ ev = (struct proc_event*)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -105,6 +108,7 @@ void proc_exec_connector(struct task_struct *task)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -121,6 +125,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
+
+ msg = (struct cn_msg*)buffer;
+ ev = (struct proc_event*)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ ev->what = which_id;
+ ev->event_data.id.process_pid = task->pid;
+ ev->event_data.id.process_tgid = task->tgid;
+@@ -144,6 +149,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -159,6 +165,7 @@ void proc_sid_connector(struct task_struct *task)
+
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -169,6 +176,7 @@ void proc_sid_connector(struct task_struct *task)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -184,6 +192,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
+
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -202,6 +211,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -217,6 +227,7 @@ void proc_comm_connector(struct task_struct *task)
+
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -228,6 +239,7 @@ void proc_comm_connector(struct task_struct *task)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -243,6 +255,7 @@ void proc_exit_connector(struct task_struct *task)
+
+ msg = (struct cn_msg*)buffer;
+ ev = (struct proc_event*)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -255,6 +268,7 @@ void proc_exit_connector(struct task_struct *task)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+@@ -278,6 +292,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
+
+ msg = (struct cn_msg*)buffer;
+ ev = (struct proc_event*)msg->data;
++ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ msg->seq = rcvd_seq;
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+@@ -287,6 +302,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = rcvd_ack + 1;
+ msg->len = sizeof(*ev);
++ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+ }
+
+diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
+index dde6a0fad408..ea6efe86468e 100644
+--- a/drivers/connector/connector.c
++++ b/drivers/connector/connector.c
+@@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb)
+ static void cn_rx_skb(struct sk_buff *__skb)
+ {
+ struct nlmsghdr *nlh;
+- int err;
+ struct sk_buff *skb;
++ int len, err;
+
+ skb = skb_get(__skb);
+
+ if (skb->len >= NLMSG_SPACE(0)) {
+ nlh = nlmsg_hdr(skb);
++ len = nlmsg_len(nlh);
+
+- if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
++ if (len < (int)sizeof(struct cn_msg) ||
+ skb->len < nlh->nlmsg_len ||
+- nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
++ len > CONNECTOR_MAX_MSG_SIZE) {
+ kfree_skb(skb);
+ return;
+ }
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 40c187c60f44..acfe56787e5f 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -408,9 +408,16 @@ long drm_ioctl(struct file *filp,
+ asize = drv_size;
+ }
+ else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
++ u32 drv_size;
++
+ ioctl = &drm_ioctls[nr];
+- cmd = ioctl->cmd;
++
++ drv_size = _IOC_SIZE(ioctl->cmd);
+ usize = asize = _IOC_SIZE(cmd);
++ if (drv_size > asize)
++ asize = drv_size;
++
++ cmd = ioctl->cmd;
+ } else
+ goto err_i1;
+
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 317129406451..475a275cf86b 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1390,7 +1390,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ * does the same thing and more.
+ */
+ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
+- (rdev->family != CHIP_RS880))
++ (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ }
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index a68057a03c6d..5efba479fe7e 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -1797,7 +1797,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ rdev->config.evergreen.sx_max_export_size = 256;
+ rdev->config.evergreen.sx_max_export_pos_size = 64;
+ rdev->config.evergreen.sx_max_export_smx_size = 192;
+- rdev->config.evergreen.max_hw_contexts = 8;
++ rdev->config.evergreen.max_hw_contexts = 4;
+ rdev->config.evergreen.sq_num_cf_insts = 2;
+
+ rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
+index 30cac58422b7..0b86d47e49ed 100644
+--- a/drivers/hwmon/applesmc.c
++++ b/drivers/hwmon/applesmc.c
+@@ -212,6 +212,7 @@ static int send_argument(const char *key)
+
+ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ {
++ u8 status, data = 0;
+ int i;
+
+ if (send_command(cmd) || send_argument(key)) {
+@@ -219,6 +220,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ return -EIO;
+ }
+
++ /* This has no effect on newer (2012) SMCs */
+ outb(len, APPLESMC_DATA_PORT);
+
+ for (i = 0; i < len; i++) {
+@@ -229,6 +231,17 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
+ buffer[i] = inb(APPLESMC_DATA_PORT);
+ }
+
++ /* Read the data port until bit0 is cleared */
++ for (i = 0; i < 16; i++) {
++ udelay(APPLESMC_MIN_WAIT);
++ status = inb(APPLESMC_CMD_PORT);
++ if (!(status & 0x01))
++ break;
++ data = inb(APPLESMC_DATA_PORT);
++ }
++ if (i)
++ pr_warn("flushed %d bytes, last value is: %d\n", i, data);
++
+ return 0;
+ }
+
+diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
+index 3ac415675b6c..75c182bfd128 100644
+--- a/drivers/md/dm-snap-persistent.c
++++ b/drivers/md/dm-snap-persistent.c
+@@ -269,6 +269,14 @@ static chunk_t area_location(struct pstore *ps, chunk_t area)
+ return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
+ }
+
++static void skip_metadata(struct pstore *ps)
++{
++ uint32_t stride = ps->exceptions_per_area + 1;
++ chunk_t next_free = ps->next_free;
++ if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
++ ps->next_free++;
++}
++
+ /*
+ * Read or write a metadata area. Remembering to skip the first
+ * chunk which holds the header.
+@@ -502,6 +510,8 @@ static int read_exceptions(struct pstore *ps,
+
+ ps->current_area--;
+
++ skip_metadata(ps);
++
+ return 0;
+ }
+
+@@ -616,8 +626,6 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
+ struct dm_exception *e)
+ {
+ struct pstore *ps = get_info(store);
+- uint32_t stride;
+- chunk_t next_free;
+ sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
+
+ /* Is there enough room ? */
+@@ -630,10 +638,8 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
+ * Move onto the next free pending, making sure to take
+ * into account the location of the metadata chunks.
+ */
+- stride = (ps->exceptions_per_area + 1);
+- next_free = ++ps->next_free;
+- if (sector_div(next_free, stride) == 1)
+- ps->next_free++;
++ ps->next_free++;
++ skip_metadata(ps);
+
+ atomic_inc(&ps->pending_count);
+ return 0;
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index a3190574c3e6..de87f82732c7 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -658,14 +658,14 @@ static size_t can_get_size(const struct net_device *dev)
+ size_t size;
+
+ size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
+- size += sizeof(struct can_ctrlmode); /* IFLA_CAN_CTRLMODE */
++ size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
+- size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
+- size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */
++ size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
++ size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
+ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
+- size += sizeof(struct can_berr_counter);
++ size += nla_total_size(sizeof(struct can_berr_counter));
+ if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
+- size += sizeof(struct can_bittiming_const);
++ size += nla_total_size(sizeof(struct can_bittiming_const));
+
+ return size;
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 4c50ac0a0f76..bbb669235fa2 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -516,6 +516,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
+ if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
+ __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
++ skb_record_rx_queue(skb, fp->index);
+ napi_gro_receive(&fp->napi, skb);
+ } else {
+ DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
+diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
+index 4236b82f9e35..4aa830f19277 100644
+--- a/drivers/net/ethernet/realtek/8139cp.c
++++ b/drivers/net/ethernet/realtek/8139cp.c
+@@ -1234,6 +1234,7 @@ static void cp_tx_timeout(struct net_device *dev)
+ cp_clean_rings(cp);
+ rc = cp_init_rings(cp);
+ cp_start_hw(cp);
++ cp_enable_irq(cp);
+
+ netif_wake_queue(dev);
+
+diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
+index fd8115ef577d..10668eb1bfe0 100644
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -873,8 +873,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
+ netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
+ mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
+ emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
+- }
+- if (!netdev_mc_empty(ndev)) {
++ } else if (!netdev_mc_empty(ndev)) {
+ struct netdev_hw_addr *ha;
+
+ mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
+diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
+index ebb9f24eefb5..7a4c49145034 100644
+--- a/drivers/net/wan/farsync.c
++++ b/drivers/net/wan/farsync.c
+@@ -1972,6 +1972,7 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
+ }
+
+ i = port->index;
++ memset(&sync, 0, sizeof(sync));
+ sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
+ /* Lucky card and linux use same encoding here */
+ sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
+diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
+index 44b707197258..c643d77720ea 100644
+--- a/drivers/net/wan/wanxl.c
++++ b/drivers/net/wan/wanxl.c
+@@ -355,6 +355,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
++ memset(&line, 0, sizeof(line));
+ line.clock_type = get_status(port)->clocking;
+ line.clock_rate = 0;
+ line.loopback = 0;
+diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
+index a97a52adda66..408477d11ea4 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
+@@ -270,11 +270,6 @@ struct iwl_cfg iwl2000_2bgn_cfg = {
+ .ht_params = &iwl2000_ht_params,
+ };
+
+-struct iwl_cfg iwl2000_2bg_cfg = {
+- .name = "2000 Series 2x2 BG",
+- IWL_DEVICE_2000,
+-};
+-
+ struct iwl_cfg iwl2000_2bgn_d_cfg = {
+ .name = "2000D Series 2x2 BGN",
+ IWL_DEVICE_2000,
+@@ -304,11 +299,6 @@ struct iwl_cfg iwl2030_2bgn_cfg = {
+ .ht_params = &iwl2000_ht_params,
+ };
+
+-struct iwl_cfg iwl2030_2bg_cfg = {
+- .name = "2000 Series 2x2 BG/BT",
+- IWL_DEVICE_2030,
+-};
+-
+ #define IWL_DEVICE_105 \
+ .fw_name_pre = IWL105_FW_PRE, \
+ .ucode_api_max = IWL105_UCODE_API_MAX, \
+@@ -326,11 +316,6 @@ struct iwl_cfg iwl2030_2bg_cfg = {
+ .rx_with_siso_diversity = true, \
+ .iq_invert = true \
+
+-struct iwl_cfg iwl105_bg_cfg = {
+- .name = "105 Series 1x1 BG",
+- IWL_DEVICE_105,
+-};
+-
+ struct iwl_cfg iwl105_bgn_cfg = {
+ .name = "105 Series 1x1 BGN",
+ IWL_DEVICE_105,
+@@ -361,11 +346,6 @@ struct iwl_cfg iwl105_bgn_d_cfg = {
+ .rx_with_siso_diversity = true, \
+ .iq_invert = true \
+
+-struct iwl_cfg iwl135_bg_cfg = {
+- .name = "135 Series 1x1 BG/BT",
+- IWL_DEVICE_135,
+-};
+-
+ struct iwl_cfg iwl135_bgn_cfg = {
+ .name = "135 Series 1x1 BGN/BT",
+ IWL_DEVICE_135,
+diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
+index 4ac4ef0907ea..e1a43c432bc6 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
+@@ -411,6 +411,17 @@ struct iwl_cfg iwl6005_2agn_d_cfg = {
+ .ht_params = &iwl6000_ht_params,
+ };
+
++struct iwl_cfg iwl6005_2agn_mow1_cfg = {
++ .name = "Intel(R) Centrino(R) Advanced-N 6206 AGN",
++ IWL_DEVICE_6005,
++ .ht_params = &iwl6000_ht_params,
++};
++struct iwl_cfg iwl6005_2agn_mow2_cfg = {
++ .name = "Intel(R) Centrino(R) Advanced-N 6207 AGN",
++ IWL_DEVICE_6005,
++ .ht_params = &iwl6000_ht_params,
++};
++
+ #define IWL_DEVICE_6030 \
+ .fw_name_pre = IWL6030_FW_PRE, \
+ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
+@@ -469,14 +480,10 @@ struct iwl_cfg iwl6035_2agn_cfg = {
+ .ht_params = &iwl6000_ht_params,
+ };
+
+-struct iwl_cfg iwl6035_2abg_cfg = {
+- .name = "6035 Series 2x2 ABG/BT",
+- IWL_DEVICE_6030,
+-};
+-
+-struct iwl_cfg iwl6035_2bg_cfg = {
+- .name = "6035 Series 2x2 BG/BT",
+- IWL_DEVICE_6030,
++struct iwl_cfg iwl6035_2agn_sff_cfg = {
++ .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
++ IWL_DEVICE_6035,
++ .ht_params = &iwl6000_ht_params,
+ };
+
+ struct iwl_cfg iwl1030_bgn_cfg = {
+diff --git a/drivers/net/wireless/iwlwifi/iwl-cfg.h b/drivers/net/wireless/iwlwifi/iwl-cfg.h
+index 2a2dc4597ba1..e786497ce9c0 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-cfg.h
++++ b/drivers/net/wireless/iwlwifi/iwl-cfg.h
+@@ -80,6 +80,8 @@ extern struct iwl_cfg iwl6005_2abg_cfg;
+ extern struct iwl_cfg iwl6005_2bg_cfg;
+ extern struct iwl_cfg iwl6005_2agn_sff_cfg;
+ extern struct iwl_cfg iwl6005_2agn_d_cfg;
++extern struct iwl_cfg iwl6005_2agn_mow1_cfg;
++extern struct iwl_cfg iwl6005_2agn_mow2_cfg;
+ extern struct iwl_cfg iwl1030_bgn_cfg;
+ extern struct iwl_cfg iwl1030_bg_cfg;
+ extern struct iwl_cfg iwl6030_2agn_cfg;
+@@ -101,17 +103,12 @@ extern struct iwl_cfg iwl100_bg_cfg;
+ extern struct iwl_cfg iwl130_bgn_cfg;
+ extern struct iwl_cfg iwl130_bg_cfg;
+ extern struct iwl_cfg iwl2000_2bgn_cfg;
+-extern struct iwl_cfg iwl2000_2bg_cfg;
+ extern struct iwl_cfg iwl2000_2bgn_d_cfg;
+ extern struct iwl_cfg iwl2030_2bgn_cfg;
+-extern struct iwl_cfg iwl2030_2bg_cfg;
+ extern struct iwl_cfg iwl6035_2agn_cfg;
+-extern struct iwl_cfg iwl6035_2abg_cfg;
+-extern struct iwl_cfg iwl6035_2bg_cfg;
+-extern struct iwl_cfg iwl105_bg_cfg;
++extern struct iwl_cfg iwl6035_2agn_sff_cfg;
+ extern struct iwl_cfg iwl105_bgn_cfg;
+ extern struct iwl_cfg iwl105_bgn_d_cfg;
+-extern struct iwl_cfg iwl135_bg_cfg;
+ extern struct iwl_cfg iwl135_bgn_cfg;
+
+ #endif /* __iwl_pci_h__ */
+diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
+index 346dc9ba2657..62a0f81cbb00 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
++++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
+@@ -236,13 +236,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+
+ /* 6x00 Series */
+ {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
++ {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
++ {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
++ {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
+
+@@ -250,13 +253,19 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+ {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
++ {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
++ {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
++ {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
+- {IWL_PCI_DEVICE(0x0082, 0x1341, iwl6005_2agn_d_cfg)},
++ {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
++ {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
++ {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
++ {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
+
+ /* 6x30 Series */
+ {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
+@@ -326,46 +335,33 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+ {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
+- {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
+
+ /* 2x30 Series */
+ {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
+- {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
+
+ /* 6x35 Series */
+ {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
++ {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
++ {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+- {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
+- {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
+- {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
+- {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
+- {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
++ {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
++ {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
++ {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
+
+ /* 105 Series */
+ {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
+- {IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)},
+- {IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)},
+- {IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
+
+ /* 135 Series */
+ {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
+- {IWL_PCI_DEVICE(0x0892, 0x0066, iwl135_bg_cfg)},
+- {IWL_PCI_DEVICE(0x0893, 0x0266, iwl135_bg_cfg)},
+- {IWL_PCI_DEVICE(0x0892, 0x0466, iwl135_bg_cfg)},
+
+ {0}
+ };
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+index bc33b147f44f..a7e1a2c35b4d 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+@@ -343,7 +343,8 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
+ (bool)GET_RX_DESC_PAGGR(pdesc));
+ rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
+ if (phystatus) {
+- p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
++ p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
++ stats->rx_bufshift);
+ rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
+ p_drvinfo);
+ }
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index 9d7f1723dd8f..093bf0a2fc83 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -88,6 +88,7 @@ struct xenvif {
+ unsigned long credit_usec;
+ unsigned long remaining_credit;
+ struct timer_list credit_timeout;
++ u64 credit_window_start;
+
+ /* Statistics */
+ unsigned long rx_gso_checksum_fixup;
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 8eaf0e2aaf2e..2cb9c9262fa8 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -272,8 +272,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
+ vif->credit_bytes = vif->remaining_credit = ~0UL;
+ vif->credit_usec = 0UL;
+ init_timer(&vif->credit_timeout);
+- /* Initialize 'expires' now: it's used to track the credit window. */
+- vif->credit_timeout.expires = jiffies;
++ vif->credit_window_start = get_jiffies_64();
+
+ dev->netdev_ops = &xenvif_netdev_ops;
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index fd2b92d61bba..9a4626c35f5a 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -1365,9 +1365,8 @@ out:
+
+ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
+ {
+- unsigned long now = jiffies;
+- unsigned long next_credit =
+- vif->credit_timeout.expires +
++ u64 now = get_jiffies_64();
++ u64 next_credit = vif->credit_window_start +
+ msecs_to_jiffies(vif->credit_usec / 1000);
+
+ /* Timer could already be pending in rare cases. */
+@@ -1375,8 +1374,8 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
+ return true;
+
+ /* Passed the point where we can replenish credit? */
+- if (time_after_eq(now, next_credit)) {
+- vif->credit_timeout.expires = now;
++ if (time_after_eq64(now, next_credit)) {
++ vif->credit_window_start = now;
+ tx_add_credit(vif);
+ }
+
+@@ -1388,6 +1387,7 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
+ tx_credit_callback;
+ mod_timer(&vif->credit_timeout,
+ next_credit);
++ vif->credit_window_start = next_credit;
+
+ return true;
+ }
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index 8b25f9cc0010..41f08e58c79c 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -188,7 +188,8 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
+ return ret;
+ }
+
+-static int _pci_assign_resource(struct pci_dev *dev, int resno, int size, resource_size_t min_align)
++static int _pci_assign_resource(struct pci_dev *dev, int resno,
++ resource_size_t size, resource_size_t min_align)
+ {
+ struct resource *res = dev->resource + resno;
+ struct pci_bus *bus;
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
+index 705e13e470af..2e658d24db4c 100644
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -771,6 +771,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
+ static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+ {
+ struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
++ if (!capable(CAP_SYS_RAWIO))
++ return -EPERM;
+ return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
+ }
+
+diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
+index 2fa658eb74dc..391b768de78d 100644
+--- a/drivers/staging/bcm/Bcmchar.c
++++ b/drivers/staging/bcm/Bcmchar.c
+@@ -1932,6 +1932,7 @@ cntrlEnd:
+
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
+
++ memset(&DevInfo, 0, sizeof(DevInfo));
+ DevInfo.MaxRDMBufferSize = BUFFER_4K;
+ DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
+ DevInfo.u32RxAlignmentCorrection = 0;
+diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
+index 260d4f0d47b4..b3d2e179c4e5 100644
+--- a/drivers/staging/wlags49_h2/wl_priv.c
++++ b/drivers/staging/wlags49_h2/wl_priv.c
+@@ -570,6 +570,7 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
+ ltv_t *pLtv;
+ bool_t ltvAllocated = FALSE;
+ ENCSTRCT sEncryption;
++ size_t len;
+
+ #ifdef USE_WDS
+ hcf_16 hcfPort = HCF_PORT_0;
+@@ -686,7 +687,8 @@ int wvlan_uil_put_info( struct uilreq *urq, struct wl_private *lp )
+ break;
+ case CFG_CNF_OWN_NAME:
+ memset( lp->StationName, 0, sizeof( lp->StationName ));
+- memcpy( (void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
++ len = min_t(size_t, pLtv->u.u16[0], sizeof(lp->StationName));
++ strlcpy(lp->StationName, &pLtv->u.u8[2], len);
+ pLtv->u.u16[0] = CNV_INT_TO_LITTLE( pLtv->u.u16[0] );
+ break;
+ case CFG_CNF_LOAD_BALANCING:
+@@ -1800,6 +1802,7 @@ int wvlan_set_station_nickname(struct net_device *dev,
+ {
+ struct wl_private *lp = wl_priv(dev);
+ unsigned long flags;
++ size_t len;
+ int ret = 0;
+ /*------------------------------------------------------------------------*/
+
+@@ -1810,8 +1813,8 @@ int wvlan_set_station_nickname(struct net_device *dev,
+ wl_lock(lp, &flags);
+
+ memset( lp->StationName, 0, sizeof( lp->StationName ));
+-
+- memcpy( lp->StationName, extra, wrqu->data.length);
++ len = min_t(size_t, wrqu->data.length, sizeof(lp->StationName));
++ strlcpy(lp->StationName, extra, len);
+
+ /* Commit the adapter parameters */
+ wl_apply( lp );
+diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
+index d197b3ecfbb4..e1a4994a3cc0 100644
+--- a/drivers/staging/zram/zram_drv.c
++++ b/drivers/staging/zram/zram_drv.c
+@@ -553,7 +553,7 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
+ end = start + (bio->bi_size >> SECTOR_SHIFT);
+ bound = zram->disksize >> SECTOR_SHIFT;
+ /* out of range range */
+- if (unlikely(start >= bound || end >= bound || start > end))
++ if (unlikely(start >= bound || end > bound || start > end))
+ return 0;
+
+ /* I/O request is valid */
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 5c12137c5266..e813227a00e2 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -129,10 +129,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
+ * pSCSI Host ID and enable for phba mode
+ */
+ sh = scsi_host_lookup(phv->phv_host_id);
+- if (IS_ERR(sh)) {
++ if (!sh) {
+ pr_err("pSCSI: Unable to locate SCSI Host for"
+ " phv_host_id: %d\n", phv->phv_host_id);
+- return PTR_ERR(sh);
++ return -EINVAL;
+ }
+
+ phv->phv_lld_host = sh;
+@@ -564,10 +564,10 @@ static struct se_device *pscsi_create_virtdevice(
+ sh = phv->phv_lld_host;
+ } else {
+ sh = scsi_host_lookup(pdv->pdv_host_id);
+- if (IS_ERR(sh)) {
++ if (!sh) {
+ pr_err("pSCSI: Unable to locate"
+ " pdv_host_id: %d\n", pdv->pdv_host_id);
+- return ERR_CAST(sh);
++ return ERR_PTR(-EINVAL);
+ }
+ }
+ } else {
+diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
+index a783d533a1a6..af57648865c4 100644
+--- a/drivers/uio/uio.c
++++ b/drivers/uio/uio.c
+@@ -650,16 +650,28 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
+ {
+ struct uio_device *idev = vma->vm_private_data;
+ int mi = uio_find_mem_index(vma);
++ struct uio_mem *mem;
+ if (mi < 0)
+ return -EINVAL;
++ mem = idev->info->mem + mi;
+
+- vma->vm_flags |= VM_IO | VM_RESERVED;
++ if (vma->vm_end - vma->vm_start > mem->size)
++ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
++ /*
++ * We cannot use the vm_iomap_memory() helper here,
++ * because vma->vm_pgoff is the map index we looked
++ * up above in uio_find_mem_index(), rather than an
++ * actual page offset into the mmap.
++ *
++ * So we just do the physical mmap without a page
++ * offset.
++ */
+ return remap_pfn_range(vma,
+ vma->vm_start,
+- idev->info->mem[mi].addr >> PAGE_SHIFT,
++ mem->addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ }
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index f52182dbf4f7..bcde6f65b1c6 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -97,6 +97,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Alcor Micro Corp. Hub */
+ { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* MicroTouch Systems touchscreen */
++ { USB_DEVICE(0x0596, 0x051e), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ /* appletouch */
+ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+
+@@ -130,6 +133,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Broadcom BCM92035DGROM BT dongle */
+ { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* MAYA44USB sound device */
++ { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ /* Action Semiconductor flash disk */
+ { USB_DEVICE(0x10d6, 0x2200), .driver_info =
+ USB_QUIRK_STRING_FETCH_255 },
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 24107a70eb1d..107e6b44ed15 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -1007,20 +1007,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
+ t1 = xhci_port_state_to_neutral(t1);
+ if (t1 != t2)
+ xhci_writel(xhci, t2, port_array[port_index]);
+-
+- if (hcd->speed != HCD_USB3) {
+- /* enable remote wake up for USB 2.0 */
+- __le32 __iomem *addr;
+- u32 tmp;
+-
+- /* Add one to the port status register address to get
+- * the port power control register address.
+- */
+- addr = port_array[port_index] + 1;
+- tmp = xhci_readl(xhci, addr);
+- tmp |= PORT_RWE;
+- xhci_writel(xhci, tmp, addr);
+- }
+ }
+ hcd->state = HC_STATE_SUSPENDED;
+ bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
+@@ -1099,20 +1085,6 @@ int xhci_bus_resume(struct usb_hcd *hcd)
+ xhci_ring_device(xhci, slot_id);
+ } else
+ xhci_writel(xhci, temp, port_array[port_index]);
+-
+- if (hcd->speed != HCD_USB3) {
+- /* disable remote wake up for USB 2.0 */
+- __le32 __iomem *addr;
+- u32 tmp;
+-
+- /* Add one to the port status register address to get
+- * the port power control register address.
+- */
+- addr = port_array[port_index] + 1;
+- tmp = xhci_readl(xhci, addr);
+- tmp &= ~PORT_RWE;
+- xhci_writel(xhci, tmp, addr);
+- }
+ }
+
+ (void) xhci_readl(xhci, &xhci->op_regs->command);
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 61b06688c916..827f933666d3 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -34,6 +34,9 @@
+ #define PCI_VENDOR_ID_ETRON 0x1b6f
+ #define PCI_DEVICE_ID_ASROCK_P67 0x7023
+
++#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
++#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
++
+ static const char hcd_name[] = "xhci_hcd";
+
+ /* called after powerup, by probe or system-pm "wakeup" */
+@@ -67,6 +70,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure"
+ " endpoint cmd after reset endpoint\n");
+ }
++ if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
++ pdev->revision == 0x4) {
++ xhci->quirks |= XHCI_SLOW_SUSPEND;
++ xhci_dbg(xhci,
++ "QUIRK: Fresco Logic xHC revision %u"
++ "must be suspended extra slowly",
++ pdev->revision);
++ }
+ /* Fresco Logic confirms: all revisions of this chip do not
+ * support MSI, even though some of them claim to in their PCI
+ * capabilities.
+@@ -103,6 +114,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ xhci->quirks |= XHCI_AVOID_BEI;
+ }
++ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
++ (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI ||
++ pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) {
++ /* Workaround for occasional spurious wakeups from S5 (or
++ * any other sleep) on Haswell machines with LPT and LPT-LP
++ * with the new Intel BIOS
++ */
++ xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
++ }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+@@ -202,6 +222,11 @@ static void xhci_pci_remove(struct pci_dev *dev)
+ usb_put_hcd(xhci->shared_hcd);
+ }
+ usb_hcd_pci_remove(dev);
++
++ /* Workaround for spurious wakeups at shutdown with HSW */
++ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
++ pci_set_power_state(dev, PCI_D3hot);
++
+ kfree(xhci);
+ }
+
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 629aa743e45b..03c35da16e48 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -763,12 +763,19 @@ void xhci_shutdown(struct usb_hcd *hcd)
+
+ spin_lock_irq(&xhci->lock);
+ xhci_halt(xhci);
++ /* Workaround for spurious wakeups at shutdown with HSW */
++ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
++ xhci_reset(xhci);
+ spin_unlock_irq(&xhci->lock);
+
+ xhci_cleanup_msix(xhci);
+
+ xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
+ xhci_readl(xhci, &xhci->op_regs->status));
++
++ /* Yet another workaround for spurious wakeups at shutdown with HSW */
++ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
++ pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
+ }
+
+ #ifdef CONFIG_PM
+@@ -869,6 +876,7 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
+ int xhci_suspend(struct xhci_hcd *xhci)
+ {
+ int rc = 0;
++ unsigned int delay = XHCI_MAX_HALT_USEC;
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ u32 command;
+
+@@ -887,8 +895,12 @@ int xhci_suspend(struct xhci_hcd *xhci)
+ command = xhci_readl(xhci, &xhci->op_regs->command);
+ command &= ~CMD_RUN;
+ xhci_writel(xhci, command, &xhci->op_regs->command);
++
++ /* Some chips from Fresco Logic need an extraordinary delay */
++ delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
++
+ if (handshake(xhci, &xhci->op_regs->status,
+- STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
++ STS_HALT, STS_HALT, delay)) {
+ xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
+ spin_unlock_irq(&xhci->lock);
+ return -ETIMEDOUT;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 8b4cce450b9d..cf4fd241fc77 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1493,6 +1493,8 @@ struct xhci_hcd {
+ #define XHCI_SPURIOUS_REBOOT (1 << 13)
+ #define XHCI_COMP_MODE_QUIRK (1 << 14)
+ #define XHCI_AVOID_BEI (1 << 15)
++#define XHCI_SLOW_SUSPEND (1 << 17)
++#define XHCI_SPURIOUS_WAKEUP (1 << 18)
+ unsigned int num_active_eps;
+ unsigned int limit_active_eps;
+ /* There are two roothubs to keep track of bus suspend info for */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 536c4ad2f5b7..d8ace827598b 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -457,6 +457,10 @@ static void option_instat_callback(struct urb *urb);
+ #define CHANGHONG_VENDOR_ID 0x2077
+ #define CHANGHONG_PRODUCT_CH690 0x7001
+
++/* Inovia */
++#define INOVIA_VENDOR_ID 0x20a6
++#define INOVIA_SEW858 0x1105
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+@@ -703,6 +707,222 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x01) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x02) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x03) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x04) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x05) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x06) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x10) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x12) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x13) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x14) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x15) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x17) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x18) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x19) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x31) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x32) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x33) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x34) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x35) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x36) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x48) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x49) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4C) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x61) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x62) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x63) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x64) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x65) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x66) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
+
+
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
+@@ -1279,7 +1499,9 @@ static const struct usb_device_id option_ids[] = {
+
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
+- { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) },
++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
++ .driver_info = (kernel_ulong_t)&net_intf6_blacklist
++ },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
+@@ -1367,6 +1589,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
++ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
+index 649cb35de4ed..1be8b5d6d093 100644
+--- a/drivers/video/au1100fb.c
++++ b/drivers/video/au1100fb.c
+@@ -387,39 +387,13 @@ void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
+ int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
+ {
+ struct au1100fb_device *fbdev;
+- unsigned int len;
+- unsigned long start=0, off;
+
+ fbdev = to_au1100fb_device(fbi);
+
+- if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
+- return -EINVAL;
+- }
+-
+- start = fbdev->fb_phys & PAGE_MASK;
+- len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
+-
+- off = vma->vm_pgoff << PAGE_SHIFT;
+-
+- if ((vma->vm_end - vma->vm_start + off) > len) {
+- return -EINVAL;
+- }
+-
+- off += start;
+- vma->vm_pgoff = off >> PAGE_SHIFT;
+-
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
+
+- vma->vm_flags |= VM_IO;
+-
+- if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+- vma->vm_end - vma->vm_start,
+- vma->vm_page_prot)) {
+- return -EAGAIN;
+- }
+-
+- return 0;
++ return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
+ }
+
+ static struct fb_ops au1100fb_ops =
+diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
+index 72005598040f..5bd7d887cd7a 100644
+--- a/drivers/video/au1200fb.c
++++ b/drivers/video/au1200fb.c
+@@ -1216,38 +1216,13 @@ static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
+ * method mainly to allow the use of the TLB streaming flag (CCA=6)
+ */
+ static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+-
+ {
+- unsigned int len;
+- unsigned long start=0, off;
+ struct au1200fb_device *fbdev = info->par;
+
+- if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
+- return -EINVAL;
+- }
+-
+- start = fbdev->fb_phys & PAGE_MASK;
+- len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
+-
+- off = vma->vm_pgoff << PAGE_SHIFT;
+-
+- if ((vma->vm_end - vma->vm_start + off) > len) {
+- return -EINVAL;
+- }
+-
+- off += start;
+- vma->vm_pgoff = off >> PAGE_SHIFT;
+-
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
+
+- vma->vm_flags |= VM_IO;
+-
+- return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+- vma->vm_end - vma->vm_start,
+- vma->vm_page_prot);
+-
+- return 0;
++ return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
+ }
+
+ static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
+diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
+index ac1ad48c2376..5ce56e7598b1 100644
+--- a/fs/ecryptfs/keystore.c
++++ b/fs/ecryptfs/keystore.c
+@@ -1151,7 +1151,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
+ struct ecryptfs_msg_ctx *msg_ctx;
+ struct ecryptfs_message *msg = NULL;
+ char *auth_tok_sig;
+- char *payload;
++ char *payload = NULL;
+ size_t payload_len;
+ int rc;
+
+@@ -1206,6 +1206,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
+ out:
+ if (msg)
+ kfree(msg);
++ kfree(payload);
+ return rc;
+ }
+
+diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
+index 34f0a072b935..3268697a08d6 100644
+--- a/fs/ext3/dir.c
++++ b/fs/ext3/dir.c
+@@ -25,6 +25,7 @@
+ #include <linux/jbd.h>
+ #include <linux/ext3_fs.h>
+ #include <linux/buffer_head.h>
++#include <linux/compat.h>
+ #include <linux/slab.h>
+ #include <linux/rbtree.h>
+
+@@ -32,24 +33,8 @@ static unsigned char ext3_filetype_table[] = {
+ DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+ };
+
+-static int ext3_readdir(struct file *, void *, filldir_t);
+ static int ext3_dx_readdir(struct file * filp,
+ void * dirent, filldir_t filldir);
+-static int ext3_release_dir (struct inode * inode,
+- struct file * filp);
+-
+-const struct file_operations ext3_dir_operations = {
+- .llseek = generic_file_llseek,
+- .read = generic_read_dir,
+- .readdir = ext3_readdir, /* we take BKL. needed?*/
+- .unlocked_ioctl = ext3_ioctl,
+-#ifdef CONFIG_COMPAT
+- .compat_ioctl = ext3_compat_ioctl,
+-#endif
+- .fsync = ext3_sync_file, /* BKL held */
+- .release = ext3_release_dir,
+-};
+-
+
+ static unsigned char get_dtype(struct super_block *sb, int filetype)
+ {
+@@ -60,6 +45,25 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
+ return (ext3_filetype_table[filetype]);
+ }
+
++/**
++ * Check if the given dir-inode refers to an htree-indexed directory
++ * (or a directory which chould potentially get coverted to use htree
++ * indexing).
++ *
++ * Return 1 if it is a dx dir, 0 if not
++ */
++static int is_dx_dir(struct inode *inode)
++{
++ struct super_block *sb = inode->i_sb;
++
++ if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
++ EXT3_FEATURE_COMPAT_DIR_INDEX) &&
++ ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
++ ((inode->i_size >> sb->s_blocksize_bits) == 1)))
++ return 1;
++
++ return 0;
++}
+
+ int ext3_check_dir_entry (const char * function, struct inode * dir,
+ struct ext3_dir_entry_2 * de,
+@@ -99,18 +103,13 @@ static int ext3_readdir(struct file * filp,
+ unsigned long offset;
+ int i, stored;
+ struct ext3_dir_entry_2 *de;
+- struct super_block *sb;
+ int err;
+ struct inode *inode = filp->f_path.dentry->d_inode;
++ struct super_block *sb = inode->i_sb;
+ int ret = 0;
+ int dir_has_error = 0;
+
+- sb = inode->i_sb;
+-
+- if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
+- EXT3_FEATURE_COMPAT_DIR_INDEX) &&
+- ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
+- ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
++ if (is_dx_dir(inode)) {
+ err = ext3_dx_readdir(filp, dirent, filldir);
+ if (err != ERR_BAD_DX_DIR) {
+ ret = err;
+@@ -232,22 +231,87 @@ out:
+ return ret;
+ }
+
++static inline int is_32bit_api(void)
++{
++#ifdef CONFIG_COMPAT
++ return is_compat_task();
++#else
++ return (BITS_PER_LONG == 32);
++#endif
++}
++
+ /*
+ * These functions convert from the major/minor hash to an f_pos
+- * value.
++ * value for dx directories
+ *
+- * Currently we only use major hash numer. This is unfortunate, but
+- * on 32-bit machines, the same VFS interface is used for lseek and
+- * llseek, so if we use the 64 bit offset, then the 32-bit versions of
+- * lseek/telldir/seekdir will blow out spectacularly, and from within
+- * the ext2 low-level routine, we don't know if we're being called by
+- * a 64-bit version of the system call or the 32-bit version of the
+- * system call. Worse yet, NFSv2 only allows for a 32-bit readdir
+- * cookie. Sigh.
++ * Upper layer (for example NFS) should specify FMODE_32BITHASH or
++ * FMODE_64BITHASH explicitly. On the other hand, we allow ext3 to be mounted
++ * directly on both 32-bit and 64-bit nodes, under such case, neither
++ * FMODE_32BITHASH nor FMODE_64BITHASH is specified.
+ */
+-#define hash2pos(major, minor) (major >> 1)
+-#define pos2maj_hash(pos) ((pos << 1) & 0xffffffff)
+-#define pos2min_hash(pos) (0)
++static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return major >> 1;
++ else
++ return ((__u64)(major >> 1) << 32) | (__u64)minor;
++}
++
++static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return (pos << 1) & 0xffffffff;
++ else
++ return ((pos >> 32) << 1) & 0xffffffff;
++}
++
++static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return 0;
++ else
++ return pos & 0xffffffff;
++}
++
++/*
++ * Return 32- or 64-bit end-of-file for dx directories
++ */
++static inline loff_t ext3_get_htree_eof(struct file *filp)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return EXT3_HTREE_EOF_32BIT;
++ else
++ return EXT3_HTREE_EOF_64BIT;
++}
++
++
++/*
++ * ext3_dir_llseek() calls generic_file_llseek[_size]() to handle both
++ * non-htree and htree directories, where the "offset" is in terms
++ * of the filename hash value instead of the byte offset.
++ *
++ * Because we may return a 64-bit hash that is well beyond s_maxbytes,
++ * we need to pass the max hash as the maximum allowable offset in
++ * the htree directory case.
++ *
++ * NOTE: offsets obtained *before* ext3_set_inode_flag(dir, EXT3_INODE_INDEX)
++ * will be invalid once the directory was converted into a dx directory
++ */
++loff_t ext3_dir_llseek(struct file *file, loff_t offset, int origin)
++{
++ struct inode *inode = file->f_mapping->host;
++ int dx_dir = is_dx_dir(inode);
++
++ if (likely(dx_dir))
++ return generic_file_llseek_size(file, offset, origin,
++ ext3_get_htree_eof(file));
++ else
++ return generic_file_llseek(file, offset, origin);
++}
+
+ /*
+ * This structure holds the nodes of the red-black tree used to store
+@@ -308,15 +372,16 @@ static void free_rb_tree_fname(struct rb_root *root)
+ }
+
+
+-static struct dir_private_info *ext3_htree_create_dir_info(loff_t pos)
++static struct dir_private_info *ext3_htree_create_dir_info(struct file *filp,
++ loff_t pos)
+ {
+ struct dir_private_info *p;
+
+ p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
+ if (!p)
+ return NULL;
+- p->curr_hash = pos2maj_hash(pos);
+- p->curr_minor_hash = pos2min_hash(pos);
++ p->curr_hash = pos2maj_hash(filp, pos);
++ p->curr_minor_hash = pos2min_hash(filp, pos);
+ return p;
+ }
+
+@@ -406,7 +471,7 @@ static int call_filldir(struct file * filp, void * dirent,
+ printk("call_filldir: called with null fname?!?\n");
+ return 0;
+ }
+- curr_pos = hash2pos(fname->hash, fname->minor_hash);
++ curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
+ while (fname) {
+ error = filldir(dirent, fname->name,
+ fname->name_len, curr_pos,
+@@ -431,13 +496,13 @@ static int ext3_dx_readdir(struct file * filp,
+ int ret;
+
+ if (!info) {
+- info = ext3_htree_create_dir_info(filp->f_pos);
++ info = ext3_htree_create_dir_info(filp, filp->f_pos);
+ if (!info)
+ return -ENOMEM;
+ filp->private_data = info;
+ }
+
+- if (filp->f_pos == EXT3_HTREE_EOF)
++ if (filp->f_pos == ext3_get_htree_eof(filp))
+ return 0; /* EOF */
+
+ /* Some one has messed with f_pos; reset the world */
+@@ -445,8 +510,8 @@ static int ext3_dx_readdir(struct file * filp,
+ free_rb_tree_fname(&info->root);
+ info->curr_node = NULL;
+ info->extra_fname = NULL;
+- info->curr_hash = pos2maj_hash(filp->f_pos);
+- info->curr_minor_hash = pos2min_hash(filp->f_pos);
++ info->curr_hash = pos2maj_hash(filp, filp->f_pos);
++ info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
+ }
+
+ /*
+@@ -478,7 +543,7 @@ static int ext3_dx_readdir(struct file * filp,
+ if (ret < 0)
+ return ret;
+ if (ret == 0) {
+- filp->f_pos = EXT3_HTREE_EOF;
++ filp->f_pos = ext3_get_htree_eof(filp);
+ break;
+ }
+ info->curr_node = rb_first(&info->root);
+@@ -498,7 +563,7 @@ static int ext3_dx_readdir(struct file * filp,
+ info->curr_minor_hash = fname->minor_hash;
+ } else {
+ if (info->next_hash == ~0) {
+- filp->f_pos = EXT3_HTREE_EOF;
++ filp->f_pos = ext3_get_htree_eof(filp);
+ break;
+ }
+ info->curr_hash = info->next_hash;
+@@ -517,3 +582,15 @@ static int ext3_release_dir (struct inode * inode, struct file * filp)
+
+ return 0;
+ }
++
++const struct file_operations ext3_dir_operations = {
++ .llseek = ext3_dir_llseek,
++ .read = generic_read_dir,
++ .readdir = ext3_readdir,
++ .unlocked_ioctl = ext3_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = ext3_compat_ioctl,
++#endif
++ .fsync = ext3_sync_file,
++ .release = ext3_release_dir,
++};
+diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
+index 7d215b4d4f2e..d4d3ade46da8 100644
+--- a/fs/ext3/hash.c
++++ b/fs/ext3/hash.c
+@@ -200,8 +200,8 @@ int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
+ return -1;
+ }
+ hash = hash & ~1;
+- if (hash == (EXT3_HTREE_EOF << 1))
+- hash = (EXT3_HTREE_EOF-1) << 1;
++ if (hash == (EXT3_HTREE_EOF_32BIT << 1))
++ hash = (EXT3_HTREE_EOF_32BIT - 1) << 1;
+ hinfo->hash = hash;
+ hinfo->minor_hash = minor_hash;
+ return 0;
+diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
+index 164c56092e58..689d1b1a3f45 100644
+--- a/fs/ext4/dir.c
++++ b/fs/ext4/dir.c
+@@ -32,24 +32,8 @@ static unsigned char ext4_filetype_table[] = {
+ DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+ };
+
+-static int ext4_readdir(struct file *, void *, filldir_t);
+ static int ext4_dx_readdir(struct file *filp,
+ void *dirent, filldir_t filldir);
+-static int ext4_release_dir(struct inode *inode,
+- struct file *filp);
+-
+-const struct file_operations ext4_dir_operations = {
+- .llseek = ext4_llseek,
+- .read = generic_read_dir,
+- .readdir = ext4_readdir, /* we take BKL. needed?*/
+- .unlocked_ioctl = ext4_ioctl,
+-#ifdef CONFIG_COMPAT
+- .compat_ioctl = ext4_compat_ioctl,
+-#endif
+- .fsync = ext4_sync_file,
+- .release = ext4_release_dir,
+-};
+-
+
+ static unsigned char get_dtype(struct super_block *sb, int filetype)
+ {
+@@ -60,6 +44,26 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
+ return (ext4_filetype_table[filetype]);
+ }
+
++/**
++ * Check if the given dir-inode refers to an htree-indexed directory
++ * (or a directory which chould potentially get coverted to use htree
++ * indexing).
++ *
++ * Return 1 if it is a dx dir, 0 if not
++ */
++static int is_dx_dir(struct inode *inode)
++{
++ struct super_block *sb = inode->i_sb;
++
++ if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
++ EXT4_FEATURE_COMPAT_DIR_INDEX) &&
++ ((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) ||
++ ((inode->i_size >> sb->s_blocksize_bits) == 1)))
++ return 1;
++
++ return 0;
++}
++
+ /*
+ * Return 0 if the directory entry is OK, and 1 if there is a problem
+ *
+@@ -115,18 +119,13 @@ static int ext4_readdir(struct file *filp,
+ unsigned int offset;
+ int i, stored;
+ struct ext4_dir_entry_2 *de;
+- struct super_block *sb;
+ int err;
+ struct inode *inode = filp->f_path.dentry->d_inode;
++ struct super_block *sb = inode->i_sb;
+ int ret = 0;
+ int dir_has_error = 0;
+
+- sb = inode->i_sb;
+-
+- if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
+- EXT4_FEATURE_COMPAT_DIR_INDEX) &&
+- ((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) ||
+- ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
++ if (is_dx_dir(inode)) {
+ err = ext4_dx_readdir(filp, dirent, filldir);
+ if (err != ERR_BAD_DX_DIR) {
+ ret = err;
+@@ -254,22 +253,134 @@ out:
+ return ret;
+ }
+
++static inline int is_32bit_api(void)
++{
++#ifdef CONFIG_COMPAT
++ return is_compat_task();
++#else
++ return (BITS_PER_LONG == 32);
++#endif
++}
++
+ /*
+ * These functions convert from the major/minor hash to an f_pos
+- * value.
++ * value for dx directories
++ *
++ * Upper layer (for example NFS) should specify FMODE_32BITHASH or
++ * FMODE_64BITHASH explicitly. On the other hand, we allow ext4 to be mounted
++ * directly on both 32-bit and 64-bit nodes, under such case, neither
++ * FMODE_32BITHASH nor FMODE_64BITHASH is specified.
++ */
++static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return major >> 1;
++ else
++ return ((__u64)(major >> 1) << 32) | (__u64)minor;
++}
++
++static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return (pos << 1) & 0xffffffff;
++ else
++ return ((pos >> 32) << 1) & 0xffffffff;
++}
++
++static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return 0;
++ else
++ return pos & 0xffffffff;
++}
++
++/*
++ * Return 32- or 64-bit end-of-file for dx directories
++ */
++static inline loff_t ext4_get_htree_eof(struct file *filp)
++{
++ if ((filp->f_mode & FMODE_32BITHASH) ||
++ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
++ return EXT4_HTREE_EOF_32BIT;
++ else
++ return EXT4_HTREE_EOF_64BIT;
++}
++
++
++/*
++ * ext4_dir_llseek() based on generic_file_llseek() to handle both
++ * non-htree and htree directories, where the "offset" is in terms
++ * of the filename hash value instead of the byte offset.
+ *
+- * Currently we only use major hash numer. This is unfortunate, but
+- * on 32-bit machines, the same VFS interface is used for lseek and
+- * llseek, so if we use the 64 bit offset, then the 32-bit versions of
+- * lseek/telldir/seekdir will blow out spectacularly, and from within
+- * the ext2 low-level routine, we don't know if we're being called by
+- * a 64-bit version of the system call or the 32-bit version of the
+- * system call. Worse yet, NFSv2 only allows for a 32-bit readdir
+- * cookie. Sigh.
++ * NOTE: offsets obtained *before* ext4_set_inode_flag(dir, EXT4_INODE_INDEX)
++ * will be invalid once the directory was converted into a dx directory
+ */
+-#define hash2pos(major, minor) (major >> 1)
+-#define pos2maj_hash(pos) ((pos << 1) & 0xffffffff)
+-#define pos2min_hash(pos) (0)
++loff_t ext4_dir_llseek(struct file *file, loff_t offset, int origin)
++{
++ struct inode *inode = file->f_mapping->host;
++ loff_t ret = -EINVAL;
++ int dx_dir = is_dx_dir(inode);
++
++ mutex_lock(&inode->i_mutex);
++
++ /* NOTE: relative offsets with dx directories might not work
++ * as expected, as it is difficult to figure out the
++ * correct offset between dx hashes */
++
++ switch (origin) {
++ case SEEK_END:
++ if (unlikely(offset > 0))
++ goto out_err; /* not supported for directories */
++
++ /* so only negative offsets are left, does that have a
++ * meaning for directories at all? */
++ if (dx_dir)
++ offset += ext4_get_htree_eof(file);
++ else
++ offset += inode->i_size;
++ break;
++ case SEEK_CUR:
++ /*
++ * Here we special-case the lseek(fd, 0, SEEK_CUR)
++ * position-querying operation. Avoid rewriting the "same"
++ * f_pos value back to the file because a concurrent read(),
++ * write() or lseek() might have altered it
++ */
++ if (offset == 0) {
++ offset = file->f_pos;
++ goto out_ok;
++ }
++
++ offset += file->f_pos;
++ break;
++ }
++
++ if (unlikely(offset < 0))
++ goto out_err;
++
++ if (!dx_dir) {
++ if (offset > inode->i_sb->s_maxbytes)
++ goto out_err;
++ } else if (offset > ext4_get_htree_eof(file))
++ goto out_err;
++
++ /* Special lock needed here? */
++ if (offset != file->f_pos) {
++ file->f_pos = offset;
++ file->f_version = 0;
++ }
++
++out_ok:
++ ret = offset;
++out_err:
++ mutex_unlock(&inode->i_mutex);
++
++ return ret;
++}
+
+ /*
+ * This structure holds the nodes of the red-black tree used to store
+@@ -330,15 +441,16 @@ static void free_rb_tree_fname(struct rb_root *root)
+ }
+
+
+-static struct dir_private_info *ext4_htree_create_dir_info(loff_t pos)
++static struct dir_private_info *ext4_htree_create_dir_info(struct file *filp,
++ loff_t pos)
+ {
+ struct dir_private_info *p;
+
+ p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
+ if (!p)
+ return NULL;
+- p->curr_hash = pos2maj_hash(pos);
+- p->curr_minor_hash = pos2min_hash(pos);
++ p->curr_hash = pos2maj_hash(filp, pos);
++ p->curr_minor_hash = pos2min_hash(filp, pos);
+ return p;
+ }
+
+@@ -429,7 +541,7 @@ static int call_filldir(struct file *filp, void *dirent,
+ "null fname?!?\n");
+ return 0;
+ }
+- curr_pos = hash2pos(fname->hash, fname->minor_hash);
++ curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
+ while (fname) {
+ error = filldir(dirent, fname->name,
+ fname->name_len, curr_pos,
+@@ -454,13 +566,13 @@ static int ext4_dx_readdir(struct file *filp,
+ int ret;
+
+ if (!info) {
+- info = ext4_htree_create_dir_info(filp->f_pos);
++ info = ext4_htree_create_dir_info(filp, filp->f_pos);
+ if (!info)
+ return -ENOMEM;
+ filp->private_data = info;
+ }
+
+- if (filp->f_pos == EXT4_HTREE_EOF)
++ if (filp->f_pos == ext4_get_htree_eof(filp))
+ return 0; /* EOF */
+
+ /* Some one has messed with f_pos; reset the world */
+@@ -468,8 +580,8 @@ static int ext4_dx_readdir(struct file *filp,
+ free_rb_tree_fname(&info->root);
+ info->curr_node = NULL;
+ info->extra_fname = NULL;
+- info->curr_hash = pos2maj_hash(filp->f_pos);
+- info->curr_minor_hash = pos2min_hash(filp->f_pos);
++ info->curr_hash = pos2maj_hash(filp, filp->f_pos);
++ info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
+ }
+
+ /*
+@@ -501,7 +613,7 @@ static int ext4_dx_readdir(struct file *filp,
+ if (ret < 0)
+ return ret;
+ if (ret == 0) {
+- filp->f_pos = EXT4_HTREE_EOF;
++ filp->f_pos = ext4_get_htree_eof(filp);
+ break;
+ }
+ info->curr_node = rb_first(&info->root);
+@@ -521,7 +633,7 @@ static int ext4_dx_readdir(struct file *filp,
+ info->curr_minor_hash = fname->minor_hash;
+ } else {
+ if (info->next_hash == ~0) {
+- filp->f_pos = EXT4_HTREE_EOF;
++ filp->f_pos = ext4_get_htree_eof(filp);
+ break;
+ }
+ info->curr_hash = info->next_hash;
+@@ -540,3 +652,15 @@ static int ext4_release_dir(struct inode *inode, struct file *filp)
+
+ return 0;
+ }
++
++const struct file_operations ext4_dir_operations = {
++ .llseek = ext4_dir_llseek,
++ .read = generic_read_dir,
++ .readdir = ext4_readdir,
++ .unlocked_ioctl = ext4_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = ext4_compat_ioctl,
++#endif
++ .fsync = ext4_sync_file,
++ .release = ext4_release_dir,
++};
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 60b6ca575f88..22c71b9dd727 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1597,7 +1597,11 @@ struct dx_hash_info
+ u32 *seed;
+ };
+
+-#define EXT4_HTREE_EOF 0x7fffffff
++
++/* 32 and 64 bit signed EOF for dx directories */
++#define EXT4_HTREE_EOF_32BIT ((1UL << (32 - 1)) - 1)
++#define EXT4_HTREE_EOF_64BIT ((1ULL << (64 - 1)) - 1)
++
+
+ /*
+ * Control parameters used by ext4_htree_next_block
+diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
+index ac8f168c8ab4..fa8e4911d354 100644
+--- a/fs/ext4/hash.c
++++ b/fs/ext4/hash.c
+@@ -200,8 +200,8 @@ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
+ return -1;
+ }
+ hash = hash & ~1;
+- if (hash == (EXT4_HTREE_EOF << 1))
+- hash = (EXT4_HTREE_EOF-1) << 1;
++ if (hash == (EXT4_HTREE_EOF_32BIT << 1))
++ hash = (EXT4_HTREE_EOF_32BIT - 1) << 1;
+ hinfo->hash = hash;
+ hinfo->minor_hash = minor_hash;
+ return 0;
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index d5498b2de8af..b4e9f3fcc3fd 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1269,6 +1269,8 @@ retry:
+ s_min_extra_isize) {
+ tried_min_extra_isize++;
+ new_extra_isize = s_min_extra_isize;
++ kfree(is); is = NULL;
++ kfree(bs); bs = NULL;
+ goto retry;
+ }
+ error = -1;
+diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
+index c1a3e603279c..7f464c513ba0 100644
+--- a/fs/jfs/jfs_inode.c
++++ b/fs/jfs/jfs_inode.c
+@@ -95,7 +95,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
+
+ if (insert_inode_locked(inode) < 0) {
+ rc = -EINVAL;
+- goto fail_unlock;
++ goto fail_put;
+ }
+
+ inode_init_owner(inode, parent, mode);
+@@ -156,7 +156,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
+ fail_drop:
+ dquot_drop(inode);
+ inode->i_flags |= S_NOQUOTA;
+-fail_unlock:
+ clear_nlink(inode);
+ unlock_new_inode(inode);
+ fail_put:
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 561a3dcc8cf1..61b697e69ef3 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -726,12 +726,13 @@ static int nfsd_open_break_lease(struct inode *inode, int access)
+
+ /*
+ * Open an existing file or directory.
+- * The access argument indicates the type of open (read/write/lock)
++ * The may_flags argument indicates the type of open (read/write/lock)
++ * and additional flags.
+ * N.B. After this call fhp needs an fh_put
+ */
+ __be32
+ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+- int access, struct file **filp)
++ int may_flags, struct file **filp)
+ {
+ struct dentry *dentry;
+ struct inode *inode;
+@@ -746,7 +747,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ * and (hopefully) checked permission - so allow OWNER_OVERRIDE
+ * in case a chmod has now revoked permission.
+ */
+- err = fh_verify(rqstp, fhp, type, access | NFSD_MAY_OWNER_OVERRIDE);
++ err = fh_verify(rqstp, fhp, type, may_flags | NFSD_MAY_OWNER_OVERRIDE);
+ if (err)
+ goto out;
+
+@@ -757,7 +758,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ * or any access when mandatory locking enabled
+ */
+ err = nfserr_perm;
+- if (IS_APPEND(inode) && (access & NFSD_MAY_WRITE))
++ if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
+ goto out;
+ /*
+ * We must ignore files (but only files) which might have mandatory
+@@ -770,12 +771,12 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ if (!inode->i_fop)
+ goto out;
+
+- host_err = nfsd_open_break_lease(inode, access);
++ host_err = nfsd_open_break_lease(inode, may_flags);
+ if (host_err) /* NOMEM or WOULDBLOCK */
+ goto out_nfserr;
+
+- if (access & NFSD_MAY_WRITE) {
+- if (access & NFSD_MAY_READ)
++ if (may_flags & NFSD_MAY_WRITE) {
++ if (may_flags & NFSD_MAY_READ)
+ flags = O_RDWR|O_LARGEFILE;
+ else
+ flags = O_WRONLY|O_LARGEFILE;
+@@ -785,8 +786,15 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ if (IS_ERR(*filp)) {
+ host_err = PTR_ERR(*filp);
+ *filp = NULL;
+- } else
+- host_err = ima_file_check(*filp, access);
++ } else {
++ host_err = ima_file_check(*filp, may_flags);
++
++ if (may_flags & NFSD_MAY_64BIT_COOKIE)
++ (*filp)->f_mode |= FMODE_64BITHASH;
++ else
++ (*filp)->f_mode |= FMODE_32BITHASH;
++ }
++
+ out_nfserr:
+ err = nfserrno(host_err);
+ out:
+@@ -2016,8 +2024,13 @@ nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
+ __be32 err;
+ struct file *file;
+ loff_t offset = *offsetp;
++ int may_flags = NFSD_MAY_READ;
++
++ /* NFSv2 only supports 32 bit cookies */
++ if (rqstp->rq_vers > 2)
++ may_flags |= NFSD_MAY_64BIT_COOKIE;
+
+- err = nfsd_open(rqstp, fhp, S_IFDIR, NFSD_MAY_READ, &file);
++ err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file);
+ if (err)
+ goto out;
+
+diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
+index 3f54ad03bb2b..85d4d422458a 100644
+--- a/fs/nfsd/vfs.h
++++ b/fs/nfsd/vfs.h
+@@ -27,6 +27,8 @@
+ #define NFSD_MAY_BYPASS_GSS 0x400
+ #define NFSD_MAY_READ_IF_EXEC 0x800
+
++#define NFSD_MAY_64BIT_COOKIE 0x1000 /* 64 bit readdir cookies for >= NFSv3 */
++
+ #define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
+ #define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
+
+diff --git a/fs/statfs.c b/fs/statfs.c
+index 9cf04a118965..a133c3ef2be0 100644
+--- a/fs/statfs.c
++++ b/fs/statfs.c
+@@ -86,7 +86,7 @@ int user_statfs(const char __user *pathname, struct kstatfs *st)
+
+ int fd_statfs(int fd, struct kstatfs *st)
+ {
+- struct file *file = fget(fd);
++ struct file *file = fget_raw(fd);
+ int error = -EBADF;
+ if (file) {
+ error = vfs_statfs(&file->f_path, st);
+diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
+index 7639f1804df1..8f4ae681cdc1 100644
+--- a/include/drm/drm_mode.h
++++ b/include/drm/drm_mode.h
+@@ -184,6 +184,8 @@ struct drm_mode_get_connector {
+ __u32 connection;
+ __u32 mm_width, mm_height; /**< HxW in millimeters */
+ __u32 subpixel;
++
++ __u32 pad;
+ };
+
+ #define DRM_MODE_PROP_PENDING (1<<0)
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 3fd17c249221..5633053ac7c2 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -5,6 +5,9 @@
+ /*
+ * Common definitions for all gcc versions go here.
+ */
++#define GCC_VERSION (__GNUC__ * 10000 \
++ + __GNUC_MINOR__ * 100 \
++ + __GNUC_PATCHLEVEL__)
+
+
+ /* Optimization barrier */
+diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
+index dfadc96e9d63..643d6c4a5993 100644
+--- a/include/linux/compiler-gcc4.h
++++ b/include/linux/compiler-gcc4.h
+@@ -29,6 +29,21 @@
+ the kernel context */
+ #define __cold __attribute__((__cold__))
+
++/*
++ * GCC 'asm goto' miscompiles certain code sequences:
++ *
++ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
++ *
++ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
++ * Fixed in GCC 4.8.2 and later versions.
++ *
++ * (asm goto is automatically volatile - the naming reflects this.)
++ */
++#if GCC_VERSION <= 40801
++# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
++#else
++# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
++#endif
+
+ #if __GNUC_MINOR__ >= 5
+ /*
+diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
+index dec99116a0e4..d59ab1265967 100644
+--- a/include/linux/ext3_fs.h
++++ b/include/linux/ext3_fs.h
+@@ -781,7 +781,11 @@ struct dx_hash_info
+ u32 *seed;
+ };
+
+-#define EXT3_HTREE_EOF 0x7fffffff
++
++/* 32 and 64 bit signed EOF for dx directories */
++#define EXT3_HTREE_EOF_32BIT ((1UL << (32 - 1)) - 1)
++#define EXT3_HTREE_EOF_64BIT ((1ULL << (64 - 1)) - 1)
++
+
+ /*
+ * Control parameters used by ext3_htree_next_block
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index a2768177ad26..dd743859f04e 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -92,6 +92,10 @@ struct inodes_stat_t {
+ /* File is opened using open(.., 3, ..) and is writeable only for ioctls
+ (specialy hack for floppy.c) */
+ #define FMODE_WRITE_IOCTL ((__force fmode_t)0x100)
++/* 32bit hashes as llseek() offset (for directories) */
++#define FMODE_32BITHASH ((__force fmode_t)0x200)
++/* 64bit hashes as llseek() offset (for directories) */
++#define FMODE_64BITHASH ((__force fmode_t)0x400)
+
+ /*
+ * Don't update ctime and mtime.
+@@ -907,9 +911,11 @@ static inline loff_t i_size_read(const struct inode *inode)
+ static inline void i_size_write(struct inode *inode, loff_t i_size)
+ {
+ #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
++ preempt_disable();
+ write_seqcount_begin(&inode->i_size_seqcount);
+ inode->i_size = i_size;
+ write_seqcount_end(&inode->i_size_seqcount);
++ preempt_enable();
+ #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+ preempt_disable();
+ inode->i_size = i_size;
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index eeb6a29ee1e1..8d5b91ec2b19 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -300,13 +300,15 @@ struct perf_event_mmap_page {
+ /*
+ * Control data for the mmap() data buffer.
+ *
+- * User-space reading the @data_head value should issue an rmb(), on
+- * SMP capable platforms, after reading this value -- see
+- * perf_event_wakeup().
++ * User-space reading the @data_head value should issue an smp_rmb(),
++ * after reading this value.
+ *
+ * When the mapping is PROT_WRITE the @data_tail value should be
+- * written by userspace to reflect the last read data. In this case
+- * the kernel will not over-write unread data.
++ * written by userspace to reflect the last read data, after issueing
++ * an smp_mb() to separate the data read from the ->data_tail store.
++ * In this case the kernel will not over-write unread data.
++ *
++ * See perf_output_put_handle() for the data ordering.
+ */
+ __u64 data_head; /* head in the data section */
+ __u64 data_tail; /* user-space written tail */
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 29e217a7e6d0..7e77cee62bd9 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -58,6 +58,7 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
+ extern void get_random_bytes(void *buf, int nbytes);
+ extern void get_random_bytes_arch(void *buf, int nbytes);
+ void generate_random_uuid(unsigned char uuid_out[16]);
++extern int random_int_secret_init(void);
+
+ #ifndef MODULE
+ extern const struct file_operations random_fops, urandom_fops;
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index efe50aff13ab..85180bf6c04b 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -737,6 +737,16 @@ static inline int skb_cloned(const struct sk_buff *skb)
+ (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
+ }
+
++static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
++{
++ might_sleep_if(pri & __GFP_WAIT);
++
++ if (skb_cloned(skb))
++ return pskb_expand_head(skb, 0, 0, pri);
++
++ return 0;
++}
++
+ /**
+ * skb_header_cloned - is the header a clone
+ * @skb: buffer to check
+@@ -1157,6 +1167,11 @@ static inline int skb_pagelen(const struct sk_buff *skb)
+ return len + skb_headlen(skb);
+ }
+
++static inline bool skb_has_frags(const struct sk_buff *skb)
++{
++ return skb_shinfo(skb)->nr_frags;
++}
++
+ /**
+ * __skb_fill_page_desc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+diff --git a/include/linux/timex.h b/include/linux/timex.h
+index 08e90fb81acc..5fee575ebccf 100644
+--- a/include/linux/timex.h
++++ b/include/linux/timex.h
+@@ -173,6 +173,20 @@ struct timex {
+
+ #include <asm/timex.h>
+
++#ifndef random_get_entropy
++/*
++ * The random_get_entropy() function is used by the /dev/random driver
++ * in order to extract entropy via the relative unpredictability of
++ * when an interrupt takes places versus a high speed, fine-grained
++ * timing source or cycle counter. Since it will be occurred on every
++ * single interrupt, it must have a very low cost/overhead.
++ *
++ * By default we use get_cycles() for this purpose, but individual
++ * architectures may override this in their asm/timex.h header file.
++ */
++#define random_get_entropy() get_cycles()
++#endif
++
+ /*
+ * SHIFT_PLL is used as a dampening factor to define how much we
+ * adjust the frequency correction for a given offset in PLL mode.
+diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
+index a7a683e30b64..a8c2ef6d3b93 100644
+--- a/include/net/cipso_ipv4.h
++++ b/include/net/cipso_ipv4.h
+@@ -290,6 +290,7 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
+ unsigned char err_offset = 0;
+ u8 opt_len = opt[1];
+ u8 opt_iter;
++ u8 tag_len;
+
+ if (opt_len < 8) {
+ err_offset = 1;
+@@ -302,11 +303,12 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
+ }
+
+ for (opt_iter = 6; opt_iter < opt_len;) {
+- if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
++ tag_len = opt[opt_iter + 1];
++ if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) {
+ err_offset = opt_iter + 1;
+ goto out;
+ }
+- opt_iter += opt[opt_iter + 1];
++ opt_iter += tag_len;
+ }
+
+ out:
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 16010d1899a3..86ef78ddf9c3 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -459,10 +459,22 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
+ {
+ return dst_orig;
+ }
++
++static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
++{
++ return NULL;
++}
++
+ #else
+ extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+ const struct flowi *fl, struct sock *sk,
+ int flags);
++
++/* skb attached with this dst needs transformation if dst->xfrm is valid */
++static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
++{
++ return dst->xfrm;
++}
+ #endif
+
+ #endif /* _NET_DST_H */
+diff --git a/init/main.c b/init/main.c
+index 5d0eb1d2af8f..7474450c4503 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -68,6 +68,7 @@
+ #include <linux/shmem_fs.h>
+ #include <linux/slab.h>
+ #include <linux/perf_event.h>
++#include <linux/random.h>
+
+ #include <asm/io.h>
+ #include <asm/bugs.h>
+@@ -732,6 +733,7 @@ static void __init do_basic_setup(void)
+ do_ctors();
+ usermodehelper_enable();
+ do_initcalls();
++ random_int_secret_init();
+ }
+
+ static void __init do_pre_smp_initcalls(void)
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 7f3011c6b57f..58c3b5134221 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -75,10 +75,31 @@ again:
+ goto out;
+
+ /*
+- * Publish the known good head. Rely on the full barrier implied
+- * by atomic_dec_and_test() order the rb->head read and this
+- * write.
++ * Since the mmap() consumer (userspace) can run on a different CPU:
++ *
++ * kernel user
++ *
++ * READ ->data_tail READ ->data_head
++ * smp_mb() (A) smp_rmb() (C)
++ * WRITE $data READ $data
++ * smp_wmb() (B) smp_mb() (D)
++ * STORE ->data_head WRITE ->data_tail
++ *
++ * Where A pairs with D, and B pairs with C.
++ *
++ * I don't think A needs to be a full barrier because we won't in fact
++ * write data until we see the store from userspace. So we simply don't
++ * issue the data WRITE until we observe it. Be conservative for now.
++ *
++ * OTOH, D needs to be a full barrier since it separates the data READ
++ * from the tail WRITE.
++ *
++ * For B a WMB is sufficient since it separates two WRITEs, and for C
++ * an RMB is sufficient since it separates two READs.
++ *
++ * See perf_output_begin().
+ */
++ smp_wmb();
+ rb->user_page->data_head = head;
+
+ /*
+@@ -142,9 +163,11 @@ int perf_output_begin(struct perf_output_handle *handle,
+ * Userspace could choose to issue a mb() before updating the
+ * tail pointer. So that all reads will be completed before the
+ * write is issued.
++ *
++ * See perf_output_put_handle().
+ */
+ tail = ACCESS_ONCE(rb->user_page->data_tail);
+- smp_rmb();
++ smp_mb();
+ offset = head = local_read(&rb->head);
+ head += size;
+ if (unlikely(!perf_output_space(rb, tail, offset, head)))
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index ce1067f819cc..c5a12a7d1287 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -534,9 +534,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
+ if (isspace(ch)) {
+ parser->buffer[parser->idx] = 0;
+ parser->cont = false;
+- } else {
++ } else if (parser->idx < parser->size - 1) {
+ parser->cont = true;
+ parser->buffer[parser->idx++] = ch;
++ } else {
++ ret = -EINVAL;
++ goto out;
+ }
+
+ *ppos += read;
+diff --git a/lib/scatterlist.c b/lib/scatterlist.c
+index 4ceb05d772ae..2ffcb3c601ea 100644
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -419,7 +419,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
+ if (miter->addr) {
+ miter->__offset += miter->consumed;
+
+- if (miter->__flags & SG_MITER_TO_SG)
++ if ((miter->__flags & SG_MITER_TO_SG) &&
++ !PageSlab(miter->page))
+ flush_kernel_dcache_page(miter->page);
+
+ if (miter->__flags & SG_MITER_ATOMIC) {
+diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
+index 235c2197dbb6..c7056123c2a1 100644
+--- a/net/8021q/vlan_netlink.c
++++ b/net/8021q/vlan_netlink.c
+@@ -152,7 +152,7 @@ static size_t vlan_get_size(const struct net_device *dev)
+ struct vlan_dev_info *vlan = vlan_dev_info(dev);
+
+ return nla_total_size(2) + /* IFLA_VLAN_ID */
+- sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
++ nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
+ vlan_qos_map_size(vlan->nr_ingress_mappings) +
+ vlan_qos_map_size(vlan->nr_egress_mappings);
+ }
+diff --git a/net/compat.c b/net/compat.c
+index 8c979cccdbd9..3139ef298145 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -71,6 +71,8 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
+ __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
+ __get_user(kmsg->msg_flags, &umsg->msg_flags))
+ return -EFAULT;
++ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
++ return -EINVAL;
+ kmsg->msg_name = compat_ptr(tmp1);
+ kmsg->msg_iov = compat_ptr(tmp2);
+ kmsg->msg_control = compat_ptr(tmp3);
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 984ec656b03b..4afcf31bdfeb 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -268,7 +268,7 @@ begintw:
+ }
+ if (unlikely(!INET_TW_MATCH(sk, net, hash, acookie,
+ saddr, daddr, ports, dif))) {
+- sock_put(sk);
++ inet_twsk_put(inet_twsk(sk));
+ goto begintw;
+ }
+ goto out;
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index daf408e8633c..16191f01132a 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -833,7 +833,7 @@ static int __ip_append_data(struct sock *sk,
+ csummode = CHECKSUM_PARTIAL;
+
+ cork->length += length;
+- if (((length > mtu) || (skb && skb_is_gso(skb))) &&
++ if (((length > mtu) || (skb && skb_has_frags(skb))) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
+ err = ip_ufo_append_data(sk, queue, getfrag, from, length,
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index c45a155a3296..6768ce2f50b1 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2727,7 +2727,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
+ RT_SCOPE_LINK);
+ goto make_route;
+ }
+- if (fl4->saddr) {
++ if (!fl4->saddr) {
+ if (ipv4_is_multicast(fl4->daddr))
+ fl4->saddr = inet_select_addr(dev_out, 0,
+ fl4->flowi4_scope);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 872b41d04a39..c1ed01eb2012 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1469,7 +1469,10 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+ tp->lost_cnt_hint -= tcp_skb_pcount(prev);
+ }
+
+- TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
++ TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
++ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
++ TCP_SKB_CB(prev)->end_seq++;
++
+ if (skb == tcp_highest_sack(sk))
+ tcp_advance_highest_sack(sk, skb);
+
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 3add4860777f..0d5a11834d91 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -933,6 +933,9 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
+ static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
+ unsigned int mss_now)
+ {
++ /* Make sure we own this skb before messing gso_size/gso_segs */
++ WARN_ON_ONCE(skb_cloned(skb));
++
+ if (skb->len <= mss_now || !sk_can_gso(sk) ||
+ skb->ip_summed == CHECKSUM_NONE) {
+ /* Avoid the costly divide in the normal
+@@ -1014,9 +1017,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ if (nsize < 0)
+ nsize = 0;
+
+- if (skb_cloned(skb) &&
+- skb_is_nonlinear(skb) &&
+- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
++ if (skb_unclone(skb, GFP_ATOMIC))
+ return -ENOMEM;
+
+ /* Get a new skb... force flag on. */
+@@ -2129,6 +2130,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+ int oldpcount = tcp_skb_pcount(skb);
+
+ if (unlikely(oldpcount > 1)) {
++ if (skb_unclone(skb, GFP_ATOMIC))
++ return -ENOMEM;
+ tcp_init_tso_segs(sk, skb, cur_mss);
+ tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
+ }
+diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
+index 73f1a00a96af..e38290b7c7a1 100644
+--- a/net/ipv6/inet6_hashtables.c
++++ b/net/ipv6/inet6_hashtables.c
+@@ -110,7 +110,7 @@ begintw:
+ goto out;
+ }
+ if (!INET6_TW_MATCH(sk, net, hash, saddr, daddr, ports, dif)) {
+- sock_put(sk);
++ inet_twsk_put(inet_twsk(sk));
+ goto begintw;
+ }
+ goto out;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 91d0711caec6..97675bfcab9f 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1342,7 +1342,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ skb = skb_peek_tail(&sk->sk_write_queue);
+ cork->length += length;
+ if (((length > mtu) ||
+- (skb && skb_is_gso(skb))) &&
++ (skb && skb_has_frags(skb))) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->dst.dev->features & NETIF_F_UFO)) {
+ err = ip6_ufo_append_data(sk, getfrag, from, length,
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 18ea73c5d628..bc9103de753b 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -791,7 +791,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
+ }
+
+ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
+- struct flowi6 *fl6, int flags)
++ struct flowi6 *fl6, int flags, bool input)
+ {
+ struct fib6_node *fn;
+ struct rt6_info *rt, *nrt;
+@@ -799,8 +799,11 @@ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
+ int attempts = 3;
+ int err;
+ int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
++ int local = RTF_NONEXTHOP;
+
+ strict |= flags & RT6_LOOKUP_F_IFACE;
++ if (input)
++ local |= RTF_LOCAL;
+
+ relookup:
+ read_lock_bh(&table->tb6_lock);
+@@ -820,7 +823,7 @@ restart:
+ read_unlock_bh(&table->tb6_lock);
+
+ if (!dst_get_neighbour_raw(&rt->dst)
+- && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
++ && !(rt->rt6i_flags & local))
+ nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
+ else if (!(rt->dst.flags & DST_HOST))
+ nrt = rt6_alloc_clone(rt, &fl6->daddr);
+@@ -864,7 +867,7 @@ out2:
+ static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
+ struct flowi6 *fl6, int flags)
+ {
+- return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
++ return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags, true);
+ }
+
+ void ip6_route_input(struct sk_buff *skb)
+@@ -890,7 +893,7 @@ void ip6_route_input(struct sk_buff *skb)
+ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
+ struct flowi6 *fl6, int flags)
+ {
+- return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
++ return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags, false);
+ }
+
+ struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index e579006526b3..85700795af03 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -357,7 +357,9 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
+ goto error_put_sess_tun;
+ }
+
++ local_bh_disable();
+ l2tp_xmit_skb(session, skb, session->hdr_len);
++ local_bh_enable();
+
+ sock_put(ps->tunnel_sock);
+ sock_put(sk);
+@@ -432,7 +434,9 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ skb->data[0] = ppph[0];
+ skb->data[1] = ppph[1];
+
++ local_bh_disable();
+ l2tp_xmit_skb(session, skb, session->hdr_len);
++ local_bh_enable();
+
+ sock_put(sk_tun);
+ sock_put(sk);
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 73495f18d95a..a9cf593f107f 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -708,6 +708,8 @@ struct tpt_led_trigger {
+ * that the scan completed.
+ * @SCAN_ABORTED: Set for our scan work function when the driver reported
+ * a scan complete for an aborted scan.
++ * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
++ * cancelled.
+ */
+ enum {
+ SCAN_SW_SCANNING,
+@@ -715,6 +717,7 @@ enum {
+ SCAN_OFF_CHANNEL,
+ SCAN_COMPLETED,
+ SCAN_ABORTED,
++ SCAN_HW_CANCELLED,
+ };
+
+ /**
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 7d882fc3ca96..db01d02ef372 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2780,6 +2780,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
+ case NL80211_IFTYPE_ADHOC:
+ if (!bssid)
+ return 0;
++ if (compare_ether_addr(sdata->vif.addr, hdr->addr2) == 0 ||
++ compare_ether_addr(sdata->u.ibss.bssid, hdr->addr2) == 0)
++ return 0;
+ if (ieee80211_is_beacon(hdr->frame_control)) {
+ return 1;
+ }
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index 5279300163a5..0aeea498e499 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -224,6 +224,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
+ enum ieee80211_band band;
+ int i, ielen, n_chans;
+
++ if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
++ return false;
++
+ do {
+ if (local->hw_scan_band == IEEE80211_NUM_BANDS)
+ return false;
+@@ -815,7 +818,23 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
+ if (!local->scan_req)
+ goto out;
+
++ /*
++ * We have a scan running and the driver already reported completion,
++ * but the worker hasn't run yet or is stuck on the mutex - mark it as
++ * cancelled.
++ */
++ if (test_bit(SCAN_HW_SCANNING, &local->scanning) &&
++ test_bit(SCAN_COMPLETED, &local->scanning)) {
++ set_bit(SCAN_HW_CANCELLED, &local->scanning);
++ goto out;
++ }
++
+ if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
++ /*
++ * Make sure that __ieee80211_scan_completed doesn't trigger a
++ * scan on another band.
++ */
++ set_bit(SCAN_HW_CANCELLED, &local->scanning);
+ if (local->ops->cancel_hw_scan)
+ drv_cancel_hw_scan(local, local->scan_sdata);
+ goto out;
+diff --git a/net/mac80211/status.c b/net/mac80211/status.c
+index 67df50ec417f..1a49354cda1e 100644
+--- a/net/mac80211/status.c
++++ b/net/mac80211/status.c
+@@ -181,6 +181,9 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+
++ if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
++ sta->last_rx = jiffies;
++
+ if (ieee80211_is_data_qos(mgmt->frame_control)) {
+ struct ieee80211_hdr *hdr = (void *) skb->data;
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
+index 93faf6a3a637..4a8c55bd60ee 100644
+--- a/net/netfilter/nf_conntrack_sip.c
++++ b/net/netfilter/nf_conntrack_sip.c
+@@ -1468,7 +1468,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
+
+ msglen = origlen = end - dptr;
+ if (msglen > datalen)
+- return NF_DROP;
++ return NF_ACCEPT;
+
+ ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen);
+ if (ret != NF_ACCEPT)
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index 32ba8d0e50e2..cf3e22c586a6 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -518,7 +518,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
+ * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
+ */
+ if (!sctp_checksum_disable) {
+- if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
++ if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
++ (dst_xfrm(dst) != NULL) || packet->ipfragok) {
+ __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
+
+ /* 3) Put the resultant value into the checksum field in the
+diff --git a/net/socket.c b/net/socket.c
+index cf546a38bed3..bf7adaae4b7f 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1876,6 +1876,16 @@ struct used_address {
+ unsigned int name_len;
+ };
+
++static int copy_msghdr_from_user(struct msghdr *kmsg,
++ struct msghdr __user *umsg)
++{
++ if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
++ return -EFAULT;
++ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
++ return -EINVAL;
++ return 0;
++}
++
+ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+ struct msghdr *msg_sys, unsigned flags,
+ struct used_address *used_address)
+@@ -1894,8 +1904,11 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+ if (MSG_CMSG_COMPAT & flags) {
+ if (get_compat_msghdr(msg_sys, msg_compat))
+ return -EFAULT;
+- } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
+- return -EFAULT;
++ } else {
++ err = copy_msghdr_from_user(msg_sys, msg);
++ if (err)
++ return err;
++ }
+
+ /* do not move before msg_sys is valid */
+ err = -EMSGSIZE;
+@@ -2110,8 +2123,11 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+ if (MSG_CMSG_COMPAT & flags) {
+ if (get_compat_msghdr(msg_sys, msg_compat))
+ return -EFAULT;
+- } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
+- return -EFAULT;
++ } else {
++ err = copy_msghdr_from_user(msg_sys, msg);
++ if (err)
++ return err;
++ }
+
+ err = -EMSGSIZE;
+ if (msg_sys->msg_iovlen > UIO_MAXIOV)
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 56115631e5c8..5122b22540be 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1236,6 +1236,15 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
+ return 0;
+ }
+
++static void unix_sock_inherit_flags(const struct socket *old,
++ struct socket *new)
++{
++ if (test_bit(SOCK_PASSCRED, &old->flags))
++ set_bit(SOCK_PASSCRED, &new->flags);
++ if (test_bit(SOCK_PASSSEC, &old->flags))
++ set_bit(SOCK_PASSSEC, &new->flags);
++}
++
+ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
+ {
+ struct sock *sk = sock->sk;
+@@ -1270,6 +1279,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
+ /* attach accepted sock to socket */
+ unix_state_lock(tsk);
+ newsock->state = SS_CONNECTED;
++ unix_sock_inherit_flags(sock, newsock);
+ sock_graft(tsk, newsock);
+ unix_state_unlock(tsk);
+ return 0;
+diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
+index c4ad7958af52..617a310025b1 100644
+--- a/net/wireless/radiotap.c
++++ b/net/wireless/radiotap.c
+@@ -95,6 +95,10 @@ int ieee80211_radiotap_iterator_init(
+ struct ieee80211_radiotap_header *radiotap_header,
+ int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
+ {
++ /* check the radiotap header can actually be present */
++ if (max_length < sizeof(struct ieee80211_radiotap_header))
++ return -EINVAL;
++
+ /* Linux only supports version 0 radiotap format */
+ if (radiotap_header->it_version)
+ return -EINVAL;
+@@ -129,7 +133,8 @@ int ieee80211_radiotap_iterator_init(
+ */
+
+ if ((unsigned long)iterator->_arg -
+- (unsigned long)iterator->_rtheader >
++ (unsigned long)iterator->_rtheader +
++ sizeof(uint32_t) >
+ (unsigned long)iterator->_max_length)
+ return -EINVAL;
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1b43fdecdae8..92c913d2aa3d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5798,6 +5798,8 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
+ SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
++ SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
++ SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_ASUS_MODE4),
+ SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
+ SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
+ SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
+diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
+index 3642e06e1d09..e44c0e3a89b6 100644
+--- a/sound/soc/codecs/wm_hubs.c
++++ b/sound/soc/codecs/wm_hubs.c
+@@ -414,6 +414,7 @@ static int hp_supply_event(struct snd_soc_dapm_widget *w,
+ hubs->hp_startup_mode);
+ break;
+ }
++ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index b516488d72b1..1d83a4039738 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -1523,7 +1523,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
+ w->active ? "active" : "inactive");
+
+ list_for_each_entry(p, &w->sources, list_sink) {
+- if (p->connected && !p->connected(w, p->sink))
++ if (p->connected && !p->connected(w, p->source))
+ continue;
+
+ if (p->connect)
+diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c
+index d5724d8f6736..1226631ec349 100644
+--- a/sound/usb/usx2y/usbusx2yaudio.c
++++ b/sound/usb/usx2y/usbusx2yaudio.c
+@@ -299,19 +299,6 @@ static void usX2Y_error_urb_status(struct usX2Ydev *usX2Y,
+ usX2Y_clients_stop(usX2Y);
+ }
+
+-static void usX2Y_error_sequence(struct usX2Ydev *usX2Y,
+- struct snd_usX2Y_substream *subs, struct urb *urb)
+-{
+- snd_printk(KERN_ERR
+-"Sequence Error!(hcd_frame=%i ep=%i%s;wait=%i,frame=%i).\n"
+-"Most propably some urb of usb-frame %i is still missing.\n"
+-"Cause could be too long delays in usb-hcd interrupt handling.\n",
+- usb_get_current_frame_number(usX2Y->dev),
+- subs->endpoint, usb_pipein(urb->pipe) ? "in" : "out",
+- usX2Y->wait_iso_frame, urb->start_frame, usX2Y->wait_iso_frame);
+- usX2Y_clients_stop(usX2Y);
+-}
+-
+ static void i_usX2Y_urb_complete(struct urb *urb)
+ {
+ struct snd_usX2Y_substream *subs = urb->context;
+@@ -328,12 +315,9 @@ static void i_usX2Y_urb_complete(struct urb *urb)
+ usX2Y_error_urb_status(usX2Y, subs, urb);
+ return;
+ }
+- if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
+- subs->completed_urb = urb;
+- else {
+- usX2Y_error_sequence(usX2Y, subs, urb);
+- return;
+- }
++
++ subs->completed_urb = urb;
++
+ {
+ struct snd_usX2Y_substream *capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE],
+ *playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
+index a51340f6f2db..83a8b8d68bf0 100644
+--- a/sound/usb/usx2y/usx2yhwdeppcm.c
++++ b/sound/usb/usx2y/usx2yhwdeppcm.c
+@@ -244,13 +244,8 @@ static void i_usX2Y_usbpcm_urb_complete(struct urb *urb)
+ usX2Y_error_urb_status(usX2Y, subs, urb);
+ return;
+ }
+- if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
+- subs->completed_urb = urb;
+- else {
+- usX2Y_error_sequence(usX2Y, subs, urb);
+- return;
+- }
+
++ subs->completed_urb = urb;
+ capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
+ capsubs2 = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
+ playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
+diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
+index 5177964943e7..714fc359eb7a 100644
+--- a/tools/perf/builtin-sched.c
++++ b/tools/perf/builtin-sched.c
+@@ -14,6 +14,7 @@
+ #include "util/debug.h"
+
+ #include <sys/prctl.h>
++#include <sys/resource.h>
+
+ #include <semaphore.h>
+ #include <pthread.h>
diff --git a/1053_linux-3.2.54.patch b/1053_linux-3.2.54.patch
new file mode 100644
index 00000000..ffc221d8
--- /dev/null
+++ b/1053_linux-3.2.54.patch
@@ -0,0 +1,6825 @@
+diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
+index 1f2463671a1a..2a680896ee36 100644
+--- a/Documentation/sysctl/kernel.txt
++++ b/Documentation/sysctl/kernel.txt
+@@ -283,13 +283,24 @@ Default value is "/sbin/hotplug".
+ kptr_restrict:
+
+ This toggle indicates whether restrictions are placed on
+-exposing kernel addresses via /proc and other interfaces. When
+-kptr_restrict is set to (0), there are no restrictions. When
+-kptr_restrict is set to (1), the default, kernel pointers
+-printed using the %pK format specifier will be replaced with 0's
+-unless the user has CAP_SYSLOG. When kptr_restrict is set to
+-(2), kernel pointers printed using %pK will be replaced with 0's
+-regardless of privileges.
++exposing kernel addresses via /proc and other interfaces.
++
++When kptr_restrict is set to (0), the default, there are no restrictions.
++
++When kptr_restrict is set to (1), kernel pointers printed using the %pK
++format specifier will be replaced with 0's unless the user has CAP_SYSLOG
++and effective user and group ids are equal to the real ids. This is
++because %pK checks are done at read() time rather than open() time, so
++if permissions are elevated between the open() and the read() (e.g via
++a setuid binary) then %pK will not leak kernel pointers to unprivileged
++users. Note, this is a temporary solution only. The correct long-term
++solution is to do the permission checks at open() time. Consider removing
++world read permissions from files that use %pK, and using dmesg_restrict
++to protect against uses of %pK in dmesg(8) if leaking kernel pointer
++values to unprivileged users is a concern.
++
++When kptr_restrict is set to (2), kernel pointers printed using
++%pK will be replaced with 0's regardless of privileges.
+
+ ==============================================================
+
+diff --git a/Makefile b/Makefile
+index 90f57dc45a60..848be2634e15 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 53
++SUBLEVEL = 54
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
+index 7bb8bf972c09..b7c5d5db58aa 100644
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -307,4 +307,12 @@
+ .size \name , . - \name
+ .endm
+
++ .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
++#ifndef CONFIG_CPU_USE_DOMAINS
++ adds \tmp, \addr, #\size - 1
++ sbcccs \tmp, \tmp, \limit
++ bcs \bad
++#endif
++ .endm
++
+ #endif /* __ASM_ASSEMBLER_H__ */
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index b293616a1a1a..292c3f88f01d 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -101,28 +101,39 @@ extern int __get_user_1(void *);
+ extern int __get_user_2(void *);
+ extern int __get_user_4(void *);
+
+-#define __get_user_x(__r2,__p,__e,__s,__i...) \
++#define __GUP_CLOBBER_1 "lr", "cc"
++#ifdef CONFIG_CPU_USE_DOMAINS
++#define __GUP_CLOBBER_2 "ip", "lr", "cc"
++#else
++#define __GUP_CLOBBER_2 "lr", "cc"
++#endif
++#define __GUP_CLOBBER_4 "lr", "cc"
++
++#define __get_user_x(__r2,__p,__e,__l,__s) \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%1", "r2") \
++ __asmeq("%3", "r1") \
+ "bl __get_user_" #__s \
+ : "=&r" (__e), "=r" (__r2) \
+- : "0" (__p) \
+- : __i, "cc")
++ : "0" (__p), "r" (__l) \
++ : __GUP_CLOBBER_##__s)
+
+ #define get_user(x,p) \
+ ({ \
++ unsigned long __limit = current_thread_info()->addr_limit - 1; \
+ register const typeof(*(p)) __user *__p asm("r0") = (p);\
+ register unsigned long __r2 asm("r2"); \
++ register unsigned long __l asm("r1") = __limit; \
+ register int __e asm("r0"); \
+ switch (sizeof(*(__p))) { \
+ case 1: \
+- __get_user_x(__r2, __p, __e, 1, "lr"); \
+- break; \
++ __get_user_x(__r2, __p, __e, __l, 1); \
++ break; \
+ case 2: \
+- __get_user_x(__r2, __p, __e, 2, "r3", "lr"); \
++ __get_user_x(__r2, __p, __e, __l, 2); \
+ break; \
+ case 4: \
+- __get_user_x(__r2, __p, __e, 4, "lr"); \
++ __get_user_x(__r2, __p, __e, __l, 4); \
+ break; \
+ default: __e = __get_user_bad(); break; \
+ } \
+@@ -135,31 +146,34 @@ extern int __put_user_2(void *, unsigned int);
+ extern int __put_user_4(void *, unsigned int);
+ extern int __put_user_8(void *, unsigned long long);
+
+-#define __put_user_x(__r2,__p,__e,__s) \
++#define __put_user_x(__r2,__p,__e,__l,__s) \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%2", "r2") \
++ __asmeq("%3", "r1") \
+ "bl __put_user_" #__s \
+ : "=&r" (__e) \
+- : "0" (__p), "r" (__r2) \
++ : "0" (__p), "r" (__r2), "r" (__l) \
+ : "ip", "lr", "cc")
+
+ #define put_user(x,p) \
+ ({ \
++ unsigned long __limit = current_thread_info()->addr_limit - 1; \
+ register const typeof(*(p)) __r2 asm("r2") = (x); \
+ register const typeof(*(p)) __user *__p asm("r0") = (p);\
++ register unsigned long __l asm("r1") = __limit; \
+ register int __e asm("r0"); \
+ switch (sizeof(*(__p))) { \
+ case 1: \
+- __put_user_x(__r2, __p, __e, 1); \
++ __put_user_x(__r2, __p, __e, __l, 1); \
+ break; \
+ case 2: \
+- __put_user_x(__r2, __p, __e, 2); \
++ __put_user_x(__r2, __p, __e, __l, 2); \
+ break; \
+ case 4: \
+- __put_user_x(__r2, __p, __e, 4); \
++ __put_user_x(__r2, __p, __e, __l, 4); \
+ break; \
+ case 8: \
+- __put_user_x(__r2, __p, __e, 8); \
++ __put_user_x(__r2, __p, __e, __l, 8); \
+ break; \
+ default: __e = __put_user_bad(); break; \
+ } \
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index e68d2512f683..d9e3c61d414d 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -468,6 +468,7 @@ EXPORT_SYMBOL(kernel_thread);
+ unsigned long get_wchan(struct task_struct *p)
+ {
+ struct stackframe frame;
++ unsigned long stack_page;
+ int count = 0;
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+@@ -476,9 +477,11 @@ unsigned long get_wchan(struct task_struct *p)
+ frame.sp = thread_saved_sp(p);
+ frame.lr = 0; /* recovered from the stack */
+ frame.pc = thread_saved_pc(p);
++ stack_page = (unsigned long)task_stack_page(p);
+ do {
+- int ret = unwind_frame(&frame);
+- if (ret < 0)
++ if (frame.sp < stack_page ||
++ frame.sp >= stack_page + THREAD_SIZE ||
++ unwind_frame(&frame) < 0)
+ return 0;
+ if (!in_sched_functions(frame.pc))
+ return frame.pc;
+diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
+index 00f79e59985b..af4e8c8a5422 100644
+--- a/arch/arm/kernel/stacktrace.c
++++ b/arch/arm/kernel/stacktrace.c
+@@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame)
+ high = ALIGN(low, THREAD_SIZE);
+
+ /* check current frame pointer is within bounds */
+- if (fp < (low + 12) || fp + 4 >= high)
++ if (fp < low + 12 || fp > high - 4)
+ return -EINVAL;
+
+ /* restore the registers from the stack frame */
+diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
+index 1b049cd7a49a..4306fbf8cf86 100644
+--- a/arch/arm/lib/getuser.S
++++ b/arch/arm/lib/getuser.S
+@@ -16,8 +16,9 @@
+ * __get_user_X
+ *
+ * Inputs: r0 contains the address
++ * r1 contains the address limit, which must be preserved
+ * Outputs: r0 is the error code
+- * r2, r3 contains the zero-extended value
++ * r2 contains the zero-extended value
+ * lr corrupted
+ *
+ * No other registers must be altered. (see <asm/uaccess.h>
+@@ -27,33 +28,39 @@
+ * Note also that it is intended that __get_user_bad is not global.
+ */
+ #include <linux/linkage.h>
++#include <asm/assembler.h>
+ #include <asm/errno.h>
+ #include <asm/domain.h>
+
+ ENTRY(__get_user_1)
++ check_uaccess r0, 1, r1, r2, __get_user_bad
+ 1: T(ldrb) r2, [r0]
+ mov r0, #0
+ mov pc, lr
+ ENDPROC(__get_user_1)
+
+ ENTRY(__get_user_2)
+-#ifdef CONFIG_THUMB2_KERNEL
+-2: T(ldrb) r2, [r0]
+-3: T(ldrb) r3, [r0, #1]
++ check_uaccess r0, 2, r1, r2, __get_user_bad
++#ifdef CONFIG_CPU_USE_DOMAINS
++rb .req ip
++2: ldrbt r2, [r0], #1
++3: ldrbt rb, [r0], #0
+ #else
+-2: T(ldrb) r2, [r0], #1
+-3: T(ldrb) r3, [r0]
++rb .req r0
++2: ldrb r2, [r0]
++3: ldrb rb, [r0, #1]
+ #endif
+ #ifndef __ARMEB__
+- orr r2, r2, r3, lsl #8
++ orr r2, r2, rb, lsl #8
+ #else
+- orr r2, r3, r2, lsl #8
++ orr r2, rb, r2, lsl #8
+ #endif
+ mov r0, #0
+ mov pc, lr
+ ENDPROC(__get_user_2)
+
+ ENTRY(__get_user_4)
++ check_uaccess r0, 4, r1, r2, __get_user_bad
+ 4: T(ldr) r2, [r0]
+ mov r0, #0
+ mov pc, lr
+diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
+index c023fc11e86c..9a897fab4d97 100644
+--- a/arch/arm/lib/putuser.S
++++ b/arch/arm/lib/putuser.S
+@@ -16,6 +16,7 @@
+ * __put_user_X
+ *
+ * Inputs: r0 contains the address
++ * r1 contains the address limit, which must be preserved
+ * r2, r3 contains the value
+ * Outputs: r0 is the error code
+ * lr corrupted
+@@ -27,16 +28,19 @@
+ * Note also that it is intended that __put_user_bad is not global.
+ */
+ #include <linux/linkage.h>
++#include <asm/assembler.h>
+ #include <asm/errno.h>
+ #include <asm/domain.h>
+
+ ENTRY(__put_user_1)
++ check_uaccess r0, 1, r1, ip, __put_user_bad
+ 1: T(strb) r2, [r0]
+ mov r0, #0
+ mov pc, lr
+ ENDPROC(__put_user_1)
+
+ ENTRY(__put_user_2)
++ check_uaccess r0, 2, r1, ip, __put_user_bad
+ mov ip, r2, lsr #8
+ #ifdef CONFIG_THUMB2_KERNEL
+ #ifndef __ARMEB__
+@@ -60,12 +64,14 @@ ENTRY(__put_user_2)
+ ENDPROC(__put_user_2)
+
+ ENTRY(__put_user_4)
++ check_uaccess r0, 4, r1, ip, __put_user_bad
+ 4: T(str) r2, [r0]
+ mov r0, #0
+ mov pc, lr
+ ENDPROC(__put_user_4)
+
+ ENTRY(__put_user_8)
++ check_uaccess r0, 8, r1, ip, __put_user_bad
+ #ifdef CONFIG_THUMB2_KERNEL
+ 5: T(str) r2, [r0]
+ 6: T(str) r3, [r0, #4]
+diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
+index 38a44f9b9da2..5b91e4569b19 100644
+--- a/arch/arm/mach-footbridge/common.c
++++ b/arch/arm/mach-footbridge/common.c
+@@ -15,6 +15,7 @@
+ #include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/spinlock.h>
++#include <video/vga.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/page.h>
+@@ -197,6 +198,8 @@ void __init footbridge_map_io(void)
+ */
+ if (footbridge_cfn_mode())
+ iotable_init(ebsa285_host_io_desc, ARRAY_SIZE(ebsa285_host_io_desc));
++
++ vga_base = PCIMEM_BASE;
+ }
+
+ #ifdef CONFIG_FOOTBRIDGE_ADDIN
+diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
+index 18c32a5541d9..a8dfa00e4596 100644
+--- a/arch/arm/mach-footbridge/dc21285.c
++++ b/arch/arm/mach-footbridge/dc21285.c
+@@ -18,7 +18,6 @@
+ #include <linux/irq.h>
+ #include <linux/io.h>
+ #include <linux/spinlock.h>
+-#include <video/vga.h>
+
+ #include <asm/irq.h>
+ #include <asm/system.h>
+@@ -297,7 +296,6 @@ void __init dc21285_preinit(void)
+ int cfn_mode;
+
+ pcibios_min_mem = 0x81000000;
+- vga_base = PCIMEM_BASE;
+
+ mem_size = (unsigned int)high_memory - PAGE_OFFSET;
+ for (mem_mask = 0x00100000; mem_mask < 0x10000000; mem_mask <<= 1)
+diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
+index 5de49c33e4d4..892d0d647eb4 100644
+--- a/arch/arm/mach-integrator/integrator_cp.c
++++ b/arch/arm/mach-integrator/integrator_cp.c
+@@ -384,7 +384,8 @@ static struct amba_device aaci_device = {
+ static void cp_clcd_enable(struct clcd_fb *fb)
+ {
+ struct fb_var_screeninfo *var = &fb->fb.var;
+- u32 val = CM_CTRL_STATIC1 | CM_CTRL_STATIC2;
++ u32 val = CM_CTRL_STATIC1 | CM_CTRL_STATIC2
++ | CM_CTRL_LCDEN0 | CM_CTRL_LCDEN1;
+
+ if (var->bits_per_pixel <= 8 ||
+ (var->bits_per_pixel == 16 && var->green.length == 5))
+diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
+index 01e9d643394a..0e253481f0ef 100644
+--- a/arch/arm/mach-pxa/reset.c
++++ b/arch/arm/mach-pxa/reset.c
+@@ -12,6 +12,7 @@
+
+ #include <mach/regs-ost.h>
+ #include <mach/reset.h>
++#include <mach/smemc.h>
+
+ unsigned int reset_status;
+ EXPORT_SYMBOL(reset_status);
+@@ -79,6 +80,12 @@ static void do_hw_reset(void)
+ OWER = OWER_WME;
+ OSSR = OSSR_M3;
+ OSMR3 = OSCR + 368640; /* ... in 100 ms */
++ /*
++ * SDRAM hangs on watchdog reset on Marvell PXA270 (erratum 71)
++ * we put SDRAM into self-refresh to prevent that
++ */
++ while (1)
++ writel_relaxed(MDREFR_SLFRSH, MDREFR);
+ }
+
+ void arch_reset(char mode, const char *cmd)
+@@ -99,4 +106,3 @@ void arch_reset(char mode, const char *cmd)
+ break;
+ }
+ }
+-
+diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
+index 402b0c96613b..33dd57cfa3fd 100644
+--- a/arch/arm/mach-pxa/tosa.c
++++ b/arch/arm/mach-pxa/tosa.c
+@@ -424,57 +424,57 @@ static struct platform_device tosa_power_device = {
+ * Tosa Keyboard
+ */
+ static const uint32_t tosakbd_keymap[] = {
+- KEY(0, 2, KEY_W),
+- KEY(0, 6, KEY_K),
+- KEY(0, 7, KEY_BACKSPACE),
+- KEY(0, 8, KEY_P),
+- KEY(1, 1, KEY_Q),
+- KEY(1, 2, KEY_E),
+- KEY(1, 3, KEY_T),
+- KEY(1, 4, KEY_Y),
+- KEY(1, 6, KEY_O),
+- KEY(1, 7, KEY_I),
+- KEY(1, 8, KEY_COMMA),
+- KEY(2, 1, KEY_A),
+- KEY(2, 2, KEY_D),
+- KEY(2, 3, KEY_G),
+- KEY(2, 4, KEY_U),
+- KEY(2, 6, KEY_L),
+- KEY(2, 7, KEY_ENTER),
+- KEY(2, 8, KEY_DOT),
+- KEY(3, 1, KEY_Z),
+- KEY(3, 2, KEY_C),
+- KEY(3, 3, KEY_V),
+- KEY(3, 4, KEY_J),
+- KEY(3, 5, TOSA_KEY_ADDRESSBOOK),
+- KEY(3, 6, TOSA_KEY_CANCEL),
+- KEY(3, 7, TOSA_KEY_CENTER),
+- KEY(3, 8, TOSA_KEY_OK),
+- KEY(3, 9, KEY_LEFTSHIFT),
+- KEY(4, 1, KEY_S),
+- KEY(4, 2, KEY_R),
+- KEY(4, 3, KEY_B),
+- KEY(4, 4, KEY_N),
+- KEY(4, 5, TOSA_KEY_CALENDAR),
+- KEY(4, 6, TOSA_KEY_HOMEPAGE),
+- KEY(4, 7, KEY_LEFTCTRL),
+- KEY(4, 8, TOSA_KEY_LIGHT),
+- KEY(4, 10, KEY_RIGHTSHIFT),
+- KEY(5, 1, KEY_TAB),
+- KEY(5, 2, KEY_SLASH),
+- KEY(5, 3, KEY_H),
+- KEY(5, 4, KEY_M),
+- KEY(5, 5, TOSA_KEY_MENU),
+- KEY(5, 7, KEY_UP),
+- KEY(5, 11, TOSA_KEY_FN),
+- KEY(6, 1, KEY_X),
+- KEY(6, 2, KEY_F),
+- KEY(6, 3, KEY_SPACE),
+- KEY(6, 4, KEY_APOSTROPHE),
+- KEY(6, 5, TOSA_KEY_MAIL),
+- KEY(6, 6, KEY_LEFT),
+- KEY(6, 7, KEY_DOWN),
+- KEY(6, 8, KEY_RIGHT),
++ KEY(0, 1, KEY_W),
++ KEY(0, 5, KEY_K),
++ KEY(0, 6, KEY_BACKSPACE),
++ KEY(0, 7, KEY_P),
++ KEY(1, 0, KEY_Q),
++ KEY(1, 1, KEY_E),
++ KEY(1, 2, KEY_T),
++ KEY(1, 3, KEY_Y),
++ KEY(1, 5, KEY_O),
++ KEY(1, 6, KEY_I),
++ KEY(1, 7, KEY_COMMA),
++ KEY(2, 0, KEY_A),
++ KEY(2, 1, KEY_D),
++ KEY(2, 2, KEY_G),
++ KEY(2, 3, KEY_U),
++ KEY(2, 5, KEY_L),
++ KEY(2, 6, KEY_ENTER),
++ KEY(2, 7, KEY_DOT),
++ KEY(3, 0, KEY_Z),
++ KEY(3, 1, KEY_C),
++ KEY(3, 2, KEY_V),
++ KEY(3, 3, KEY_J),
++ KEY(3, 4, TOSA_KEY_ADDRESSBOOK),
++ KEY(3, 5, TOSA_KEY_CANCEL),
++ KEY(3, 6, TOSA_KEY_CENTER),
++ KEY(3, 7, TOSA_KEY_OK),
++ KEY(3, 8, KEY_LEFTSHIFT),
++ KEY(4, 0, KEY_S),
++ KEY(4, 1, KEY_R),
++ KEY(4, 2, KEY_B),
++ KEY(4, 3, KEY_N),
++ KEY(4, 4, TOSA_KEY_CALENDAR),
++ KEY(4, 5, TOSA_KEY_HOMEPAGE),
++ KEY(4, 6, KEY_LEFTCTRL),
++ KEY(4, 7, TOSA_KEY_LIGHT),
++ KEY(4, 9, KEY_RIGHTSHIFT),
++ KEY(5, 0, KEY_TAB),
++ KEY(5, 1, KEY_SLASH),
++ KEY(5, 2, KEY_H),
++ KEY(5, 3, KEY_M),
++ KEY(5, 4, TOSA_KEY_MENU),
++ KEY(5, 6, KEY_UP),
++ KEY(5, 10, TOSA_KEY_FN),
++ KEY(6, 0, KEY_X),
++ KEY(6, 1, KEY_F),
++ KEY(6, 2, KEY_SPACE),
++ KEY(6, 3, KEY_APOSTROPHE),
++ KEY(6, 4, TOSA_KEY_MAIL),
++ KEY(6, 5, KEY_LEFT),
++ KEY(6, 6, KEY_DOWN),
++ KEY(6, 7, KEY_RIGHT),
+ };
+
+ static struct matrix_keymap_data tosakbd_keymap_data = {
+diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
+index 3dd133f18415..ef8d9d805e34 100644
+--- a/arch/arm/mach-sa1100/assabet.c
++++ b/arch/arm/mach-sa1100/assabet.c
+@@ -411,6 +411,9 @@ static void __init assabet_map_io(void)
+ * Its called GPCLKR0 in my SA1110 manual.
+ */
+ Ser1SDCR0 |= SDCR0_SUS;
++ MSC1 = (MSC1 & ~0xffff) |
++ MSC_NonBrst | MSC_32BitStMem |
++ MSC_RdAcc(2) | MSC_WrAcc(2) | MSC_Rec(0);
+
+ if (machine_has_neponset()) {
+ #ifdef CONFIG_ASSABET_NEPONSET
+diff --git a/arch/avr32/boot/u-boot/head.S b/arch/avr32/boot/u-boot/head.S
+index 4488fa27fe94..2ffc298f061b 100644
+--- a/arch/avr32/boot/u-boot/head.S
++++ b/arch/avr32/boot/u-boot/head.S
+@@ -8,6 +8,8 @@
+ * published by the Free Software Foundation.
+ */
+ #include <asm/setup.h>
++#include <asm/thread_info.h>
++#include <asm/sysreg.h>
+
+ /*
+ * The kernel is loaded where we want it to be and all caches
+@@ -20,11 +22,6 @@
+ .section .init.text,"ax"
+ .global _start
+ _start:
+- /* Check if the boot loader actually provided a tag table */
+- lddpc r0, magic_number
+- cp.w r12, r0
+- brne no_tag_table
+-
+ /* Initialize .bss */
+ lddpc r2, bss_start_addr
+ lddpc r3, end_addr
+@@ -34,6 +31,25 @@ _start:
+ cp r2, r3
+ brlo 1b
+
++ /* Initialize status register */
++ lddpc r0, init_sr
++ mtsr SYSREG_SR, r0
++
++ /* Set initial stack pointer */
++ lddpc sp, stack_addr
++ sub sp, -THREAD_SIZE
++
++#ifdef CONFIG_FRAME_POINTER
++ /* Mark last stack frame */
++ mov lr, 0
++ mov r7, 0
++#endif
++
++ /* Check if the boot loader actually provided a tag table */
++ lddpc r0, magic_number
++ cp.w r12, r0
++ brne no_tag_table
++
+ /*
+ * Save the tag table address for later use. This must be done
+ * _after_ .bss has been initialized...
+@@ -53,8 +69,15 @@ bss_start_addr:
+ .long __bss_start
+ end_addr:
+ .long _end
++init_sr:
++ .long 0x007f0000 /* Supervisor mode, everything masked */
++stack_addr:
++ .long init_thread_union
++panic_addr:
++ .long panic
+
+ no_tag_table:
+ sub r12, pc, (. - 2f)
+- bral panic
++ /* branch to panic() which can be far away with that construct */
++ lddpc pc, panic_addr
+ 2: .asciz "Boot loader didn't provide correct magic number\n"
+diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
+index 169268c40ae2..a91e8980a590 100644
+--- a/arch/avr32/kernel/entry-avr32b.S
++++ b/arch/avr32/kernel/entry-avr32b.S
+@@ -399,9 +399,10 @@ handle_critical:
+ /* We should never get here... */
+ bad_return:
+ sub r12, pc, (. - 1f)
+- bral panic
++ lddpc pc, 2f
+ .align 2
+ 1: .asciz "Return from critical exception!"
++2: .long panic
+
+ .align 1
+ do_bus_error_write:
+diff --git a/arch/avr32/kernel/head.S b/arch/avr32/kernel/head.S
+index 6163bd0acb95..59eae6dfbed2 100644
+--- a/arch/avr32/kernel/head.S
++++ b/arch/avr32/kernel/head.S
+@@ -10,33 +10,13 @@
+ #include <linux/linkage.h>
+
+ #include <asm/page.h>
+-#include <asm/thread_info.h>
+-#include <asm/sysreg.h>
+
+ .section .init.text,"ax"
+ .global kernel_entry
+ kernel_entry:
+- /* Initialize status register */
+- lddpc r0, init_sr
+- mtsr SYSREG_SR, r0
+-
+- /* Set initial stack pointer */
+- lddpc sp, stack_addr
+- sub sp, -THREAD_SIZE
+-
+-#ifdef CONFIG_FRAME_POINTER
+- /* Mark last stack frame */
+- mov lr, 0
+- mov r7, 0
+-#endif
+-
+ /* Start the show */
+ lddpc pc, kernel_start_addr
+
+ .align 2
+-init_sr:
+- .long 0x007f0000 /* Supervisor mode, everything masked */
+-stack_addr:
+- .long init_thread_union
+ kernel_start_addr:
+ .long start_kernel
+diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h
+index ac12ae2b9286..db9a16c704f3 100644
+--- a/arch/cris/include/asm/io.h
++++ b/arch/cris/include/asm/io.h
+@@ -3,6 +3,7 @@
+
+ #include <asm/page.h> /* for __va, __pa */
+ #include <arch/io.h>
++#include <asm-generic/iomap.h>
+ #include <linux/kernel.h>
+
+ struct cris_io_operations
+diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
+index d9f397fae03e..fba7696368d4 100644
+--- a/arch/ia64/include/asm/processor.h
++++ b/arch/ia64/include/asm/processor.h
+@@ -320,7 +320,7 @@ struct thread_struct {
+ regs->loadrs = 0; \
+ regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \
+ regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
+- if (unlikely(!get_dumpable(current->mm))) { \
++ if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) { \
+ /* \
+ * Zap scratch regs to avoid leaking bits between processes with different \
+ * uid/privileges. \
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 836a5a19eb2c..fa1e56ba984f 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -445,6 +445,12 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
+ #endif /* CONFIG_ALTIVEC */
+ if (copy_fpr_to_user(&frame->mc_fregs, current))
+ return 1;
++
++ /*
++ * Clear the MSR VSX bit to indicate there is no valid state attached
++ * to this context, except in the specific case below where we set it.
++ */
++ msr &= ~MSR_VSX;
+ #ifdef CONFIG_VSX
+ /*
+ * Copy VSR 0-31 upper half from thread_struct to local
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index a50b5ec281dc..60d1f75bfdae 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -116,6 +116,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+ flush_fp_to_thread(current);
+ /* copy fpr regs and fpscr */
+ err |= copy_fpr_to_user(&sc->fp_regs, current);
++
++ /*
++ * Clear the MSR VSX bit to indicate there is no valid state attached
++ * to this context, except in the specific case below where we set it.
++ */
++ msr &= ~MSR_VSX;
+ #ifdef CONFIG_VSX
+ /*
+ * Copy VSX low doubleword to local buffer for formatting,
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index e74f86e3f9a9..304680aedb71 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -235,8 +235,6 @@ static u64 scan_dispatch_log(u64 stop_tb)
+ if (i == vpa->dtl_idx)
+ return 0;
+ while (i < vpa->dtl_idx) {
+- if (dtl_consumer)
+- dtl_consumer(dtl, i);
+ dtb = dtl->timebase;
+ tb_delta = dtl->enqueue_to_dispatch_time +
+ dtl->ready_to_enqueue_time;
+@@ -249,6 +247,8 @@ static u64 scan_dispatch_log(u64 stop_tb)
+ }
+ if (dtb > stop_tb)
+ break;
++ if (dtl_consumer)
++ dtl_consumer(dtl, i);
+ stolen += tb_delta;
+ ++i;
+ ++dtl;
+diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
+index f65af61996bd..dfb1c1926c3c 100644
+--- a/arch/powerpc/kernel/vio.c
++++ b/arch/powerpc/kernel/vio.c
+@@ -1351,11 +1351,15 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ const char *cp;
+
+ dn = dev->of_node;
+- if (!dn)
+- return -ENODEV;
++ if (!dn) {
++ strcpy(buf, "\n");
++ return strlen(buf);
++ }
+ cp = of_get_property(dn, "compatible", NULL);
+- if (!cp)
+- return -ENODEV;
++ if (!cp) {
++ strcpy(buf, "\n");
++ return strlen(buf);
++ }
+
+ return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
+ }
+diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
+index a9ce135893f8..3ec8b394146d 100644
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -35,7 +35,6 @@ static u8 *ctrblk;
+ static char keylen_flag;
+
+ struct s390_aes_ctx {
+- u8 iv[AES_BLOCK_SIZE];
+ u8 key[AES_MAX_KEY_SIZE];
+ long enc;
+ long dec;
+@@ -56,8 +55,7 @@ struct pcc_param {
+
+ struct s390_xts_ctx {
+ u8 key[32];
+- u8 xts_param[16];
+- struct pcc_param pcc;
++ u8 pcc_key[32];
+ long enc;
+ long dec;
+ int key_len;
+@@ -442,29 +440,35 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ return aes_set_key(tfm, in_key, key_len);
+ }
+
+-static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
++static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
+ struct blkcipher_walk *walk)
+ {
++ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ int ret = blkcipher_walk_virt(desc, walk);
+ unsigned int nbytes = walk->nbytes;
++ struct {
++ u8 iv[AES_BLOCK_SIZE];
++ u8 key[AES_MAX_KEY_SIZE];
++ } param;
+
+ if (!nbytes)
+ goto out;
+
+- memcpy(param, walk->iv, AES_BLOCK_SIZE);
++ memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
++ memcpy(param.key, sctx->key, sctx->key_len);
+ do {
+ /* only use complete blocks */
+ unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ u8 *out = walk->dst.virt.addr;
+ u8 *in = walk->src.virt.addr;
+
+- ret = crypt_s390_kmc(func, param, out, in, n);
++ ret = crypt_s390_kmc(func, &param, out, in, n);
+ BUG_ON((ret < 0) || (ret != n));
+
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ } while ((nbytes = walk->nbytes));
+- memcpy(walk->iv, param, AES_BLOCK_SIZE);
++ memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
+
+ out:
+ return ret;
+@@ -481,7 +485,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
+ return fallback_blk_enc(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+- return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
++ return cbc_aes_crypt(desc, sctx->enc, &walk);
+ }
+
+ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+@@ -495,7 +499,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
+ return fallback_blk_dec(desc, dst, src, nbytes);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+- return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
++ return cbc_aes_crypt(desc, sctx->dec, &walk);
+ }
+
+ static struct crypto_alg cbc_aes_alg = {
+@@ -587,7 +591,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ xts_ctx->enc = KM_XTS_128_ENCRYPT;
+ xts_ctx->dec = KM_XTS_128_DECRYPT;
+ memcpy(xts_ctx->key + 16, in_key, 16);
+- memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
++ memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
+ break;
+ case 48:
+ xts_ctx->enc = 0;
+@@ -598,7 +602,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ xts_ctx->enc = KM_XTS_256_ENCRYPT;
+ xts_ctx->dec = KM_XTS_256_DECRYPT;
+ memcpy(xts_ctx->key, in_key, 32);
+- memcpy(xts_ctx->pcc.key, in_key + 32, 32);
++ memcpy(xts_ctx->pcc_key, in_key + 32, 32);
+ break;
+ default:
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+@@ -617,28 +621,32 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
+ unsigned int nbytes = walk->nbytes;
+ unsigned int n;
+ u8 *in, *out;
+- void *param;
++ struct pcc_param pcc_param;
++ struct {
++ u8 key[32];
++ u8 init[16];
++ } xts_param;
+
+ if (!nbytes)
+ goto out;
+
+- memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
+- memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
+- memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
+- memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
+- param = xts_ctx->pcc.key + offset;
+- ret = crypt_s390_pcc(func, param);
++ memset(pcc_param.block, 0, sizeof(pcc_param.block));
++ memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
++ memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
++ memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
++ memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
++ ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
+ BUG_ON(ret < 0);
+
+- memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
+- param = xts_ctx->key + offset;
++ memcpy(xts_param.key, xts_ctx->key, 32);
++ memcpy(xts_param.init, pcc_param.xts, 16);
+ do {
+ /* only use complete blocks */
+ n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+
+- ret = crypt_s390_km(func, param, out, in, n);
++ ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
+ BUG_ON(ret < 0 || ret != n);
+
+ nbytes &= AES_BLOCK_SIZE - 1;
+diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
+index 425162e22af5..2f53b892fd80 100644
+--- a/arch/um/os-Linux/start_up.c
++++ b/arch/um/os-Linux/start_up.c
+@@ -15,6 +15,8 @@
+ #include <sys/mman.h>
+ #include <sys/stat.h>
+ #include <sys/wait.h>
++#include <sys/time.h>
++#include <sys/resource.h>
+ #include <asm/unistd.h>
+ #include "init.h"
+ #include "os.h"
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index 95365a82b6a0..e80542b45329 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -51,18 +51,18 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
+
+ # How to compile the 16-bit code. Note we always compile for -march=i386,
+ # that way we can complain to the user if the CPU is insufficient.
+-KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
++KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
+ -DDISABLE_BRANCH_PROFILING \
+ -Wall -Wstrict-prototypes \
+ -march=i386 -mregparm=3 \
+ -include $(srctree)/$(src)/code16gcc.h \
+ -fno-strict-aliasing -fomit-frame-pointer \
++ -mno-mmx -mno-sse \
+ $(call cc-option, -ffreestanding) \
+ $(call cc-option, -fno-toplevel-reorder,\
+- $(call cc-option, -fno-unit-at-a-time)) \
++ $(call cc-option, -fno-unit-at-a-time)) \
+ $(call cc-option, -fno-stack-protector) \
+ $(call cc-option, -mpreferred-stack-boundary=2)
+-KBUILD_CFLAGS += $(call cc-option, -m32)
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 77453c64d2b4..cda5cef79ad4 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -12,6 +12,7 @@ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+ cflags-$(CONFIG_X86_32) := -march=i386
+ cflags-$(CONFIG_X86_64) := -mcmodel=small
+ KBUILD_CFLAGS += $(cflags-y)
++KBUILD_CFLAGS += -mno-mmx -mno-sse
+ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
+ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
+
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index 13ad89971d47..69e231b9d925 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -95,10 +95,10 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
+ cpu_emergency_vmxoff();
+ cpu_emergency_svm_disable();
+
+- lapic_shutdown();
+ #if defined(CONFIG_X86_IO_APIC)
+ disable_IO_APIC();
+ #endif
++ lapic_shutdown();
+ #ifdef CONFIG_HPET_TIMER
+ hpet_disable();
+ #endif
+diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
+index 1ef962b47557..f9b9eaa3bbe5 100644
+--- a/arch/x86/kernel/microcode_amd.c
++++ b/arch/x86/kernel/microcode_amd.c
+@@ -331,7 +331,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
+ snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
+
+ if (request_firmware(&fw, (const char *)fw_name, device)) {
+- pr_err("failed to load file %s\n", fw_name);
++ pr_debug("failed to load file %s\n", fw_name);
+ goto out;
+ }
+
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index a4e1b4bd4953..f411acaf980c 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -652,6 +652,13 @@ void native_machine_shutdown(void)
+
+ /* The boot cpu is always logical cpu 0 */
+ int reboot_cpu_id = 0;
++#endif
++
++#ifdef CONFIG_X86_IO_APIC
++ disable_IO_APIC();
++#endif
++
++#ifdef CONFIG_SMP
+
+ #ifdef CONFIG_X86_32
+ /* See if there has been given a command line override */
+@@ -675,10 +682,6 @@ void native_machine_shutdown(void)
+
+ lapic_shutdown();
+
+-#ifdef CONFIG_X86_IO_APIC
+- disable_IO_APIC();
+-#endif
+-
+ #ifdef CONFIG_HPET_TIMER
+ hpet_disable();
+ #endif
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 54abb40199d6..43e775337816 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -537,7 +537,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
+ ASSERT(apic != NULL);
+
+ /* if initial count is 0, current count should also be 0 */
+- if (apic_get_reg(apic, APIC_TMICT) == 0)
++ if (apic_get_reg(apic, APIC_TMICT) == 0 ||
++ apic->lapic_timer.period == 0)
+ return 0;
+
+ remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index a18d20d52740..bee75a6052a4 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -614,11 +614,6 @@ void __init efi_init(void)
+
+ set_bit(EFI_MEMMAP, &x86_efi_facility);
+
+-#ifdef CONFIG_X86_32
+- x86_platform.get_wallclock = efi_get_time;
+- x86_platform.set_wallclock = efi_set_rtc_mmss;
+-#endif
+-
+ #if EFI_DEBUG
+ print_efi_memmap();
+ #endif
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 49d9e91ff4b6..a219c8914daa 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -483,6 +483,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+ }
+
+ if (blk_throtl_init(q)) {
++ bdi_destroy(&q->backing_dev_info);
+ kmem_cache_free(blk_requestq_cachep, q);
+ return NULL;
+ }
+@@ -2015,6 +2016,7 @@ void blk_start_request(struct request *req)
+ if (unlikely(blk_bidi_rq(req)))
+ req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
+
++ BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
+ blk_add_timer(req);
+ }
+ EXPORT_SYMBOL(blk_start_request);
+diff --git a/block/blk-timeout.c b/block/blk-timeout.c
+index 780354888958..b1182ea52427 100644
+--- a/block/blk-timeout.c
++++ b/block/blk-timeout.c
+@@ -90,8 +90,8 @@ static void blk_rq_timed_out(struct request *req)
+ __blk_complete_request(req);
+ break;
+ case BLK_EH_RESET_TIMER:
+- blk_clear_rq_complete(req);
+ blk_add_timer(req);
++ blk_clear_rq_complete(req);
+ break;
+ case BLK_EH_NOT_HANDLED:
+ /*
+@@ -173,7 +173,6 @@ void blk_add_timer(struct request *req)
+ return;
+
+ BUG_ON(!list_empty(&req->timeout_list));
+- BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
+
+ /*
+ * Some LLDs, like scsi, peek at the timeout to prevent a
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
+index 0262210cad38..850246206b12 100644
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -114,6 +114,9 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
+ struct hash_ctx *ctx = ask->private;
+ int err;
+
++ if (flags & MSG_SENDPAGE_NOTLAST)
++ flags |= MSG_MORE;
++
+ lock_sock(sk);
+ sg_init_table(ctx->sgl.sg, 1);
+ sg_set_page(ctx->sgl.sg, page, size, offset);
+@@ -161,8 +164,6 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
+ else if (len < ds)
+ msg->msg_flags |= MSG_TRUNC;
+
+- msg->msg_namelen = 0;
+-
+ lock_sock(sk);
+ if (ctx->more) {
+ ctx->more = 0;
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index a1c4f0a55583..a19c027b29bd 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -378,6 +378,9 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
+ struct skcipher_sg_list *sgl;
+ int err = -EINVAL;
+
++ if (flags & MSG_SENDPAGE_NOTLAST)
++ flags |= MSG_MORE;
++
+ lock_sock(sk);
+ if (!ctx->more && ctx->used)
+ goto unlock;
+@@ -432,7 +435,6 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
+ long copied = 0;
+
+ lock_sock(sk);
+- msg->msg_namelen = 0;
+ for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
+ iovlen--, iov++) {
+ unsigned long seglen = iov->iov_len;
+diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
+index ffa0245e2abc..6056178453c4 100644
+--- a/crypto/ansi_cprng.c
++++ b/crypto/ansi_cprng.c
+@@ -230,11 +230,11 @@ remainder:
+ */
+ if (byte_count < DEFAULT_BLK_SZ) {
+ empty_rbuf:
+- for (; ctx->rand_data_valid < DEFAULT_BLK_SZ;
+- ctx->rand_data_valid++) {
++ while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
+ *ptr = ctx->rand_data[ctx->rand_data_valid];
+ ptr++;
+ byte_count--;
++ ctx->rand_data_valid++;
+ if (byte_count == 0)
+ goto done;
+ }
+diff --git a/crypto/authenc.c b/crypto/authenc.c
+index 5ef7ba6b6a76..d21da2f0f508 100644
+--- a/crypto/authenc.c
++++ b/crypto/authenc.c
+@@ -368,9 +368,10 @@ static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
+ if (!err) {
+ struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+- struct ablkcipher_request *abreq = aead_request_ctx(areq);
+- u8 *iv = (u8 *)(abreq + 1) +
+- crypto_ablkcipher_reqsize(ctx->enc);
++ struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq);
++ struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
++ + ctx->reqoff);
++ u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);
+
+ err = crypto_authenc_genicv(areq, iv, 0);
+ }
+diff --git a/crypto/ccm.c b/crypto/ccm.c
+index c36d654cf56a..2002ca74c5cf 100644
+--- a/crypto/ccm.c
++++ b/crypto/ccm.c
+@@ -271,7 +271,8 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
+ }
+
+ /* compute plaintext into mac */
+- get_data_to_compute(cipher, pctx, plain, cryptlen);
++ if (cryptlen)
++ get_data_to_compute(cipher, pctx, plain, cryptlen);
+
+ out:
+ return err;
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 0445f52c3e2c..d29f6d5abf05 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -303,6 +303,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
++ { PCI_VDEVICE(INTEL, 0x9c83), board_ahci }, /* Wildcat Point-LP AHCI */
++ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
++ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
+
+ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+@@ -437,6 +441,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
+ { PCI_DEVICE(0x1b4b, 0x91a3),
+ .driver_data = board_ahci_yes_fbs },
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
++ .driver_data = board_ahci_yes_fbs },
+
+ /* Promise */
+ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
+diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
+index 43b875810d1b..6692108af534 100644
+--- a/drivers/ata/ahci_platform.c
++++ b/drivers/ata/ahci_platform.c
+@@ -204,6 +204,7 @@ static int __devexit ahci_remove(struct platform_device *pdev)
+
+ static const struct of_device_id ahci_of_match[] = {
+ { .compatible = "calxeda,hb-ahci", },
++ { .compatible = "ibm,476gtr-ahci", },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, ahci_of_match);
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 60def03da06e..de2802c13cd1 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -1247,9 +1247,11 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ {
+ struct ata_port *ap = link->ap;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
++ struct ahci_port_priv *pp = ap->private_data;
+ const char *reason = NULL;
+ unsigned long now, msecs;
+ struct ata_taskfile tf;
++ bool fbs_disabled = false;
+ int rc;
+
+ DPRINTK("ENTER\n");
+@@ -1259,6 +1261,16 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ if (rc && rc != -EOPNOTSUPP)
+ ata_link_warn(link, "failed to reset engine (errno=%d)\n", rc);
+
++ /*
++ * According to AHCI-1.2 9.3.9: if FBS is enable, software shall
++ * clear PxFBS.EN to '0' prior to issuing software reset to devices
++ * that is attached to port multiplier.
++ */
++ if (!ata_is_host_link(link) && pp->fbs_enabled) {
++ ahci_disable_fbs(ap);
++ fbs_disabled = true;
++ }
++
+ ata_tf_init(link->device, &tf);
+
+ /* issue the first D2H Register FIS */
+@@ -1299,6 +1311,10 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+ } else
+ *class = ahci_dev_classify(ap);
+
++ /* re-enable FBS if disabled before */
++ if (fbs_disabled)
++ ahci_enable_fbs(ap);
++
+ DPRINTK("EXIT, class=%u\n", *class);
+ return 0;
+
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index d54b7d6c35da..a0a39872d612 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4067,6 +4067,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
+ { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
+ { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
++ { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
+
+ /* Devices we expect to fail diagnostics */
+
+diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
+index ce9dc6207f37..c01f040c1014 100644
+--- a/drivers/ata/libata-transport.c
++++ b/drivers/ata/libata-transport.c
+@@ -312,25 +312,25 @@ int ata_tport_add(struct device *parent,
+ /*
+ * ATA link attributes
+ */
++static int noop(int x) { return x; }
+
+-
+-#define ata_link_show_linkspeed(field) \
++#define ata_link_show_linkspeed(field, format) \
+ static ssize_t \
+ show_ata_link_##field(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+ { \
+ struct ata_link *link = transport_class_to_link(dev); \
+ \
+- return sprintf(buf,"%s\n", sata_spd_string(fls(link->field))); \
++ return sprintf(buf, "%s\n", sata_spd_string(format(link->field))); \
+ }
+
+-#define ata_link_linkspeed_attr(field) \
+- ata_link_show_linkspeed(field) \
++#define ata_link_linkspeed_attr(field, format) \
++ ata_link_show_linkspeed(field, format) \
+ static DEVICE_ATTR(field, S_IRUGO, show_ata_link_##field, NULL)
+
+-ata_link_linkspeed_attr(hw_sata_spd_limit);
+-ata_link_linkspeed_attr(sata_spd_limit);
+-ata_link_linkspeed_attr(sata_spd);
++ata_link_linkspeed_attr(hw_sata_spd_limit, fls);
++ata_link_linkspeed_attr(sata_spd_limit, fls);
++ata_link_linkspeed_attr(sata_spd, noop);
+
+
+ static DECLARE_TRANSPORT_CLASS(ata_link_class,
+diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
+index 1c052127548c..b0e75ce3c8fc 100644
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -3513,7 +3513,7 @@ init_card(struct atm_dev *dev)
+ tmp = dev_get_by_name(&init_net, tname); /* jhs: was "tmp = dev_get(tname);" */
+ if (tmp) {
+ memcpy(card->atmdev->esi, tmp->dev_addr, 6);
+-
++ dev_put(tmp);
+ printk("%s: ESI %pM\n", card->name, card->atmdev->esi);
+ }
+ /*
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index 968a0d48904e..f35975faf93c 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -547,7 +547,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
+
+ mutex_lock(&brd_devices_mutex);
+ brd = brd_init_one(MINOR(dev) >> part_shift);
+- kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
++ kobj = brd ? get_disk(brd->brd_disk) : NULL;
+ mutex_unlock(&brd_devices_mutex);
+
+ *part = 0;
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index a365562bb292..d6591358bbe7 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1635,7 +1635,7 @@ static int loop_add(struct loop_device **l, int i)
+
+ lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
+ if (!lo->lo_queue)
+- goto out_free_dev;
++ goto out_free_idr;
+
+ disk = lo->lo_disk = alloc_disk(1 << part_shift);
+ if (!disk)
+@@ -1679,6 +1679,8 @@ static int loop_add(struct loop_device **l, int i)
+
+ out_free_queue:
+ blk_cleanup_queue(lo->lo_queue);
++out_free_idr:
++ idr_remove(&loop_index_idr, i);
+ out_free_dev:
+ kfree(lo);
+ out:
+@@ -1742,7 +1744,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
+ if (err < 0)
+ err = loop_add(&lo, MINOR(dev) >> part_shift);
+ if (err < 0)
+- kobj = ERR_PTR(err);
++ kobj = NULL;
+ else
+ kobj = get_disk(lo->lo_disk);
+ mutex_unlock(&loop_index_mutex);
+diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
+index 6e40072fbf67..51efcbcf7193 100644
+--- a/drivers/char/i8k.c
++++ b/drivers/char/i8k.c
+@@ -664,6 +664,13 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro"),
+ },
+ },
++ {
++ .ident = "Dell XPS421",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "XPS L421X"),
++ },
++ },
+ { }
+ };
+
+diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
+index 66d53841679a..094a7107301d 100644
+--- a/drivers/connector/cn_proc.c
++++ b/drivers/connector/cn_proc.c
+@@ -31,11 +31,23 @@
+ #include <linux/ptrace.h>
+ #include <linux/atomic.h>
+
+-#include <asm/unaligned.h>
+-
+ #include <linux/cn_proc.h>
+
+-#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event))
++/*
++ * Size of a cn_msg followed by a proc_event structure. Since the
++ * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we
++ * add one 4-byte word to the size here, and then start the actual
++ * cn_msg structure 4 bytes into the stack buffer. The result is that
++ * the immediately following proc_event structure is aligned to 8 bytes.
++ */
++#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)
++
++/* See comment above; we test our assumption about sizeof struct cn_msg here. */
++static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
++{
++ BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
++ return (struct cn_msg *)(buffer + 4);
++}
+
+ static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
+ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
+@@ -55,19 +67,19 @@ void proc_fork_connector(struct task_struct *task)
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+ struct task_struct *parent;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg*)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event*)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_FORK;
+ rcu_read_lock();
+ parent = rcu_dereference(task->real_parent);
+@@ -90,17 +102,17 @@ void proc_exec_connector(struct task_struct *task)
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg*)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event*)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_EXEC;
+ ev->event_data.exec.process_pid = task->pid;
+ ev->event_data.exec.process_tgid = task->tgid;
+@@ -116,14 +128,14 @@ void proc_id_connector(struct task_struct *task, int which_id)
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+ const struct cred *cred;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg*)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event*)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ ev->what = which_id;
+@@ -144,7 +156,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
+ rcu_read_unlock();
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+@@ -158,17 +170,17 @@ void proc_sid_connector(struct task_struct *task)
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_SID;
+ ev->event_data.sid.process_pid = task->pid;
+ ev->event_data.sid.process_tgid = task->tgid;
+@@ -185,17 +197,17 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_PTRACE;
+ ev->event_data.ptrace.process_pid = task->pid;
+ ev->event_data.ptrace.process_tgid = task->tgid;
+@@ -220,17 +232,17 @@ void proc_comm_connector(struct task_struct *task)
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg *)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_COMM;
+ ev->event_data.comm.process_pid = task->pid;
+ ev->event_data.comm.process_tgid = task->tgid;
+@@ -247,18 +259,18 @@ void proc_exit_connector(struct task_struct *task)
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg*)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event*)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_EXIT;
+ ev->event_data.exit.process_pid = task->pid;
+ ev->event_data.exit.process_tgid = task->tgid;
+@@ -284,18 +296,18 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
+ {
+ struct cn_msg *msg;
+ struct proc_event *ev;
+- __u8 buffer[CN_PROC_MSG_SIZE];
++ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+ struct timespec ts;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+- msg = (struct cn_msg*)buffer;
++ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event*)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ msg->seq = rcvd_seq;
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
++ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->cpu = -1;
+ ev->what = PROC_EVENT_NONE;
+ ev->event_data.ack.err = err;
+diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
+index edcffd6bf88c..34be13b01458 100644
+--- a/drivers/gpio/gpio-mpc8xxx.c
++++ b/drivers/gpio/gpio-mpc8xxx.c
+@@ -69,10 +69,14 @@ static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+ u32 val;
+ struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
++ u32 out_mask, out_shadow;
+
+- val = in_be32(mm->regs + GPIO_DAT) & ~in_be32(mm->regs + GPIO_DIR);
++ out_mask = in_be32(mm->regs + GPIO_DIR);
+
+- return (val | mpc8xxx_gc->data) & mpc8xxx_gpio2mask(gpio);
++ val = in_be32(mm->regs + GPIO_DAT) & ~out_mask;
++ out_shadow = mpc8xxx_gc->data & out_mask;
++
++ return (val | out_shadow) & mpc8xxx_gpio2mask(gpio);
+ }
+
+ static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index ee29c1fb7fb3..6d3669526de7 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -6063,7 +6063,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
+ intel_crtc->cursor_visible = visible;
+ }
+ /* and commit changes on next vblank */
++ POSTING_READ(CURCNTR(pipe));
+ I915_WRITE(CURBASE(pipe), base);
++ POSTING_READ(CURBASE(pipe));
+ }
+
+ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
+@@ -6088,7 +6090,9 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
+ intel_crtc->cursor_visible = visible;
+ }
+ /* and commit changes on next vblank */
++ POSTING_READ(CURCNTR_IVB(pipe));
+ I915_WRITE(CURBASE_IVB(pipe), base);
++ POSTING_READ(CURBASE_IVB(pipe));
+ }
+
+ /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index 7ce3fde40743..bd0b1fca9338 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -281,7 +281,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
+ list_for_each_safe(entry, tmp, list) {
+ nvbo = list_entry(entry, struct nouveau_bo, entry);
+
+- nouveau_bo_fence(nvbo, fence);
++ if (likely(fence))
++ nouveau_bo_fence(nvbo, fence);
+
+ if (unlikely(nvbo->validate_mapped)) {
+ ttm_bo_kunmap(&nvbo->kmap);
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+index daadf2111040..a9238b009d1a 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+@@ -416,12 +416,40 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+ /* Pin framebuffer & get tilling informations */
+ obj = radeon_fb->obj;
+ rbo = gem_to_radeon_bo(obj);
++retry:
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
++
++ /* On old GPU like RN50 with little vram pining can fails because
++ * current fb is taking all space needed. So instead of unpining
++ * the old buffer after pining the new one, first unpin old one
++ * and then retry pining new one.
++ *
++ * As only master can set mode only master can pin and it is
++ * unlikely the master client will race with itself especialy
++ * on those old gpu with single crtc.
++ *
++ * We don't shutdown the display controller because new buffer
++ * will end up in same spot.
++ */
++ if (!atomic && fb && fb != crtc->fb) {
++ struct radeon_bo *old_rbo;
++ unsigned long nsize, osize;
++
++ old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj);
++ osize = radeon_bo_size(old_rbo);
++ nsize = radeon_bo_size(rbo);
++ if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
++ radeon_bo_unpin(old_rbo);
++ radeon_bo_unreserve(old_rbo);
++ fb = NULL;
++ goto retry;
++ }
++ }
+ return -EINVAL;
+ }
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 0d27bff2de65..22a89cd5aa5d 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -1101,24 +1101,32 @@ out_unlock:
+ return ret;
+ }
+
+-static int ttm_bo_mem_compat(struct ttm_placement *placement,
+- struct ttm_mem_reg *mem)
++static bool ttm_bo_mem_compat(struct ttm_placement *placement,
++ struct ttm_mem_reg *mem,
++ uint32_t *new_flags)
+ {
+ int i;
+
+ if (mem->mm_node && placement->lpfn != 0 &&
+ (mem->start < placement->fpfn ||
+ mem->start + mem->num_pages > placement->lpfn))
+- return -1;
++ return false;
+
+ for (i = 0; i < placement->num_placement; i++) {
+- if ((placement->placement[i] & mem->placement &
+- TTM_PL_MASK_CACHING) &&
+- (placement->placement[i] & mem->placement &
+- TTM_PL_MASK_MEM))
+- return i;
++ *new_flags = placement->placement[i];
++ if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
++ (*new_flags & mem->placement & TTM_PL_MASK_MEM))
++ return true;
++ }
++
++ for (i = 0; i < placement->num_busy_placement; i++) {
++ *new_flags = placement->busy_placement[i];
++ if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
++ (*new_flags & mem->placement & TTM_PL_MASK_MEM))
++ return true;
+ }
+- return -1;
++
++ return false;
+ }
+
+ int ttm_bo_validate(struct ttm_buffer_object *bo,
+@@ -1127,6 +1135,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
+ bool no_wait_gpu)
+ {
+ int ret;
++ uint32_t new_flags;
+
+ BUG_ON(!atomic_read(&bo->reserved));
+ /* Check that range is valid */
+@@ -1137,8 +1146,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
+ /*
+ * Check whether we need to move buffer.
+ */
+- ret = ttm_bo_mem_compat(placement, &bo->mem);
+- if (ret < 0) {
++ if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
+ ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
+ if (ret)
+ return ret;
+@@ -1147,7 +1155,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
+ * Use the access and other non-mapping-related flag bits from
+ * the compatible memory placement flags to the active flags
+ */
+- ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
++ ttm_flag_masked(&bo->mem.placement, new_flags,
+ ~TTM_PL_MASK_MEMTYPE);
+ }
+ /*
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 13af0f1f08de..a605ba1e8448 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -66,7 +66,7 @@ struct mt_device {
+ unsigned last_field_index; /* last field index of the report */
+ unsigned last_slot_field; /* the last field of a slot */
+ int last_mt_collection; /* last known mt-related collection */
+- __s8 inputmode; /* InputMode HID feature, -1 if non-existent */
++ __s16 inputmode; /* InputMode HID feature, -1 if non-existent */
+ __u8 num_received; /* how many contacts we received */
+ __u8 num_expected; /* expected last contact index */
+ __u8 maxcontacts;
+diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
+index 6df0b4681710..a42a7b0cd8eb 100644
+--- a/drivers/hwmon/lm78.c
++++ b/drivers/hwmon/lm78.c
+@@ -90,6 +90,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
+ {
+ if (rpm <= 0)
+ return 255;
++ if (rpm > 1350000)
++ return 1;
+ return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ }
+
+diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
+index 615bc4f4e530..6d5ece19566f 100644
+--- a/drivers/hwmon/lm90.c
++++ b/drivers/hwmon/lm90.c
+@@ -268,7 +268,7 @@ static const struct lm90_params lm90_params[] = {
+ [max6696] = {
+ .flags = LM90_HAVE_EMERGENCY
+ | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
+- .alert_alarms = 0x187c,
++ .alert_alarms = 0x1c7c,
+ .max_convrate = 6,
+ .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
+ },
+@@ -1474,19 +1474,22 @@ static void lm90_alert(struct i2c_client *client, unsigned int flag)
+ if ((alarms & 0x7f) == 0 && (alarms2 & 0xfe) == 0) {
+ dev_info(&client->dev, "Everything OK\n");
+ } else {
+- if (alarms & 0x61)
++ if ((alarms & 0x61) || (alarms2 & 0x80))
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 1);
+- if (alarms & 0x1a)
++ if ((alarms & 0x1a) || (alarms2 & 0x20))
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 2);
+ if (alarms & 0x04)
+ dev_warn(&client->dev,
+ "temp%d diode open, please check!\n", 2);
+
+- if (alarms2 & 0x18)
++ if (alarms2 & 0x5a)
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 3);
++ if (alarms2 & 0x04)
++ dev_warn(&client->dev,
++ "temp%d diode open, please check!\n", 3);
+
+ /* Disable ALERT# output, because these chips don't implement
+ SMBus alert correctly; they should only hold the alert line
+diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
+index 47d7ce9af8fb..5ab6953d4274 100644
+--- a/drivers/hwmon/sis5595.c
++++ b/drivers/hwmon/sis5595.c
+@@ -133,6 +133,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
+ {
+ if (rpm <= 0)
+ return 255;
++ if (rpm > 1350000)
++ return 1;
+ return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+ }
+
+diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
+index db3b2e8d2a67..6df67a9c8e75 100644
+--- a/drivers/hwmon/vt8231.c
++++ b/drivers/hwmon/vt8231.c
+@@ -139,7 +139,7 @@ static const u8 regtempmin[] = { 0x3a, 0x3e, 0x2c, 0x2e, 0x30, 0x32 };
+ */
+ static inline u8 FAN_TO_REG(long rpm, int div)
+ {
+- if (rpm == 0)
++ if (rpm <= 0 || rpm > 1310720)
+ return 0;
+ return SENSORS_LIMIT(1310720 / (rpm * div), 1, 255);
+ }
+diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
+index 0254e181893d..b9c0a7f7750f 100644
+--- a/drivers/hwmon/w83l786ng.c
++++ b/drivers/hwmon/w83l786ng.c
+@@ -447,8 +447,11 @@ store_pwm(struct device *dev, struct device_attribute *attr,
+ struct w83l786ng_data *data = i2c_get_clientdata(client);
+ u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 0, 255);
+
++ val = DIV_ROUND_CLOSEST(val, 0x11);
++
+ mutex_lock(&data->update_lock);
+- data->pwm[nr] = val;
++ data->pwm[nr] = val * 0x11;
++ val |= w83l786ng_read_value(client, W83L786NG_REG_PWM[nr]) & 0xf0;
+ w83l786ng_write_value(client, W83L786NG_REG_PWM[nr], val);
+ mutex_unlock(&data->update_lock);
+ return count;
+@@ -471,7 +474,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
+ mutex_lock(&data->update_lock);
+ reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG);
+ data->pwm_enable[nr] = val;
+- reg &= ~(0x02 << W83L786NG_PWM_ENABLE_SHIFT[nr]);
++ reg &= ~(0x03 << W83L786NG_PWM_ENABLE_SHIFT[nr]);
+ reg |= (val - 1) << W83L786NG_PWM_ENABLE_SHIFT[nr];
+ w83l786ng_write_value(client, W83L786NG_REG_FAN_CFG, reg);
+ mutex_unlock(&data->update_lock);
+@@ -740,9 +743,10 @@ static struct w83l786ng_data *w83l786ng_update_device(struct device *dev)
+ ((pwmcfg >> W83L786NG_PWM_MODE_SHIFT[i]) & 1)
+ ? 0 : 1;
+ data->pwm_enable[i] =
+- ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 2) + 1;
+- data->pwm[i] = w83l786ng_read_value(client,
+- W83L786NG_REG_PWM[i]);
++ ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 3) + 1;
++ data->pwm[i] =
++ (w83l786ng_read_value(client, W83L786NG_REG_PWM[i])
++ & 0x0f) * 0x11;
+ }
+
+
+diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+index f5cb13b21445..cc04b7ba3488 100644
+--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c
++++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+@@ -280,9 +280,7 @@ static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
+ int j;
+ int ret;
+
+- ret = get_user_pages(current, current->mm, addr,
+- npages, 0, 1, pages, NULL);
+-
++ ret = get_user_pages_fast(addr, npages, 0, pages);
+ if (ret != npages) {
+ int i;
+
+@@ -811,10 +809,7 @@ int ipath_user_sdma_writev(struct ipath_devdata *dd,
+ while (dim) {
+ const int mxp = 8;
+
+- down_write(&current->mm->mmap_sem);
+ ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
+- up_write(&current->mm->mmap_sem);
+-
+ if (ret <= 0)
+ goto done_unlock;
+ else {
+diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
+index 82442085cbe6..573b4601d5b9 100644
+--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
++++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
+@@ -284,8 +284,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
+ int j;
+ int ret;
+
+- ret = get_user_pages(current, current->mm, addr,
+- npages, 0, 1, pages, NULL);
++ ret = get_user_pages_fast(addr, npages, 0, pages);
+
+ if (ret != npages) {
+ int i;
+@@ -830,10 +829,7 @@ int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
+ while (dim) {
+ const int mxp = 8;
+
+- down_write(&current->mm->mmap_sem);
+ ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
+- up_write(&current->mm->mmap_sem);
+-
+ if (ret <= 0)
+ goto done_unlock;
+ else {
+diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
+index 509135f225db..4df80fb7a314 100644
+--- a/drivers/isdn/isdnloop/isdnloop.c
++++ b/drivers/isdn/isdnloop/isdnloop.c
+@@ -1083,8 +1083,10 @@ isdnloop_start(isdnloop_card * card, isdnloop_sdef * sdefp)
+ spin_unlock_irqrestore(&card->isdnloop_lock, flags);
+ return -ENOMEM;
+ }
+- for (i = 0; i < 3; i++)
+- strcpy(card->s0num[i], sdef.num[i]);
++ for (i = 0; i < 3; i++) {
++ strlcpy(card->s0num[i], sdef.num[i],
++ sizeof(card->s0num[0]));
++ }
+ break;
+ case ISDN_PTYPE_1TR6:
+ if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95",
+@@ -1097,7 +1099,7 @@ isdnloop_start(isdnloop_card * card, isdnloop_sdef * sdefp)
+ spin_unlock_irqrestore(&card->isdnloop_lock, flags);
+ return -ENOMEM;
+ }
+- strcpy(card->s0num[0], sdef.num[0]);
++ strlcpy(card->s0num[0], sdef.num[0], sizeof(card->s0num[0]));
+ card->s0num[1][0] = '\0';
+ card->s0num[2][0] = '\0';
+ break;
+diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
+index 738ea8dd0adf..98e8274b26e4 100644
+--- a/drivers/isdn/mISDN/socket.c
++++ b/drivers/isdn/mISDN/socket.c
+@@ -117,7 +117,6 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ {
+ struct sk_buff *skb;
+ struct sock *sk = sock->sk;
+- struct sockaddr_mISDN *maddr;
+
+ int copied, err;
+
+@@ -135,9 +134,9 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (!skb)
+ return err;
+
+- if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) {
+- msg->msg_namelen = sizeof(struct sockaddr_mISDN);
+- maddr = (struct sockaddr_mISDN *)msg->msg_name;
++ if (msg->msg_name) {
++ struct sockaddr_mISDN *maddr = msg->msg_name;
++
+ maddr->family = AF_ISDN;
+ maddr->dev = _pms(sk)->dev->id;
+ if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
+@@ -150,11 +149,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ maddr->sapi = _pms(sk)->ch.addr & 0xFF;
+ maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF;
+ }
+- } else {
+- if (msg->msg_namelen)
+- printk(KERN_WARNING "%s: too small namelen %d\n",
+- __func__, msg->msg_namelen);
+- msg->msg_namelen = 0;
++ msg->msg_namelen = sizeof(*maddr);
+ }
+
+ copied = skb->len + MISDN_HEADER_LEN;
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index a5dfcc061fc0..910d2f8aa7dc 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1611,6 +1611,11 @@ static int __init dm_bufio_init(void)
+ {
+ __u64 mem;
+
++ dm_bufio_allocated_kmem_cache = 0;
++ dm_bufio_allocated_get_free_pages = 0;
++ dm_bufio_allocated_vmalloc = 0;
++ dm_bufio_current_allocated = 0;
++
+ memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
+ memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
+
+diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
+index 11431acc751c..3f123f1cbe48 100644
+--- a/drivers/md/dm-delay.c
++++ b/drivers/md/dm-delay.c
+@@ -20,6 +20,7 @@
+ struct delay_c {
+ struct timer_list delay_timer;
+ struct mutex timer_lock;
++ struct workqueue_struct *kdelayd_wq;
+ struct work_struct flush_expired_bios;
+ struct list_head delayed_bios;
+ atomic_t may_delay;
+@@ -45,14 +46,13 @@ struct dm_delay_info {
+
+ static DEFINE_MUTEX(delayed_bios_lock);
+
+-static struct workqueue_struct *kdelayd_wq;
+ static struct kmem_cache *delayed_cache;
+
+ static void handle_delayed_timer(unsigned long data)
+ {
+ struct delay_c *dc = (struct delay_c *)data;
+
+- queue_work(kdelayd_wq, &dc->flush_expired_bios);
++ queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
+ }
+
+ static void queue_timeout(struct delay_c *dc, unsigned long expires)
+@@ -190,6 +190,12 @@ out:
+ goto bad_dev_write;
+ }
+
++ dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
++ if (!dc->kdelayd_wq) {
++ DMERR("Couldn't start kdelayd");
++ goto bad_queue;
++ }
++
+ setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
+
+ INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
+@@ -202,6 +208,8 @@ out:
+ ti->private = dc;
+ return 0;
+
++bad_queue:
++ mempool_destroy(dc->delayed_pool);
+ bad_dev_write:
+ if (dc->dev_write)
+ dm_put_device(ti, dc->dev_write);
+@@ -216,7 +224,7 @@ static void delay_dtr(struct dm_target *ti)
+ {
+ struct delay_c *dc = ti->private;
+
+- flush_workqueue(kdelayd_wq);
++ destroy_workqueue(dc->kdelayd_wq);
+
+ dm_put_device(ti, dc->dev_read);
+
+@@ -350,12 +358,6 @@ static int __init dm_delay_init(void)
+ {
+ int r = -ENOMEM;
+
+- kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
+- if (!kdelayd_wq) {
+- DMERR("Couldn't start kdelayd");
+- goto bad_queue;
+- }
+-
+ delayed_cache = KMEM_CACHE(dm_delay_info, 0);
+ if (!delayed_cache) {
+ DMERR("Couldn't create delayed bio cache.");
+@@ -373,8 +375,6 @@ static int __init dm_delay_init(void)
+ bad_register:
+ kmem_cache_destroy(delayed_cache);
+ bad_memcache:
+- destroy_workqueue(kdelayd_wq);
+-bad_queue:
+ return r;
+ }
+
+@@ -382,7 +382,6 @@ static void __exit dm_delay_exit(void)
+ {
+ dm_unregister_target(&delay_target);
+ kmem_cache_destroy(delayed_cache);
+- destroy_workqueue(kdelayd_wq);
+ }
+
+ /* Module hooks */
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index 7e766f92a718..84ad530eedfb 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -84,6 +84,7 @@ struct multipath {
+ unsigned queue_io; /* Must we queue all I/O? */
+ unsigned queue_if_no_path; /* Queue I/O if last path fails? */
+ unsigned saved_queue_if_no_path;/* Saved state during suspension */
++ unsigned pg_init_disabled:1; /* pg_init is not currently allowed */
+ unsigned pg_init_retries; /* Number of times to retry pg_init */
+ unsigned pg_init_count; /* Number of times pg_init called */
+ unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
+@@ -473,7 +474,8 @@ static void process_queued_ios(struct work_struct *work)
+ (!pgpath && !m->queue_if_no_path))
+ must_queue = 0;
+
+- if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
++ if (m->pg_init_required && !m->pg_init_in_progress && pgpath &&
++ !m->pg_init_disabled)
+ __pg_init_all_paths(m);
+
+ out:
+@@ -887,10 +889,20 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
+
+ static void flush_multipath_work(struct multipath *m)
+ {
++ unsigned long flags;
++
++ spin_lock_irqsave(&m->lock, flags);
++ m->pg_init_disabled = 1;
++ spin_unlock_irqrestore(&m->lock, flags);
++
+ flush_workqueue(kmpath_handlerd);
+ multipath_wait_for_pg_init_completion(m);
+ flush_workqueue(kmultipathd);
+ flush_work_sync(&m->trigger_event);
++
++ spin_lock_irqsave(&m->lock, flags);
++ m->pg_init_disabled = 0;
++ spin_unlock_irqrestore(&m->lock, flags);
+ }
+
+ static void multipath_dtr(struct dm_target *ti)
+@@ -1111,7 +1123,7 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
+
+ spin_lock_irqsave(&m->lock, flags);
+
+- if (m->pg_init_count <= m->pg_init_retries)
++ if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
+ m->pg_init_required = 1;
+ else
+ limit_reached = 1;
+@@ -1621,7 +1633,7 @@ out:
+ *---------------------------------------------------------------*/
+ static struct target_type multipath_target = {
+ .name = "multipath",
+- .version = {1, 3, 1},
++ .version = {1, 3, 2},
+ .module = THIS_MODULE,
+ .ctr = multipath_ctr,
+ .dtr = multipath_dtr,
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index 5c303168157a..fec79e7fe917 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -66,6 +66,18 @@ struct dm_snapshot {
+
+ atomic_t pending_exceptions_count;
+
++ /* Protected by "lock" */
++ sector_t exception_start_sequence;
++
++ /* Protected by kcopyd single-threaded callback */
++ sector_t exception_complete_sequence;
++
++ /*
++ * A list of pending exceptions that completed out of order.
++ * Protected by kcopyd single-threaded callback.
++ */
++ struct list_head out_of_order_list;
++
+ mempool_t *pending_pool;
+
+ struct dm_exception_table pending;
+@@ -171,6 +183,14 @@ struct dm_snap_pending_exception {
+ */
+ int started;
+
++ /* There was copying error. */
++ int copy_error;
++
++ /* A sequence number, it is used for in-order completion. */
++ sector_t exception_sequence;
++
++ struct list_head out_of_order_entry;
++
+ /*
+ * For writing a complete chunk, bypassing the copy.
+ */
+@@ -1090,6 +1110,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ s->valid = 1;
+ s->active = 0;
+ atomic_set(&s->pending_exceptions_count, 0);
++ s->exception_start_sequence = 0;
++ s->exception_complete_sequence = 0;
++ INIT_LIST_HEAD(&s->out_of_order_list);
+ init_rwsem(&s->lock);
+ INIT_LIST_HEAD(&s->list);
+ spin_lock_init(&s->pe_lock);
+@@ -1448,6 +1471,19 @@ static void commit_callback(void *context, int success)
+ pending_complete(pe, success);
+ }
+
++static void complete_exception(struct dm_snap_pending_exception *pe)
++{
++ struct dm_snapshot *s = pe->snap;
++
++ if (unlikely(pe->copy_error))
++ pending_complete(pe, 0);
++
++ else
++ /* Update the metadata if we are persistent */
++ s->store->type->commit_exception(s->store, &pe->e,
++ commit_callback, pe);
++}
++
+ /*
+ * Called when the copy I/O has finished. kcopyd actually runs
+ * this code so don't block.
+@@ -1457,13 +1493,32 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
+ struct dm_snap_pending_exception *pe = context;
+ struct dm_snapshot *s = pe->snap;
+
+- if (read_err || write_err)
+- pending_complete(pe, 0);
++ pe->copy_error = read_err || write_err;
+
+- else
+- /* Update the metadata if we are persistent */
+- s->store->type->commit_exception(s->store, &pe->e,
+- commit_callback, pe);
++ if (pe->exception_sequence == s->exception_complete_sequence) {
++ s->exception_complete_sequence++;
++ complete_exception(pe);
++
++ while (!list_empty(&s->out_of_order_list)) {
++ pe = list_entry(s->out_of_order_list.next,
++ struct dm_snap_pending_exception, out_of_order_entry);
++ if (pe->exception_sequence != s->exception_complete_sequence)
++ break;
++ s->exception_complete_sequence++;
++ list_del(&pe->out_of_order_entry);
++ complete_exception(pe);
++ }
++ } else {
++ struct list_head *lh;
++ struct dm_snap_pending_exception *pe2;
++
++ list_for_each_prev(lh, &s->out_of_order_list) {
++ pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
++ if (pe2->exception_sequence < pe->exception_sequence)
++ break;
++ }
++ list_add(&pe->out_of_order_entry, lh);
++ }
+ }
+
+ /*
+@@ -1558,6 +1613,8 @@ __find_pending_exception(struct dm_snapshot *s,
+ return NULL;
+ }
+
++ pe->exception_sequence = s->exception_start_sequence++;
++
+ dm_insert_exception(&s->pending, &pe->e);
+
+ return pe;
+@@ -2200,7 +2257,7 @@ static struct target_type origin_target = {
+
+ static struct target_type snapshot_target = {
+ .name = "snapshot",
+- .version = {1, 10, 1},
++ .version = {1, 10, 2},
+ .module = THIS_MODULE,
+ .ctr = snapshot_ctr,
+ .dtr = snapshot_dtr,
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 52848ab56c95..5c52582b0179 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -215,6 +215,11 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
+
+ num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
+
++ if (!num_targets) {
++ kfree(t);
++ return -ENOMEM;
++ }
++
+ if (alloc_targets(t, num_targets)) {
+ kfree(t);
+ t = NULL;
+@@ -581,14 +586,28 @@ static int adjoin(struct dm_table *table, struct dm_target *ti)
+
+ /*
+ * Used to dynamically allocate the arg array.
++ *
++ * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
++ * process messages even if some device is suspended. These messages have a
++ * small fixed number of arguments.
++ *
++ * On the other hand, dm-switch needs to process bulk data using messages and
++ * excessive use of GFP_NOIO could cause trouble.
+ */
+ static char **realloc_argv(unsigned *array_size, char **old_argv)
+ {
+ char **argv;
+ unsigned new_size;
++ gfp_t gfp;
+
+- new_size = *array_size ? *array_size * 2 : 64;
+- argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
++ if (*array_size) {
++ new_size = *array_size * 2;
++ gfp = GFP_KERNEL;
++ } else {
++ new_size = 8;
++ gfp = GFP_NOIO;
++ }
++ argv = kmalloc(new_size * sizeof(*argv), gfp);
+ if (argv) {
+ memcpy(argv, old_argv, *array_size * sizeof(*argv));
+ *array_size = new_size;
+diff --git a/drivers/media/video/saa7164/saa7164-core.c b/drivers/media/video/saa7164/saa7164-core.c
+index 3b7d7b4e3034..8f3c47e1325f 100644
+--- a/drivers/media/video/saa7164/saa7164-core.c
++++ b/drivers/media/video/saa7164/saa7164-core.c
+@@ -1386,9 +1386,11 @@ static int __devinit saa7164_initdev(struct pci_dev *pci_dev,
+ if (fw_debug) {
+ dev->kthread = kthread_run(saa7164_thread_function, dev,
+ "saa7164 debug");
+- if (!dev->kthread)
++ if (IS_ERR(dev->kthread)) {
++ dev->kthread = NULL;
+ printk(KERN_ERR "%s() Failed to create "
+ "debug kernel thread\n", __func__);
++ }
+ }
+
+ } /* != BOARD_UNKNOWN */
+diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
+index 00e5fcac8fdf..cbee842f8b6b 100644
+--- a/drivers/misc/enclosure.c
++++ b/drivers/misc/enclosure.c
+@@ -198,6 +198,13 @@ static void enclosure_remove_links(struct enclosure_component *cdev)
+ {
+ char name[ENCLOSURE_NAME_SIZE];
+
++ /*
++ * In odd circumstances, like multipath devices, something else may
++ * already have removed the links, so check for this condition first.
++ */
++ if (!cdev->dev->kobj.sd)
++ return;
++
+ enclosure_link_name(cdev, name);
+ sysfs_remove_link(&cdev->dev->kobj, name);
+ sysfs_remove_link(&cdev->cdev.kobj, "device");
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 74793af15a42..4802f7ff158e 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -634,7 +634,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
+ * Otherwise we don't understand what happened, so abort.
+ */
+ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+- struct mmc_blk_request *brq, int *ecc_err)
++ struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
+ {
+ bool prev_cmd_status_valid = true;
+ u32 status, stop_status = 0;
+@@ -665,6 +665,16 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+ (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
+ *ecc_err = 1;
+
++ /* Flag General errors */
++ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
++ if ((status & R1_ERROR) ||
++ (brq->stop.resp[0] & R1_ERROR)) {
++ pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
++ req->rq_disk->disk_name, __func__,
++ brq->stop.resp[0], status);
++ *gen_err = 1;
++ }
++
+ /*
+ * Check the current card state. If it is in some data transfer
+ * mode, tell it to stop (and hopefully transition back to TRAN.)
+@@ -684,6 +694,13 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+ return ERR_ABORT;
+ if (stop_status & R1_CARD_ECC_FAILED)
+ *ecc_err = 1;
++ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
++ if (stop_status & R1_ERROR) {
++ pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
++ req->rq_disk->disk_name, __func__,
++ stop_status);
++ *gen_err = 1;
++ }
+ }
+
+ /* Check for set block count errors */
+@@ -933,7 +950,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
+ mmc_active);
+ struct mmc_blk_request *brq = &mq_mrq->brq;
+ struct request *req = mq_mrq->req;
+- int ecc_err = 0;
++ int ecc_err = 0, gen_err = 0;
+
+ /*
+ * sbc.error indicates a problem with the set block count
+@@ -947,7 +964,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
+ */
+ if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
+ brq->data.error) {
+- switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
++ switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
+ case ERR_RETRY:
+ return MMC_BLK_RETRY;
+ case ERR_ABORT:
+@@ -975,6 +992,15 @@ static int mmc_blk_err_check(struct mmc_card *card,
+ */
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+ u32 status;
++
++ /* Check stop command response */
++ if (brq->stop.resp[0] & R1_ERROR) {
++ pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
++ req->rq_disk->disk_name, __func__,
++ brq->stop.resp[0]);
++ gen_err = 1;
++ }
++
+ do {
+ int err = get_card_status(card, &status, 5);
+ if (err) {
+@@ -982,6 +1008,14 @@ static int mmc_blk_err_check(struct mmc_card *card,
+ req->rq_disk->disk_name, err);
+ return MMC_BLK_CMD_ERR;
+ }
++
++ if (status & R1_ERROR) {
++ pr_err("%s: %s: general error sending status command, card status %#x\n",
++ req->rq_disk->disk_name, __func__,
++ status);
++ gen_err = 1;
++ }
++
+ /*
+ * Some cards mishandle the status bits,
+ * so make sure to check both the busy
+@@ -991,6 +1025,13 @@ static int mmc_blk_err_check(struct mmc_card *card,
+ (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+ }
+
++ /* if general error occurs, retry the write operation. */
++ if (gen_err) {
++ pr_warning("%s: retrying write for general error\n",
++ req->rq_disk->disk_name);
++ return MMC_BLK_RETRY;
++ }
++
+ if (brq->data.error) {
+ pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, brq->data.error,
+diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
+index 9f9982ff5aab..3d6beb79fb91 100644
+--- a/drivers/mtd/devices/m25p80.c
++++ b/drivers/mtd/devices/m25p80.c
+@@ -71,7 +71,7 @@
+
+ /* Define max times to check status register before we give up. */
+ #define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
+-#define MAX_CMD_SIZE 5
++#define MAX_CMD_SIZE 6
+
+ #ifdef CONFIG_M25PXX_USE_FAST_READ
+ #define OPCODE_READ OPCODE_FAST_READ
+@@ -874,14 +874,13 @@ static int __devinit m25p_probe(struct spi_device *spi)
+ }
+ }
+
+- flash = kzalloc(sizeof *flash, GFP_KERNEL);
++ flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
+ if (!flash)
+ return -ENOMEM;
+- flash->command = kmalloc(MAX_CMD_SIZE + FAST_READ_DUMMY_BYTE, GFP_KERNEL);
+- if (!flash->command) {
+- kfree(flash);
++
++ flash->command = devm_kzalloc(&spi->dev, MAX_CMD_SIZE, GFP_KERNEL);
++ if (!flash->command)
+ return -ENOMEM;
+- }
+
+ flash->spi = spi;
+ mutex_init(&flash->lock);
+@@ -978,14 +977,10 @@ static int __devinit m25p_probe(struct spi_device *spi)
+ static int __devexit m25p_remove(struct spi_device *spi)
+ {
+ struct m25p *flash = dev_get_drvdata(&spi->dev);
+- int status;
+
+ /* Clean up MTD stuff. */
+- status = mtd_device_unregister(&flash->mtd);
+- if (status == 0) {
+- kfree(flash->command);
+- kfree(flash);
+- }
++ mtd_device_unregister(&flash->mtd);
++
+ return 0;
+ }
+
+diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+index f39f83e2e971..d6a77648f6d1 100644
+--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
++++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+@@ -227,8 +227,6 @@ static void dma_irq_callback(void *param)
+ struct gpmi_nand_data *this = param;
+ struct completion *dma_c = &this->dma_done;
+
+- complete(dma_c);
+-
+ switch (this->dma_type) {
+ case DMA_FOR_COMMAND:
+ dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
+@@ -253,6 +251,8 @@ static void dma_irq_callback(void *param)
+ default:
+ pr_err("in wrong DMA operation.\n");
+ }
++
++ complete(dma_c);
+ }
+
+ int start_dma_without_bch_irq(struct gpmi_nand_data *this,
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index daed698d657e..46ed2962ad08 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -2895,10 +2895,22 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
+ sanitize_string(p->model, sizeof(p->model));
+ if (!mtd->name)
+ mtd->name = p->model;
++
+ mtd->writesize = le32_to_cpu(p->byte_per_page);
+- mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
++
++ /*
++ * pages_per_block and blocks_per_lun may not be a power-of-2 size
++ * (don't ask me who thought of this...). MTD assumes that these
++ * dimensions will be power-of-2, so just truncate the remaining area.
++ */
++ mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
++ mtd->erasesize *= mtd->writesize;
++
+ mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+- chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
++
++ /* See erasesize comment */
++ chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
++ chip->chipsize *= (uint64_t)mtd->erasesize;
+ *busw = 0;
+ if (le16_to_cpu(p->features) & 1)
+ *busw = NAND_BUSWIDTH_16;
+diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
+index 8ed48c21b5dd..cf95bd8da7ff 100644
+--- a/drivers/net/bonding/bond_sysfs.c
++++ b/drivers/net/bonding/bond_sysfs.c
+@@ -534,8 +534,9 @@ static ssize_t bonding_store_arp_interval(struct device *d,
+ goto out;
+ }
+ if (bond->params.mode == BOND_MODE_ALB ||
+- bond->params.mode == BOND_MODE_TLB) {
+- pr_info("%s: ARP monitoring cannot be used with ALB/TLB. Only MII monitoring is supported on %s.\n",
++ bond->params.mode == BOND_MODE_TLB ||
++ bond->params.mode == BOND_MODE_8023AD) {
++ pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
+ bond->dev->name, bond->dev->name);
+ ret = -EINVAL;
+ goto out;
+@@ -693,6 +694,8 @@ static ssize_t bonding_store_downdelay(struct device *d,
+ int new_value, ret = count;
+ struct bonding *bond = to_bond(d);
+
++ if (!rtnl_trylock())
++ return restart_syscall();
+ if (!(bond->params.miimon)) {
+ pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
+ bond->dev->name);
+@@ -726,6 +729,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
+ }
+
+ out:
++ rtnl_unlock();
+ return ret;
+ }
+ static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
+@@ -748,6 +752,8 @@ static ssize_t bonding_store_updelay(struct device *d,
+ int new_value, ret = count;
+ struct bonding *bond = to_bond(d);
+
++ if (!rtnl_trylock())
++ return restart_syscall();
+ if (!(bond->params.miimon)) {
+ pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
+ bond->dev->name);
+@@ -781,6 +787,7 @@ static ssize_t bonding_store_updelay(struct device *d,
+ }
+
+ out:
++ rtnl_unlock();
+ return ret;
+ }
+ static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 64647d4cf64c..91d1b5af982b 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -764,9 +764,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
+ msg_ctrl_save = priv->read_reg(priv,
+ &priv->regs->ifregs[0].msg_cntrl);
+
+- if (msg_ctrl_save & IF_MCONT_EOB)
+- return num_rx_pkts;
+-
+ if (msg_ctrl_save & IF_MCONT_MSGLST) {
+ c_can_handle_lost_msg_obj(dev, 0, msg_obj);
+ num_rx_pkts++;
+@@ -774,6 +771,9 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
+ continue;
+ }
+
++ if (msg_ctrl_save & IF_MCONT_EOB)
++ return num_rx_pkts;
++
+ if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
+ continue;
+
+diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
+index 6a1acfeed614..568b82141e4f 100644
+--- a/drivers/net/can/sja1000/sja1000.c
++++ b/drivers/net/can/sja1000/sja1000.c
+@@ -488,19 +488,19 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+ uint8_t isrc, status;
+ int n = 0;
+
+- /* Shared interrupts and IRQ off? */
+- if (priv->read_reg(priv, REG_IER) == IRQ_OFF)
+- return IRQ_NONE;
+-
+ if (priv->pre_irq)
+ priv->pre_irq(priv);
+
++ /* Shared interrupts and IRQ off? */
++ if (priv->read_reg(priv, REG_IER) == IRQ_OFF)
++ goto out;
++
+ while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
+- n++;
++
+ status = priv->read_reg(priv, SJA1000_REG_SR);
+ /* check for absent controller due to hw unplug */
+ if (status == 0xFF && sja1000_is_absent(priv))
+- return IRQ_NONE;
++ goto out;
+
+ if (isrc & IRQ_WUI)
+ dev_warn(dev->dev.parent, "wakeup interrupt\n");
+@@ -519,7 +519,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+ status = priv->read_reg(priv, SJA1000_REG_SR);
+ /* check for absent controller */
+ if (status == 0xFF && sja1000_is_absent(priv))
+- return IRQ_NONE;
++ goto out;
+ }
+ }
+ if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
+@@ -527,8 +527,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
+ if (sja1000_err(dev, isrc, status))
+ break;
+ }
++ n++;
+ }
+-
++out:
+ if (priv->post_irq)
+ priv->post_irq(priv);
+
+diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
+index 5f53fbbf67be..ff1af41e4b5d 100644
+--- a/drivers/net/ethernet/smsc/smc91x.h
++++ b/drivers/net/ethernet/smsc/smc91x.h
+@@ -46,7 +46,8 @@
+ defined(CONFIG_MACH_LITTLETON) ||\
+ defined(CONFIG_MACH_ZYLONITE2) ||\
+ defined(CONFIG_ARCH_VIPER) ||\
+- defined(CONFIG_MACH_STARGATE2)
++ defined(CONFIG_MACH_STARGATE2) ||\
++ defined(CONFIG_ARCH_VERSATILE)
+
+ #include <asm/mach-types.h>
+
+@@ -154,6 +155,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+ #define SMC_outl(v, a, r) writel(v, (a) + (r))
+ #define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+ #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
++#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
++#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
+ #define SMC_IRQ_FLAGS (-1) /* from resource */
+
+ /* We actually can't write halfwords properly if not word aligned */
+@@ -206,23 +209,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+ #define RPC_LSA_DEFAULT RPC_LED_TX_RX
+ #define RPC_LSB_DEFAULT RPC_LED_100_10
+
+-#elif defined(CONFIG_ARCH_VERSATILE)
+-
+-#define SMC_CAN_USE_8BIT 1
+-#define SMC_CAN_USE_16BIT 1
+-#define SMC_CAN_USE_32BIT 1
+-#define SMC_NOWAIT 1
+-
+-#define SMC_inb(a, r) readb((a) + (r))
+-#define SMC_inw(a, r) readw((a) + (r))
+-#define SMC_inl(a, r) readl((a) + (r))
+-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+-#define SMC_outw(v, a, r) writew(v, (a) + (r))
+-#define SMC_outl(v, a, r) writel(v, (a) + (r))
+-#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+-#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+-#define SMC_IRQ_FLAGS (-1) /* from resource */
+-
+ #elif defined(CONFIG_MN10300)
+
+ /*
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 11615842a57b..2f319d1cdd0d 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -985,8 +985,6 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (error < 0)
+ goto end;
+
+- m->msg_namelen = 0;
+-
+ if (skb) {
+ total_len = min_t(size_t, total_len, skb->len);
+ error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
+diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
+index d8d8f0d0899f..35d86fae04af 100644
+--- a/drivers/net/wireless/libertas/debugfs.c
++++ b/drivers/net/wireless/libertas/debugfs.c
+@@ -919,7 +919,10 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
+ char *p2;
+ struct debug_data *d = f->private_data;
+
+- pdata = kmalloc(cnt, GFP_KERNEL);
++ if (cnt == 0)
++ return 0;
++
++ pdata = kmalloc(cnt + 1, GFP_KERNEL);
+ if (pdata == NULL)
+ return 0;
+
+@@ -928,6 +931,7 @@ static ssize_t lbs_debugfs_write(struct file *f, const char __user *buf,
+ kfree(pdata);
+ return 0;
+ }
++ pdata[cnt] = '\0';
+
+ p0 = pdata;
+ for (i = 0; i < num_of_items; i++) {
+diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
+index 17f8720e3665..72b253d2de4c 100644
+--- a/drivers/net/wireless/mwifiex/sdio.c
++++ b/drivers/net/wireless/mwifiex/sdio.c
+@@ -936,7 +936,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
+ struct sk_buff *skb, u32 upld_typ)
+ {
+ u8 *cmd_buf;
++ __le16 *curr_ptr = (__le16 *)skb->data;
++ u16 pkt_len = le16_to_cpu(*curr_ptr);
+
++ skb_trim(skb, pkt_len);
+ skb_pull(skb, INTF_HEADER_LEN);
+
+ switch (upld_typ) {
+diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
+index 5d0f61508a2e..e2fa538c6c27 100644
+--- a/drivers/net/wireless/prism54/islpci_dev.c
++++ b/drivers/net/wireless/prism54/islpci_dev.c
+@@ -812,6 +812,10 @@ static const struct net_device_ops islpci_netdev_ops = {
+ .ndo_validate_addr = eth_validate_addr,
+ };
+
++static struct device_type wlan_type = {
++ .name = "wlan",
++};
++
+ struct net_device *
+ islpci_setup(struct pci_dev *pdev)
+ {
+@@ -822,9 +826,8 @@ islpci_setup(struct pci_dev *pdev)
+ return ndev;
+
+ pci_set_drvdata(pdev, ndev);
+-#if defined(SET_NETDEV_DEV)
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+-#endif
++ SET_NETDEV_DEVTYPE(ndev, &wlan_type);
+
+ /* setup the structure members */
+ ndev->base_addr = pci_resource_start(pdev, 0);
+diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
+index 0ea85f46659d..131b22b8512b 100644
+--- a/drivers/net/wireless/rt2x00/rt2400pci.c
++++ b/drivers/net/wireless/rt2x00/rt2400pci.c
+@@ -1253,7 +1253,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
+ */
+ rxdesc->timestamp = ((u64)rx_high << 32) | rx_low;
+ rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL) & ~0x08;
+- rxdesc->rssi = rt2x00_get_field32(word2, RXD_W3_RSSI) -
++ rxdesc->rssi = rt2x00_get_field32(word3, RXD_W3_RSSI) -
+ entry->queue->rt2x00dev->rssi_offset;
+ rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
+
+diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
+index 921da9a67201..5c38281955d4 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
++++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
+@@ -771,6 +771,9 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop)
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+ struct data_queue *queue;
+
++ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
++ return;
++
+ tx_queue_for_each(rt2x00dev, queue)
+ rt2x00queue_flush_queue(queue, drop);
+ }
+diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
+index b4ce93436d2e..a917a2273c29 100644
+--- a/drivers/net/wireless/rtlwifi/base.c
++++ b/drivers/net/wireless/rtlwifi/base.c
+@@ -31,6 +31,7 @@
+
+ #include <linux/ip.h>
+ #include <linux/module.h>
++#include <linux/udp.h>
+ #include "wifi.h"
+ #include "rc.h"
+ #include "base.h"
+@@ -956,60 +957,51 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
+ if (!ieee80211_is_data(fc))
+ return false;
+
++ ip = (const struct iphdr *)(skb->data + mac_hdr_len +
++ SNAP_SIZE + PROTOC_TYPE_SIZE);
++ ether_type = be16_to_cpup((__be16 *)
++ (skb->data + mac_hdr_len + SNAP_SIZE));
+
+- ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len +
+- SNAP_SIZE + PROTOC_TYPE_SIZE);
+- ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE);
+- /* ether_type = ntohs(ether_type); */
+-
+- if (ETH_P_IP == ether_type) {
+- if (IPPROTO_UDP == ip->protocol) {
+- struct udphdr *udp = (struct udphdr *)((u8 *) ip +
+- (ip->ihl << 2));
+- if (((((u8 *) udp)[1] == 68) &&
+- (((u8 *) udp)[3] == 67)) ||
+- ((((u8 *) udp)[1] == 67) &&
+- (((u8 *) udp)[3] == 68))) {
+- /*
+- * 68 : UDP BOOTP client
+- * 67 : UDP BOOTP server
+- */
+- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV),
+- DBG_DMESG, ("dhcp %s !!\n",
+- (is_tx) ? "Tx" : "Rx"));
+-
+- if (is_tx) {
+- rtl_lps_leave(hw);
+- ppsc->last_delaylps_stamp_jiffies =
+- jiffies;
+- }
+-
+- return true;
+- }
+- }
+- } else if (ETH_P_ARP == ether_type) {
+- if (is_tx) {
+- rtl_lps_leave(hw);
+- ppsc->last_delaylps_stamp_jiffies = jiffies;
+- }
++ switch (ether_type) {
++ case ETH_P_IP: {
++ struct udphdr *udp;
++ u16 src;
++ u16 dst;
+
+- return true;
+- } else if (ETH_P_PAE == ether_type) {
+- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+- ("802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx"));
++ if (ip->protocol != IPPROTO_UDP)
++ return false;
++ udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
++ src = be16_to_cpu(udp->source);
++ dst = be16_to_cpu(udp->dest);
+
+- if (is_tx) {
+- rtl_lps_leave(hw);
+- ppsc->last_delaylps_stamp_jiffies = jiffies;
+- }
++ /* If this case involves port 68 (UDP BOOTP client) connecting
++ * with port 67 (UDP BOOTP server), then return true so that
++ * the lowest speed is used.
++ */
++ if (!((src == 68 && dst == 67) || (src == 67 && dst == 68)))
++ return false;
+
+- return true;
+- } else if (ETH_P_IPV6 == ether_type) {
+- /* IPv6 */
+- return true;
++ RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
++ ("dhcp %s !!\n", is_tx ? "Tx" : "Rx"));
++ break;
+ }
+-
+- return false;
++ case ETH_P_ARP:
++ break;
++ case ETH_P_PAE:
++ RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
++ ("802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx"));
++ break;
++ case ETH_P_IPV6:
++ /* TODO: Is this right? */
++ return false;
++ default:
++ return false;
++ }
++ if (is_tx) {
++ rtl_lps_leave(hw);
++ ppsc->last_delaylps_stamp_jiffies = jiffies;
++ }
++ return true;
+ }
+
+ /*********************************************************
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+index 060a06f4a885..5515215d2ffb 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+@@ -782,7 +782,7 @@ static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
+
+ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats,
+- struct rx_desc_92c *pdesc,
++ struct rx_desc_92c *p_desc,
+ struct rx_fwinfo_92c *p_drvinfo,
+ bool packet_match_bssid,
+ bool packet_toself,
+@@ -797,11 +797,11 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
+ u32 rssi, total_rssi = 0;
+ bool in_powersavemode = false;
+ bool is_cck_rate;
++ u8 *pdesc = (u8 *)p_desc;
+
+- is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
++ is_cck_rate = RX_HAL_IS_CCK_RATE(p_desc);
+ pstats->packet_matchbssid = packet_match_bssid;
+ pstats->packet_toself = packet_toself;
+- pstats->is_cck = is_cck_rate;
+ pstats->packet_beacon = packet_beacon;
+ pstats->is_cck = is_cck_rate;
+ pstats->RX_SIGQ[0] = -1;
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+index a7e1a2c35b4d..a6ea2d991ab8 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+@@ -303,10 +303,10 @@ out:
+ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+- u8 *p_desc, struct sk_buff *skb)
++ u8 *pdesc, struct sk_buff *skb)
+ {
+ struct rx_fwinfo_92c *p_drvinfo;
+- struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
++ struct rx_desc_92c *p_desc = (struct rx_desc_92c *)pdesc;
+ u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc);
+
+ stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
+@@ -345,11 +345,11 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
+ if (phystatus) {
+ p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
+ stats->rx_bufshift);
+- rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
++ rtl92c_translate_rx_signal_stuff(hw, skb, stats, p_desc,
+ p_drvinfo);
+ }
+ /*rx_status->qual = stats->signal; */
+- rx_status->signal = stats->rssi + 10;
++ rx_status->signal = stats->recvsignalpower + 10;
+ /*rx_status->noise = -stats->noise; */
+ return true;
+ }
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+index 3637c0c33525..639b57b62cca 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+@@ -529,7 +529,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
+ p_drvinfo);
+ }
+ /*rx_status->qual = stats->signal; */
+- rx_status->signal = stats->rssi + 10;
++ rx_status->signal = stats->recvsignalpower + 10;
+ /*rx_status->noise = -stats->noise; */
+ return true;
+ }
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
+index 0ad50fe44aa2..13081d9c14d0 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
+@@ -274,7 +274,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
+ rtlefuse->pwrgroup_ht40
+ [RF90_PATH_A][chnl - 1]) {
+ pwrdiff_limit[i] =
+- rtlefuse->pwrgroup_ht20
++ rtlefuse->pwrgroup_ht40
+ [RF90_PATH_A][chnl - 1];
+ }
+ } else {
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+index fbebe3ea0a22..542a8718ca90 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+@@ -582,7 +582,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
+ }
+
+ /*rx_status->qual = stats->signal; */
+- rx_status->signal = stats->rssi + 10;
++ rx_status->signal = stats->recvsignalpower + 10;
+ /*rx_status->noise = -stats->noise; */
+
+ return true;
+diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
+index 82baaa25dc41..5764ef74d2b1 100644
+--- a/drivers/net/wireless/rtlwifi/wifi.h
++++ b/drivers/net/wireless/rtlwifi/wifi.h
+@@ -73,11 +73,7 @@
+ #define RTL_SLOT_TIME_9 9
+ #define RTL_SLOT_TIME_20 20
+
+-/*related with tcp/ip. */
+-/*if_ehther.h*/
+-#define ETH_P_PAE 0x888E /*Port Access Entity (IEEE 802.1X) */
+-#define ETH_P_IP 0x0800 /*Internet Protocol packet */
+-#define ETH_P_ARP 0x0806 /*Address Resolution packet */
++/*related to tcp/ip. */
+ #define SNAP_SIZE 6
+ #define PROTOC_TYPE_SIZE 2
+
+diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
+index e0610bda1dea..7e41b70aaab7 100644
+--- a/drivers/pci/pcie/portdrv_pci.c
++++ b/drivers/pci/pcie/portdrv_pci.c
+@@ -151,7 +151,6 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
+ static void pcie_portdrv_remove(struct pci_dev *dev)
+ {
+ pcie_port_device_remove(dev);
+- pci_disable_device(dev);
+ }
+
+ static int error_detected_iter(struct device *device, void *data)
+diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
+index e39b77a4609a..15406d5e593e 100644
+--- a/drivers/rtc/rtc-at91rm9200.c
++++ b/drivers/rtc/rtc-at91rm9200.c
+@@ -156,6 +156,8 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+
+ at91_alarm_year = tm.tm_year;
+
++ tm.tm_mon = alrm->time.tm_mon;
++ tm.tm_mday = alrm->time.tm_mday;
+ tm.tm_hour = alrm->time.tm_hour;
+ tm.tm_min = alrm->time.tm_min;
+ tm.tm_sec = alrm->time.tm_sec;
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index fff57de78943..55f6488fc512 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -4322,7 +4322,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+ struct qeth_snmp_ureq *ureq;
+- int req_len;
++ unsigned int req_len;
+ struct qeth_arp_query_info qinfo = {0, };
+ int rc = 0;
+
+@@ -4338,6 +4338,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
+ /* skip 4 bytes (data_len struct member) to get req_len */
+ if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
+ return -EFAULT;
++ if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
++ sizeof(struct qeth_ipacmd_hdr) -
++ sizeof(struct qeth_ipacmd_setadpparms_hdr)))
++ return -EINVAL;
+ ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
+ if (IS_ERR(ureq)) {
+ QETH_CARD_TEXT(card, 2, "snmpnome");
+diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
+index 8a0b33033177..1254431d3053 100644
+--- a/drivers/scsi/aacraid/commctrl.c
++++ b/drivers/scsi/aacraid/commctrl.c
+@@ -508,7 +508,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+ goto cleanup;
+ }
+
+- if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) {
++ if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
++ (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 0f485507c555..5b7e1bf1b2a0 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -1186,7 +1186,7 @@ static void complete_scsi_command(struct CommandList *cp)
+ "has check condition: aborted command: "
+ "ASC: 0x%x, ASCQ: 0x%x\n",
+ cp, asc, ascq);
+- cmd->result = DID_SOFT_ERROR << 16;
++ cmd->result |= DID_SOFT_ERROR << 16;
+ break;
+ }
+ /* Must be some other type of check condition */
+@@ -4465,7 +4465,7 @@ reinit_after_soft_reset:
+ hpsa_hba_inquiry(h);
+ hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
+ start_controller_lockup_detector(h);
+- return 1;
++ return 0;
+
+ clean4:
+ hpsa_free_sg_chain_blocks(h);
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index 4868fc9ad1fa..5e170e3143e2 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -197,7 +197,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
+ qc->tf.nsect = 0;
+ }
+
+- ata_tf_to_fis(&qc->tf, 1, 0, (u8*)&task->ata_task.fis);
++ ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);
+ task->uldd_task = qc;
+ if (ata_is_atapi(qc->tf.protocol)) {
+ memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
+diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
+index 93de4f2e8bf8..b27d9aa08a71 100644
+--- a/drivers/staging/tidspbridge/Kconfig
++++ b/drivers/staging/tidspbridge/Kconfig
+@@ -4,7 +4,7 @@
+
+ menuconfig TIDSPBRIDGE
+ tristate "DSP Bridge driver"
+- depends on ARCH_OMAP3
++ depends on ARCH_OMAP3 && BROKEN
+ select OMAP_MBOX_FWK
+ help
+ DSP/BIOS Bridge is designed for platforms that contain a GPP and
+diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
+index 1fae1e9ae82c..fc552d8311e1 100644
+--- a/drivers/staging/zram/zram_sysfs.c
++++ b/drivers/staging/zram/zram_sysfs.c
+@@ -95,20 +95,27 @@ static ssize_t reset_store(struct device *dev,
+ zram = dev_to_zram(dev);
+ bdev = bdget_disk(zram->disk, 0);
+
++ if (!bdev)
++ return -ENOMEM;
++
+ /* Do not reset an active device! */
+- if (bdev->bd_holders)
+- return -EBUSY;
++ if (bdev->bd_holders) {
++ ret = -EBUSY;
++ goto out;
++ }
+
+ ret = strict_strtoul(buf, 10, &do_reset);
+ if (ret)
+- return ret;
++ goto out;
+
+- if (!do_reset)
+- return -EINVAL;
++ if (!do_reset) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+ /* Make sure all pending I/O is finished */
+- if (bdev)
+- fsync_bdev(bdev);
++ fsync_bdev(bdev);
++ bdput(bdev);
+
+ down_write(&zram->init_lock);
+ if (zram->init_done)
+@@ -116,6 +123,10 @@ static ssize_t reset_store(struct device *dev,
+ up_write(&zram->init_lock);
+
+ return len;
++
++out:
++ bdput(bdev);
++ return ret;
+ }
+
+ static ssize_t num_reads_show(struct device *dev,
+diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
+index 1cd6ce373b83..59e7378f66c6 100644
+--- a/drivers/target/iscsi/iscsi_target_auth.c
++++ b/drivers/target/iscsi/iscsi_target_auth.c
+@@ -172,6 +172,7 @@ static int chap_server_compute_md5(
+ unsigned char client_digest[MD5_SIGNATURE_SIZE];
+ unsigned char server_digest[MD5_SIGNATURE_SIZE];
+ unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
++ size_t compare_len;
+ struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+ struct crypto_hash *tfm;
+ struct hash_desc desc;
+@@ -210,7 +211,9 @@ static int chap_server_compute_md5(
+ goto out;
+ }
+
+- if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) {
++ /* Include the terminating NULL in the compare */
++ compare_len = strlen(auth->userid) + 1;
++ if (strncmp(chap_n, auth->userid, compare_len) != 0) {
+ pr_err("CHAP_N values do not match!\n");
+ goto out;
+ }
+diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
+index 7d85f8890162..3486d129eb9c 100644
+--- a/drivers/target/iscsi/iscsi_target_nego.c
++++ b/drivers/target/iscsi/iscsi_target_nego.c
+@@ -89,7 +89,7 @@ int extract_param(
+ if (len < 0)
+ return -1;
+
+- if (len > max_length) {
++ if (len >= max_length) {
+ pr_err("Length of input: %d exeeds max_length:"
+ " %d\n", len, max_length);
+ return -1;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 0cdff38107e4..636ee9e5ff39 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1448,6 +1448,8 @@ static int acm_reset_resume(struct usb_interface *intf)
+
+ static const struct usb_device_id acm_ids[] = {
+ /* quirky and broken devices */
++ { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
++ .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
+ { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index a5ea85ff5c0d..7013165909c8 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -900,6 +900,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_PORT_LINK_STATE);
+ }
++ if (portchange & USB_PORT_STAT_C_RESET) {
++ need_debounce_delay = true;
++ clear_port_feature(hub->hdev, port1,
++ USB_PORT_FEAT_C_RESET);
++ }
+
+ if ((portchange & USB_PORT_STAT_C_BH_RESET) &&
+ hub_is_superspeed(hub->hdev)) {
+@@ -3749,8 +3754,9 @@ static void hub_events(void)
+ hub->hdev->children[i - 1];
+
+ dev_dbg(hub_dev, "warm reset port %d\n", i);
+- if (!udev || !(portstatus &
+- USB_PORT_STAT_CONNECTION)) {
++ if (!udev ||
++ !(portstatus & USB_PORT_STAT_CONNECTION) ||
++ udev->state == USB_STATE_NOTATTACHED) {
+ status = hub_port_reset(hub, i,
+ NULL, HUB_BH_RESET_TIME,
+ true);
+@@ -4018,6 +4024,12 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ }
+ parent_hub = hdev_to_hub(parent_hdev);
+
++ /* Disable USB2 hardware LPM.
++ * It will be re-enabled by the enumeration process.
++ */
++ if (udev->usb2_hw_lpm_enabled == 1)
++ usb_set_usb2_hardware_lpm(udev, 0);
++
+ set_bit(port1, parent_hub->busy_bits);
+ for (i = 0; i < SET_CONFIG_TRIES; ++i) {
+
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index c0dcf698cb7a..c4134e82ed1f 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -394,6 +394,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
+ dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
+ if (!dep)
+ return -EINVAL;
++ if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
++ break;
+ ret = __dwc3_gadget_ep_set_halt(dep, set);
+ if (ret)
+ return -EINVAL;
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 619ee1988732..5f2e3d033c32 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -903,9 +903,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
+ else
+ dep->flags |= DWC3_EP_STALL;
+ } else {
+- if (dep->flags & DWC3_EP_WEDGE)
+- return 0;
+-
+ ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
+ DWC3_DEPCMD_CLEARSTALL, &params);
+ if (ret)
+@@ -913,7 +910,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
+ value ? "set" : "clear",
+ dep->name);
+ else
+- dep->flags &= ~DWC3_EP_STALL;
++ dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+ }
+
+ return ret;
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index f71b0787983f..4484ef1a00eb 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -585,6 +585,7 @@ static void reset_config(struct usb_composite_dev *cdev)
+ bitmap_zero(f->endpoints, 32);
+ }
+ cdev->config = NULL;
++ cdev->delayed_status = 0;
+ }
+
+ static int set_config(struct usb_composite_dev *cdev,
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index a3f6fe0aebb9..85504bb130a7 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -2192,6 +2192,20 @@ static void ftdi_set_termios(struct tty_struct *tty,
+ termios->c_cflag |= CRTSCTS;
+ }
+
++ /*
++ * All FTDI UART chips are limited to CS7/8. We won't pretend to
++ * support CS5/6 and revert the CSIZE setting instead.
++ */
++ if ((C_CSIZE(tty) != CS8) && (C_CSIZE(tty) != CS7)) {
++ dev_warn(&port->dev, "requested CSIZE setting not supported\n");
++
++ termios->c_cflag &= ~CSIZE;
++ if (old_termios)
++ termios->c_cflag |= old_termios->c_cflag & CSIZE;
++ else
++ termios->c_cflag |= CS8;
++ }
++
+ cflag = termios->c_cflag;
+
+ if (!old_termios)
+@@ -2228,13 +2242,16 @@ no_skip:
+ } else {
+ urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE;
+ }
+- if (cflag & CSIZE) {
+- switch (cflag & CSIZE) {
+- case CS7: urb_value |= 7; dbg("Setting CS7"); break;
+- case CS8: urb_value |= 8; dbg("Setting CS8"); break;
+- default:
+- dev_err(&port->dev, "CSIZE was set but not CS7-CS8\n");
+- }
++ switch (cflag & CSIZE) {
++ case CS7:
++ urb_value |= 7;
++ dev_dbg(&port->dev, "Setting CS7\n");
++ break;
++ default:
++ case CS8:
++ urb_value |= 8;
++ dev_dbg(&port->dev, "Setting CS8\n");
++ break;
+ }
+
+ /* This is needed by the break command since it uses the same command
+diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
+index 9f0b2bff8ee4..c0e6486b27b2 100644
+--- a/drivers/usb/serial/generic.c
++++ b/drivers/usb/serial/generic.c
+@@ -228,14 +228,7 @@ retry:
+ return result;
+ }
+
+- /* Try sending off another urb, unless in irq context (in which case
+- * there will be no free urb). */
+- if (!in_irq())
+- goto retry;
+-
+- clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
+-
+- return 0;
++ goto retry; /* try sending off another urb */
+ }
+
+ /**
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 5d2501eb8211..80fc40a0c99a 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -1689,7 +1689,11 @@ static int mos7840_tiocmget(struct tty_struct *tty)
+ return -ENODEV;
+
+ status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
++ if (status != 1)
++ return -EIO;
+ status = mos7840_get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr);
++ if (status != 1)
++ return -EIO;
+ result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
+ | ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
+ | ((mcr & MCR_LOOPBACK) ? TIOCM_LOOP : 0)
+@@ -1983,25 +1987,25 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
+ iflag = tty->termios->c_iflag;
+
+ /* Change the number of bits */
+- if (cflag & CSIZE) {
+- switch (cflag & CSIZE) {
+- case CS5:
+- lData = LCR_BITS_5;
+- break;
++ switch (cflag & CSIZE) {
++ case CS5:
++ lData = LCR_BITS_5;
++ break;
+
+- case CS6:
+- lData = LCR_BITS_6;
+- break;
++ case CS6:
++ lData = LCR_BITS_6;
++ break;
+
+- case CS7:
+- lData = LCR_BITS_7;
+- break;
+- default:
+- case CS8:
+- lData = LCR_BITS_8;
+- break;
+- }
++ case CS7:
++ lData = LCR_BITS_7;
++ break;
++
++ default:
++ case CS8:
++ lData = LCR_BITS_8;
++ break;
+ }
++
+ /* Change the Parity bit */
+ if (cflag & PARENB) {
+ if (cflag & PARODD) {
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index d8ace827598b..d6d0fb451f2b 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -85,6 +85,7 @@ static void option_instat_callback(struct urb *urb);
+ #define HUAWEI_PRODUCT_K4505 0x1464
+ #define HUAWEI_PRODUCT_K3765 0x1465
+ #define HUAWEI_PRODUCT_K4605 0x14C6
++#define HUAWEI_PRODUCT_E173S6 0x1C07
+
+ #define QUANTA_VENDOR_ID 0x0408
+ #define QUANTA_PRODUCT_Q101 0xEA02
+@@ -586,6 +587,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
+@@ -648,6 +651,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) },
+@@ -702,6 +709,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
+@@ -756,6 +767,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
+@@ -810,6 +825,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
+@@ -864,6 +883,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
+@@ -918,6 +941,10 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x72) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x73) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x74) },
++ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x75) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
+@@ -1391,6 +1418,23 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1545, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1546, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1547, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1565, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1566, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1567, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1589, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1590, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1591, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1592, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1594, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
+ 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 317e503ef3b8..e3936c1002b3 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -290,24 +290,21 @@ static void pl2303_set_termios(struct tty_struct *tty,
+ dbg("0xa1:0x21:0:0 %d - %x %x %x %x %x %x %x", i,
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
+
+- if (cflag & CSIZE) {
+- switch (cflag & CSIZE) {
+- case CS5:
+- buf[6] = 5;
+- break;
+- case CS6:
+- buf[6] = 6;
+- break;
+- case CS7:
+- buf[6] = 7;
+- break;
+- default:
+- case CS8:
+- buf[6] = 8;
+- break;
+- }
+- dbg("%s - data bits = %d", __func__, buf[6]);
++ switch (C_CSIZE(tty)) {
++ case CS5:
++ buf[6] = 5;
++ break;
++ case CS6:
++ buf[6] = 6;
++ break;
++ case CS7:
++ buf[6] = 7;
++ break;
++ default:
++ case CS8:
++ buf[6] = 8;
+ }
++ dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
+
+ /* For reference buf[0]:buf[3] baud rate value */
+ /* NOTE: Only the values defined in baud_sup are supported !
+diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
+index f3179b0fa100..2f67b99282cc 100644
+--- a/drivers/usb/serial/spcp8x5.c
++++ b/drivers/usb/serial/spcp8x5.c
+@@ -394,22 +394,20 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
+ }
+
+ /* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */
+- if (cflag & CSIZE) {
+- switch (cflag & CSIZE) {
+- case CS5:
+- buf[1] |= SET_UART_FORMAT_SIZE_5;
+- break;
+- case CS6:
+- buf[1] |= SET_UART_FORMAT_SIZE_6;
+- break;
+- case CS7:
+- buf[1] |= SET_UART_FORMAT_SIZE_7;
+- break;
+- default:
+- case CS8:
+- buf[1] |= SET_UART_FORMAT_SIZE_8;
+- break;
+- }
++ switch (cflag & CSIZE) {
++ case CS5:
++ buf[1] |= SET_UART_FORMAT_SIZE_5;
++ break;
++ case CS6:
++ buf[1] |= SET_UART_FORMAT_SIZE_6;
++ break;
++ case CS7:
++ buf[1] |= SET_UART_FORMAT_SIZE_7;
++ break;
++ default:
++ case CS8:
++ buf[1] |= SET_UART_FORMAT_SIZE_8;
++ break;
+ }
+
+ /* Set Stop bit2 : 0:1bit 1:2bit */
+diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
+index f0d546c5a089..ca1031bbb250 100644
+--- a/drivers/usb/wusbcore/wa-rpipe.c
++++ b/drivers/usb/wusbcore/wa-rpipe.c
+@@ -332,7 +332,10 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
+ /* FIXME: compute so seg_size > ep->maxpktsize */
+ rpipe->descr.wBlocks = cpu_to_le16(16); /* given */
+ /* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */
+- rpipe->descr.wMaxPacketSize = cpu_to_le16(ep->desc.wMaxPacketSize);
++ if (usb_endpoint_xfer_isoc(&ep->desc))
++ rpipe->descr.wMaxPacketSize = epcd->wOverTheAirPacketSize;
++ else
++ rpipe->descr.wMaxPacketSize = ep->desc.wMaxPacketSize;
+ rpipe->descr.bHSHubAddress = 0; /* reserved: zero */
+ rpipe->descr.bHSHubPort = wusb_port_no_to_idx(urb->dev->portnum);
+ /* FIXME: use maximum speed as supported or recommended by device */
+diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
+index 57c01ab09ad8..5f6df6ee6ab6 100644
+--- a/drivers/usb/wusbcore/wa-xfer.c
++++ b/drivers/usb/wusbcore/wa-xfer.c
+@@ -90,7 +90,8 @@
+ #include "wusbhc.h"
+
+ enum {
+- WA_SEGS_MAX = 255,
++ /* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
++ WA_SEGS_MAX = 128,
+ };
+
+ enum wa_seg_status {
+@@ -444,7 +445,7 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
+ xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
+ xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
+ / xfer->seg_size;
+- if (xfer->segs >= WA_SEGS_MAX) {
++ if (xfer->segs > WA_SEGS_MAX) {
+ dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n",
+ (int)(urb->transfer_buffer_length / xfer->seg_size),
+ WA_SEGS_MAX);
+diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
+index 0443a4f71858..dab3a0c9f480 100644
+--- a/drivers/video/backlight/atmel-pwm-bl.c
++++ b/drivers/video/backlight/atmel-pwm-bl.c
+@@ -70,7 +70,7 @@ static int atmel_pwm_bl_set_intensity(struct backlight_device *bd)
+ static int atmel_pwm_bl_get_intensity(struct backlight_device *bd)
+ {
+ struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
+- u8 intensity;
++ u32 intensity;
+
+ if (pwmbl->pdata->pwm_active_low) {
+ intensity = pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY) -
+@@ -80,7 +80,7 @@ static int atmel_pwm_bl_get_intensity(struct backlight_device *bd)
+ pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY);
+ }
+
+- return intensity;
++ return intensity & 0xffff;
+ }
+
+ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
+@@ -211,7 +211,8 @@ static int __exit atmel_pwm_bl_remove(struct platform_device *pdev)
+ struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev);
+
+ if (pwmbl->gpio_on != -1) {
+- gpio_set_value(pwmbl->gpio_on, 0);
++ gpio_set_value(pwmbl->gpio_on,
++ 0 ^ pwmbl->pdata->on_active_low);
+ gpio_free(pwmbl->gpio_on);
+ }
+ pwm_channel_disable(&pwmbl->pwmc);
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index c858a29325ff..969f74fa3ce8 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -3437,11 +3437,13 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
+ return 0;
+ }
+ cifs_acl->version = cpu_to_le16(1);
+- if (acl_type == ACL_TYPE_ACCESS)
++ if (acl_type == ACL_TYPE_ACCESS) {
+ cifs_acl->access_entry_count = cpu_to_le16(count);
+- else if (acl_type == ACL_TYPE_DEFAULT)
++ cifs_acl->default_entry_count = __constant_cpu_to_le16(0xFFFF);
++ } else if (acl_type == ACL_TYPE_DEFAULT) {
+ cifs_acl->default_entry_count = cpu_to_le16(count);
+- else {
++ cifs_acl->access_entry_count = __constant_cpu_to_le16(0xFFFF);
++ } else {
+ cFYI(1, "unknown ACL type %d", acl_type);
+ return 0;
+ }
+diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
+index 9a37a9b6de3a..5ef72c80f9ef 100644
+--- a/fs/configfs/dir.c
++++ b/fs/configfs/dir.c
+@@ -56,10 +56,19 @@ static void configfs_d_iput(struct dentry * dentry,
+ struct configfs_dirent *sd = dentry->d_fsdata;
+
+ if (sd) {
+- BUG_ON(sd->s_dentry != dentry);
+ /* Coordinate with configfs_readdir */
+ spin_lock(&configfs_dirent_lock);
+- sd->s_dentry = NULL;
++ /* Coordinate with configfs_attach_attr where will increase
++ * sd->s_count and update sd->s_dentry to new allocated one.
++ * Only set sd->dentry to null when this dentry is the only
++ * sd owner.
++ * If not do so, configfs_d_iput may run just after
++ * configfs_attach_attr and set sd->s_dentry to null
++ * even it's still in use.
++ */
++ if (atomic_read(&sd->s_count) <= 2)
++ sd->s_dentry = NULL;
++
+ spin_unlock(&configfs_dirent_lock);
+ configfs_put(sd);
+ }
+@@ -436,8 +445,11 @@ static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * den
+ struct configfs_attribute * attr = sd->s_element;
+ int error;
+
++ spin_lock(&configfs_dirent_lock);
+ dentry->d_fsdata = configfs_get(sd);
+ sd->s_dentry = dentry;
++ spin_unlock(&configfs_dirent_lock);
++
+ error = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG,
+ configfs_init_file);
+ if (error) {
+diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
+index d5d5297efe97..2a950478a9cd 100644
+--- a/fs/devpts/inode.c
++++ b/fs/devpts/inode.c
+@@ -413,6 +413,7 @@ static void devpts_kill_sb(struct super_block *sb)
+ {
+ struct pts_fs_info *fsi = DEVPTS_SB(sb);
+
++ ida_destroy(&fsi->allocated_ptys);
+ kfree(fsi);
+ kill_litter_super(sb);
+ }
+diff --git a/fs/exec.c b/fs/exec.c
+index a2d0e51019ff..78199ebce9d0 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -2032,6 +2032,12 @@ static int __get_dumpable(unsigned long mm_flags)
+ return (ret >= 2) ? 2 : ret;
+ }
+
++/*
++ * This returns the actual value of the suid_dumpable flag. For things
++ * that are using this for checking for privilege transitions, it must
++ * test against SUID_DUMP_USER rather than treating it as a boolean
++ * value.
++ */
+ int get_dumpable(struct mm_struct *mm)
+ {
+ return __get_dumpable(mm->flags);
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index b4e9f3fcc3fd..05617bda3449 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1271,6 +1271,7 @@ retry:
+ new_extra_isize = s_min_extra_isize;
+ kfree(is); is = NULL;
+ kfree(bs); bs = NULL;
++ brelse(bh);
+ goto retry;
+ }
+ error = -1;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 5639efd3298e..3d0293154393 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3764,8 +3764,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
+ dprintk("%s ERROR %d, Reset session\n", __func__,
+ task->tk_status);
+ nfs4_schedule_session_recovery(clp->cl_session);
+- task->tk_status = 0;
+- return -EAGAIN;
++ goto wait_on_recovery;
+ #endif /* CONFIG_NFS_V4_1 */
+ case -NFS4ERR_DELAY:
+ nfs_inc_server_stats(server, NFSIOS_DELAY);
+@@ -3887,11 +3886,17 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
+ return;
+
+ switch (task->tk_status) {
+- case -NFS4ERR_STALE_STATEID:
+- case -NFS4ERR_EXPIRED:
+ case 0:
+ renew_lease(data->res.server, data->timestamp);
+ break;
++ case -NFS4ERR_ADMIN_REVOKED:
++ case -NFS4ERR_DELEG_REVOKED:
++ case -NFS4ERR_BAD_STATEID:
++ case -NFS4ERR_OLD_STATEID:
++ case -NFS4ERR_STALE_STATEID:
++ case -NFS4ERR_EXPIRED:
++ task->tk_status = 0;
++ break;
+ default:
+ if (nfs4_async_handle_error(task, data->res.server, NULL) ==
+ -EAGAIN) {
+@@ -4052,6 +4057,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
+ status = 0;
+ }
+ request->fl_ops->fl_release_private(request);
++ request->fl_ops = NULL;
+ out:
+ return status;
+ }
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 99625b8192ff..ade5316d0cf5 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -177,8 +177,8 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
+ */
+ memcpy(p, argp->p, avail);
+ /* step to next page */
+- argp->pagelist++;
+ argp->p = page_address(argp->pagelist[0]);
++ argp->pagelist++;
+ if (argp->pagelen < PAGE_SIZE) {
+ argp->end = argp->p + (argp->pagelen>>2);
+ argp->pagelen = 0;
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 61b697e69ef3..6a66fc08546d 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -297,41 +297,12 @@ commit_metadata(struct svc_fh *fhp)
+ }
+
+ /*
+- * Set various file attributes.
+- * N.B. After this call fhp needs an fh_put
++ * Go over the attributes and take care of the small differences between
++ * NFS semantics and what Linux expects.
+ */
+-__be32
+-nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+- int check_guard, time_t guardtime)
++static void
++nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
+ {
+- struct dentry *dentry;
+- struct inode *inode;
+- int accmode = NFSD_MAY_SATTR;
+- int ftype = 0;
+- __be32 err;
+- int host_err;
+- int size_change = 0;
+-
+- if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
+- accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
+- if (iap->ia_valid & ATTR_SIZE)
+- ftype = S_IFREG;
+-
+- /* Get inode */
+- err = fh_verify(rqstp, fhp, ftype, accmode);
+- if (err)
+- goto out;
+-
+- dentry = fhp->fh_dentry;
+- inode = dentry->d_inode;
+-
+- /* Ignore any mode updates on symlinks */
+- if (S_ISLNK(inode->i_mode))
+- iap->ia_valid &= ~ATTR_MODE;
+-
+- if (!iap->ia_valid)
+- goto out;
+-
+ /*
+ * NFSv2 does not differentiate between "set-[ac]time-to-now"
+ * which only requires access, and "set-[ac]time-to-X" which
+@@ -341,8 +312,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ * convert to "set to now" instead of "set to explicit time"
+ *
+ * We only call inode_change_ok as the last test as technically
+- * it is not an interface that we should be using. It is only
+- * valid if the filesystem does not define it's own i_op->setattr.
++ * it is not an interface that we should be using.
+ */
+ #define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET)
+ #define MAX_TOUCH_TIME_ERROR (30*60)
+@@ -368,30 +338,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ iap->ia_valid &= ~BOTH_TIME_SET;
+ }
+ }
+-
+- /*
+- * The size case is special.
+- * It changes the file as well as the attributes.
+- */
+- if (iap->ia_valid & ATTR_SIZE) {
+- if (iap->ia_size < inode->i_size) {
+- err = nfsd_permission(rqstp, fhp->fh_export, dentry,
+- NFSD_MAY_TRUNC|NFSD_MAY_OWNER_OVERRIDE);
+- if (err)
+- goto out;
+- }
+-
+- host_err = get_write_access(inode);
+- if (host_err)
+- goto out_nfserr;
+-
+- size_change = 1;
+- host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
+- if (host_err) {
+- put_write_access(inode);
+- goto out_nfserr;
+- }
+- }
+
+ /* sanitize the mode change */
+ if (iap->ia_valid & ATTR_MODE) {
+@@ -414,32 +360,111 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID);
+ }
+ }
++}
+
+- /* Change the attributes. */
++static __be32
++nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
++ struct iattr *iap)
++{
++ struct inode *inode = fhp->fh_dentry->d_inode;
++ int host_err;
+
+- iap->ia_valid |= ATTR_CTIME;
++ if (iap->ia_size < inode->i_size) {
++ __be32 err;
+
+- err = nfserr_notsync;
+- if (!check_guard || guardtime == inode->i_ctime.tv_sec) {
+- host_err = nfsd_break_lease(inode);
+- if (host_err)
+- goto out_nfserr;
+- fh_lock(fhp);
++ err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
++ NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
++ if (err)
++ return err;
++ }
+
+- host_err = notify_change(dentry, iap);
+- err = nfserrno(host_err);
+- fh_unlock(fhp);
++ host_err = get_write_access(inode);
++ if (host_err)
++ goto out_nfserrno;
++
++ host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
++ if (host_err)
++ goto out_put_write_access;
++ return 0;
++
++out_put_write_access:
++ put_write_access(inode);
++out_nfserrno:
++ return nfserrno(host_err);
++}
++
++/*
++ * Set various file attributes. After this call fhp needs an fh_put.
++ */
++__be32
++nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
++ int check_guard, time_t guardtime)
++{
++ struct dentry *dentry;
++ struct inode *inode;
++ int accmode = NFSD_MAY_SATTR;
++ int ftype = 0;
++ __be32 err;
++ int host_err;
++ int size_change = 0;
++
++ if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
++ accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
++ if (iap->ia_valid & ATTR_SIZE)
++ ftype = S_IFREG;
++
++ /* Get inode */
++ err = fh_verify(rqstp, fhp, ftype, accmode);
++ if (err)
++ goto out;
++
++ dentry = fhp->fh_dentry;
++ inode = dentry->d_inode;
++
++ /* Ignore any mode updates on symlinks */
++ if (S_ISLNK(inode->i_mode))
++ iap->ia_valid &= ~ATTR_MODE;
++
++ if (!iap->ia_valid)
++ goto out;
++
++ nfsd_sanitize_attrs(inode, iap);
++
++ /*
++ * The size case is special, it changes the file in addition to the
++ * attributes.
++ */
++ if (iap->ia_valid & ATTR_SIZE) {
++ err = nfsd_get_write_access(rqstp, fhp, iap);
++ if (err)
++ goto out;
++ size_change = 1;
+ }
++
++ iap->ia_valid |= ATTR_CTIME;
++
++ if (check_guard && guardtime != inode->i_ctime.tv_sec) {
++ err = nfserr_notsync;
++ goto out_put_write_access;
++ }
++
++ host_err = nfsd_break_lease(inode);
++ if (host_err)
++ goto out_put_write_access_nfserror;
++
++ fh_lock(fhp);
++ host_err = notify_change(dentry, iap);
++ fh_unlock(fhp);
++
++out_put_write_access_nfserror:
++ err = nfserrno(host_err);
++out_put_write_access:
+ if (size_change)
+ put_write_access(inode);
+ if (!err)
+ commit_metadata(fhp);
+ out:
+ return err;
+-
+-out_nfserr:
+- err = nfserrno(host_err);
+- goto out;
+ }
+
+ #if defined(CONFIG_NFSD_V2_ACL) || \
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index d99a90518909..eb519de68047 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -404,7 +404,8 @@ xfs_attrlist_by_handle(
+ return -XFS_ERROR(EPERM);
+ if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
+ return -XFS_ERROR(EFAULT);
+- if (al_hreq.buflen > XATTR_LIST_MAX)
++ if (al_hreq.buflen < sizeof(struct attrlist) ||
++ al_hreq.buflen > XATTR_LIST_MAX)
+ return -XFS_ERROR(EINVAL);
+
+ /*
+diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
+index 54e623bfbb85..0d685b3fb609 100644
+--- a/fs/xfs/xfs_ioctl32.c
++++ b/fs/xfs/xfs_ioctl32.c
+@@ -361,7 +361,8 @@ xfs_compat_attrlist_by_handle(
+ if (copy_from_user(&al_hreq, arg,
+ sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
+ return -XFS_ERROR(EFAULT);
+- if (al_hreq.buflen > XATTR_LIST_MAX)
++ if (al_hreq.buflen < sizeof(struct attrlist) ||
++ al_hreq.buflen > XATTR_LIST_MAX)
+ return -XFS_ERROR(EINVAL);
+
+ /*
+diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
+index 4fd95a323beb..0532279203af 100644
+--- a/include/crypto/scatterwalk.h
++++ b/include/crypto/scatterwalk.h
+@@ -58,6 +58,7 @@ static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
+ {
+ sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
+ sg1[num - 1].page_link &= ~0x02;
++ sg1[num - 1].page_link |= 0x01;
+ }
+
+ static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
+@@ -65,7 +66,7 @@ static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
+ if (sg_is_last(sg))
+ return NULL;
+
+- return (++sg)->length ? sg : (void *)sg_page(sg);
++ return (++sg)->length ? sg : sg_chain_ptr(sg);
+ }
+
+ static inline void scatterwalk_crypto_chain(struct scatterlist *head,
+diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
+index acd8d4bf161c..d337419e9e32 100644
+--- a/include/linux/binfmts.h
++++ b/include/linux/binfmts.h
+@@ -112,9 +112,6 @@ extern void setup_new_exec(struct linux_binprm * bprm);
+ extern void would_dump(struct linux_binprm *, struct file *);
+
+ extern int suid_dumpable;
+-#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
+-#define SUID_DUMP_USER 1 /* Dump as user of process */
+-#define SUID_DUMP_ROOT 2 /* Dump as root */
+
+ /* Stack area protections */
+ #define EXSTACK_DEFAULT 0 /* Whatever the arch defaults to */
+diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
+index d8e636e5607d..cba9593c4047 100644
+--- a/include/linux/compiler-intel.h
++++ b/include/linux/compiler-intel.h
+@@ -27,5 +27,3 @@
+ #define __must_be_array(a) 0
+
+ #endif
+-
+-#define uninitialized_var(x) x
+diff --git a/include/linux/msg.h b/include/linux/msg.h
+index 56abf1558fdd..70fc369ad24f 100644
+--- a/include/linux/msg.h
++++ b/include/linux/msg.h
+@@ -76,9 +76,9 @@ struct msginfo {
+
+ /* one msg_msg structure for each message */
+ struct msg_msg {
+- struct list_head m_list;
+- long m_type;
+- int m_ts; /* message text size */
++ struct list_head m_list;
++ long m_type;
++ size_t m_ts; /* message text size */
+ struct msg_msgseg* next;
+ void *security;
+ /* the actual message follows immediately */
+diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
+index daad4e626a3f..3887901bf48c 100644
+--- a/include/linux/mtd/map.h
++++ b/include/linux/mtd/map.h
+@@ -361,7 +361,7 @@ static inline map_word map_word_load_partial(struct map_info *map, map_word orig
+ bitpos = (map_bankwidth(map)-1-i)*8;
+ #endif
+ orig.x[0] &= ~(0xff << bitpos);
+- orig.x[0] |= buf[i-start] << bitpos;
++ orig.x[0] |= (unsigned long)buf[i-start] << bitpos;
+ }
+ }
+ return orig;
+@@ -380,7 +380,7 @@ static inline map_word map_word_ff(struct map_info *map)
+
+ if (map_bankwidth(map) < MAP_FF_LIMIT) {
+ int bw = 8 * map_bankwidth(map);
+- r.x[0] = (1 << bw) - 1;
++ r.x[0] = (1UL << bw) - 1;
+ } else {
+ for (i=0; i<map_words(map); i++)
+ r.x[i] = ~0UL;
+diff --git a/include/linux/net.h b/include/linux/net.h
+index b7ca08e1d993..bd4f6c720510 100644
+--- a/include/linux/net.h
++++ b/include/linux/net.h
+@@ -197,6 +197,14 @@ struct proto_ops {
+ #endif
+ int (*sendmsg) (struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len);
++ /* Notes for implementing recvmsg:
++ * ===============================
++ * msg->msg_namelen should get updated by the recvmsg handlers
++ * iff msg_name != NULL. It is by default 0 to prevent
++ * returning uninitialized memory to user space. The recvfrom
++ * handlers can assume that msg.msg_name is either NULL or has
++ * a minimum size of sizeof(struct sockaddr_storage).
++ */
+ int (*recvmsg) (struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len,
+ int flags);
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 3db3da130c46..d93f41709c45 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -1579,6 +1579,7 @@
+ #define PCI_SUBDEVICE_ID_KEYSPAN_SX2 0x5334
+
+ #define PCI_VENDOR_ID_MARVELL 0x11ab
++#define PCI_VENDOR_ID_MARVELL_EXT 0x1b4b
+ #define PCI_DEVICE_ID_MARVELL_GT64111 0x4146
+ #define PCI_DEVICE_ID_MARVELL_GT64260 0x6430
+ #define PCI_DEVICE_ID_MARVELL_MV64360 0x6460
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 7e77cee62bd9..f5e1311f491e 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -89,9 +89,9 @@ static inline void prandom32_seed(struct rnd_state *state, u64 seed)
+ {
+ u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
+
+- state->s1 = __seed(i, 1);
+- state->s2 = __seed(i, 7);
+- state->s3 = __seed(i, 15);
++ state->s1 = __seed(i, 2);
++ state->s2 = __seed(i, 8);
++ state->s3 = __seed(i, 16);
+ }
+
+ #ifdef CONFIG_ARCH_RANDOM
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 8204898dfed8..312d047eb918 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -403,6 +403,10 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
+ extern void set_dumpable(struct mm_struct *mm, int value);
+ extern int get_dumpable(struct mm_struct *mm);
+
++#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
++#define SUID_DUMP_USER 1 /* Dump as user of process */
++#define SUID_DUMP_ROOT 2 /* Dump as root */
++
+ /* mm flags */
+ /* dumpable bits */
+ #define MMF_DUMPABLE 0 /* core dump is permitted */
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 06aed72023f7..b935e6ca5eb9 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -466,7 +466,7 @@ extern int compat_ip_getsockopt(struct sock *sk, int level,
+ int optname, char __user *optval, int __user *optlen);
+ extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
+
+-extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
++extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
+ extern void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
+ __be16 port, u32 info, u8 *payload);
+ extern void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 4d549cfedf96..0580673d76ed 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -602,8 +602,10 @@ extern int compat_ipv6_getsockopt(struct sock *sk,
+ extern int ip6_datagram_connect(struct sock *sk,
+ struct sockaddr *addr, int addr_len);
+
+-extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
+-extern int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
++extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
++ int *addr_len);
++extern int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
++ int *addr_len);
+ extern void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
+ u32 info, u8 *payload);
+ extern void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
+diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
+index c42506212649..ab240bb608a8 100644
+--- a/include/sound/memalloc.h
++++ b/include/sound/memalloc.h
+@@ -101,7 +101,7 @@ static inline unsigned int snd_sgbuf_aligned_pages(size_t size)
+ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_sg_buf *sgbuf, size_t offset)
+ {
+ dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
+- addr &= PAGE_MASK;
++ addr &= ~((dma_addr_t)PAGE_SIZE - 1);
+ return addr + offset % PAGE_SIZE;
+ }
+
+diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
+index 769724944fc6..763bf05ccb27 100644
+--- a/include/trace/ftrace.h
++++ b/include/trace/ftrace.h
+@@ -379,7 +379,8 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
+ __data_size += (len) * sizeof(type);
+
+ #undef __string
+-#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
++#define __string(item, src) __dynamic_array(char, item, \
++ strlen((src) ? (const char *)(src) : "(null)") + 1)
+
+ #undef DECLARE_EVENT_CLASS
+ #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+@@ -504,7 +505,7 @@ static inline notrace int ftrace_get_offsets_##call( \
+
+ #undef __assign_str
+ #define __assign_str(dst, src) \
+- strcpy(__get_str(dst), src);
++ strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
+
+ #undef TP_fast_assign
+ #define TP_fast_assign(args...) args
+diff --git a/ipc/msgutil.c b/ipc/msgutil.c
+index 5652101cdac0..fc6fded4ff03 100644
+--- a/ipc/msgutil.c
++++ b/ipc/msgutil.c
+@@ -37,15 +37,15 @@ struct msg_msgseg {
+ /* the next part of the message follows immediately */
+ };
+
+-#define DATALEN_MSG (PAGE_SIZE-sizeof(struct msg_msg))
+-#define DATALEN_SEG (PAGE_SIZE-sizeof(struct msg_msgseg))
++#define DATALEN_MSG ((size_t)PAGE_SIZE-sizeof(struct msg_msg))
++#define DATALEN_SEG ((size_t)PAGE_SIZE-sizeof(struct msg_msgseg))
+
+-struct msg_msg *load_msg(const void __user *src, int len)
++struct msg_msg *load_msg(const void __user *src, size_t len)
+ {
+ struct msg_msg *msg;
+ struct msg_msgseg **pseg;
+ int err;
+- int alen;
++ size_t alen;
+
+ alen = len;
+ if (alen > DATALEN_MSG)
+@@ -99,9 +99,9 @@ out_err:
+ return ERR_PTR(err);
+ }
+
+-int store_msg(void __user *dest, struct msg_msg *msg, int len)
++int store_msg(void __user *dest, struct msg_msg *msg, size_t len)
+ {
+- int alen;
++ size_t alen;
+ struct msg_msgseg *seg;
+
+ alen = len;
+diff --git a/ipc/util.h b/ipc/util.h
+index 6f5c20bedaab..0bfc934c2f45 100644
+--- a/ipc/util.h
++++ b/ipc/util.h
+@@ -138,8 +138,8 @@ int ipc_parse_version (int *cmd);
+ #endif
+
+ extern void free_msg(struct msg_msg *msg);
+-extern struct msg_msg *load_msg(const void __user *src, int len);
+-extern int store_msg(void __user *dest, struct msg_msg *msg, int len);
++extern struct msg_msg *load_msg(const void __user *src, size_t len);
++extern int store_msg(void __user *dest, struct msg_msg *msg, size_t len);
+
+ extern void recompute_msgmni(struct ipc_namespace *);
+
+diff --git a/kernel/audit.c b/kernel/audit.c
+index d4bc594acbef..e14bc7422aca 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -625,7 +625,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
+ char *ctx = NULL;
+ u32 len;
+
+- if (!audit_enabled) {
++ if (!audit_enabled && msg_type != AUDIT_USER_AVC) {
+ *ab = NULL;
+ return rc;
+ }
+@@ -684,6 +684,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+
+ switch (msg_type) {
+ case AUDIT_GET:
++ status_set.mask = 0;
+ status_set.enabled = audit_enabled;
+ status_set.failure = audit_failure;
+ status_set.pid = audit_pid;
+@@ -695,7 +696,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ &status_set, sizeof(status_set));
+ break;
+ case AUDIT_SET:
+- if (nlh->nlmsg_len < sizeof(struct audit_status))
++ if (nlmsg_len(nlh) < sizeof(struct audit_status))
+ return -EINVAL;
+ status_get = (struct audit_status *)data;
+ if (status_get->mask & AUDIT_STATUS_ENABLED) {
+@@ -899,7 +900,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ struct task_struct *tsk;
+ unsigned long flags;
+
+- if (nlh->nlmsg_len < sizeof(struct audit_tty_status))
++ if (nlmsg_len(nlh) < sizeof(struct audit_tty_status))
+ return -EINVAL;
+ s = data;
+ if (s->enabled != 0 && s->enabled != 1)
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 835eee6a287b..57eb98d1d887 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -983,8 +983,10 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
+ need_loop = task_has_mempolicy(tsk) ||
+ !nodes_intersects(*newmems, tsk->mems_allowed);
+
+- if (need_loop)
++ if (need_loop) {
++ local_irq_disable();
+ write_seqcount_begin(&tsk->mems_allowed_seq);
++ }
+
+ nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
+ mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
+@@ -992,8 +994,10 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
+ mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
+ tsk->mems_allowed = *newmems;
+
+- if (need_loop)
++ if (need_loop) {
+ write_seqcount_end(&tsk->mems_allowed_seq);
++ local_irq_enable();
++ }
+
+ task_unlock(tsk);
+ }
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 1d0538e026ba..8888815c578b 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -285,7 +285,7 @@ again:
+ put_page(page);
+ /* serialize against __split_huge_page_splitting() */
+ local_irq_disable();
+- if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
++ if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
+ page_head = compound_head(page);
+ /*
+ * page_head is valid pointer but we must pin
+diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
+index 15e53b1766a6..dcd3f9796817 100644
+--- a/kernel/irq/pm.c
++++ b/kernel/irq/pm.c
+@@ -50,7 +50,7 @@ static void resume_irqs(bool want_early)
+ bool is_early = desc->action &&
+ desc->action->flags & IRQF_EARLY_RESUME;
+
+- if (is_early != want_early)
++ if (!is_early && want_early)
+ continue;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index cbe2c1441392..380291efe788 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -1390,7 +1390,11 @@ int hibernate_preallocate_memory(void)
+ * highmem and non-highmem zones separately.
+ */
+ pages_highmem = preallocate_image_highmem(highmem / 2);
+- alloc = (count - max_size) - pages_highmem;
++ alloc = count - max_size;
++ if (alloc > pages_highmem)
++ alloc -= pages_highmem;
++ else
++ alloc = 0;
+ pages = preallocate_image_memory(alloc, avail_normal);
+ if (pages < alloc) {
+ /* We have exhausted non-highmem pages, try highmem. */
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 67fedad29612..f79803a1bfc9 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -246,7 +246,8 @@ ok:
+ smp_rmb();
+ if (task->mm)
+ dumpable = get_dumpable(task->mm);
+- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
++ if (dumpable != SUID_DUMP_USER &&
++ !task_ns_capable(task, CAP_SYS_PTRACE))
+ return -EPERM;
+
+ return security_ptrace_access_check(task, mode);
+diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
+index a6710a112b4f..f4010e2820fc 100644
+--- a/kernel/sched_debug.c
++++ b/kernel/sched_debug.c
+@@ -213,6 +213,14 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+ SEQ_printf(m, " .%-30s: %d\n", "load_tg",
+ atomic_read(&cfs_rq->tg->load_weight));
+ #endif
++#ifdef CONFIG_CFS_BANDWIDTH
++ SEQ_printf(m, " .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
++ cfs_rq->tg->cfs_bandwidth.timer_active);
++ SEQ_printf(m, " .%-30s: %d\n", "throttled",
++ cfs_rq->throttled);
++ SEQ_printf(m, " .%-30s: %d\n", "throttle_count",
++ cfs_rq->throttle_count);
++#endif
+
+ print_cfs_group_stats(m, cpu, cfs_rq->tg);
+ #endif
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index c261da7ddc71..5b9e456ea98c 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -1527,6 +1527,8 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
+ cfs_rq->throttled_timestamp = rq->clock;
+ raw_spin_lock(&cfs_b->lock);
+ list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
++ if (!cfs_b->timer_active)
++ __start_cfs_bandwidth(cfs_b);
+ raw_spin_unlock(&cfs_b->lock);
+ }
+
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index 8a46f5d64504..0907e435da0a 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -468,7 +468,7 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
+ clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;
+
+ if (!alarmtimer_get_rtcdev())
+- return -ENOTSUPP;
++ return -EINVAL;
+
+ return hrtimer_get_res(baseid, tp);
+ }
+@@ -485,7 +485,7 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
+ struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];
+
+ if (!alarmtimer_get_rtcdev())
+- return -ENOTSUPP;
++ return -EINVAL;
+
+ *tp = ktime_to_timespec(base->gettime());
+ return 0;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 226776b24357..d40d7f6f3c51 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -259,9 +259,6 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
+
+ static int __register_ftrace_function(struct ftrace_ops *ops)
+ {
+- if (ftrace_disabled)
+- return -ENODEV;
+-
+ if (FTRACE_WARN_ON(ops == &global_ops))
+ return -EINVAL;
+
+@@ -290,9 +287,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
+ {
+ int ret;
+
+- if (ftrace_disabled)
+- return -ENODEV;
+-
+ if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
+ return -EBUSY;
+
+@@ -1017,6 +1011,11 @@ static struct ftrace_page *ftrace_pages;
+
+ static struct dyn_ftrace *ftrace_free_records;
+
++static bool ftrace_hash_empty(struct ftrace_hash *hash)
++{
++ return !hash || !hash->count;
++}
++
+ static struct ftrace_func_entry *
+ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
+ {
+@@ -1025,7 +1024,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
+ struct hlist_head *hhd;
+ struct hlist_node *n;
+
+- if (!hash->count)
++ if (ftrace_hash_empty(hash))
+ return NULL;
+
+ if (hash->size_bits > 0)
+@@ -1169,7 +1168,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
+ return NULL;
+
+ /* Empty hash? */
+- if (!hash || !hash->count)
++ if (ftrace_hash_empty(hash))
+ return new_hash;
+
+ size = 1 << hash->size_bits;
+@@ -1294,9 +1293,9 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ filter_hash = rcu_dereference_raw(ops->filter_hash);
+ notrace_hash = rcu_dereference_raw(ops->notrace_hash);
+
+- if ((!filter_hash || !filter_hash->count ||
++ if ((ftrace_hash_empty(filter_hash) ||
+ ftrace_lookup_ip(filter_hash, ip)) &&
+- (!notrace_hash || !notrace_hash->count ||
++ (ftrace_hash_empty(notrace_hash) ||
+ !ftrace_lookup_ip(notrace_hash, ip)))
+ ret = 1;
+ else
+@@ -1348,7 +1347,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
+ if (filter_hash) {
+ hash = ops->filter_hash;
+ other_hash = ops->notrace_hash;
+- if (!hash || !hash->count)
++ if (ftrace_hash_empty(hash))
+ all = 1;
+ } else {
+ inc = !inc;
+@@ -1358,7 +1357,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
+ * If the notrace hash has no items,
+ * then there's nothing to do.
+ */
+- if (hash && !hash->count)
++ if (ftrace_hash_empty(hash))
+ return;
+ }
+
+@@ -1375,8 +1374,8 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
+ if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
+ match = 1;
+ } else {
+- in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
+- in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);
++ in_hash = !!ftrace_lookup_ip(hash, rec->ip);
++ in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
+
+ /*
+ *
+@@ -1384,7 +1383,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
+ if (filter_hash && in_hash && !in_other_hash)
+ match = 1;
+ else if (!filter_hash && in_hash &&
+- (in_other_hash || !other_hash->count))
++ (in_other_hash || ftrace_hash_empty(other_hash)))
+ match = 1;
+ }
+ if (!match)
+@@ -1698,10 +1697,15 @@ static void ftrace_startup_enable(int command)
+ static int ftrace_startup(struct ftrace_ops *ops, int command)
+ {
+ bool hash_enable = true;
++ int ret;
+
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
++ ret = __register_ftrace_function(ops);
++ if (ret)
++ return ret;
++
+ ftrace_start_up++;
+ command |= FTRACE_UPDATE_CALLS;
+
+@@ -1723,12 +1727,17 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
+ return 0;
+ }
+
+-static void ftrace_shutdown(struct ftrace_ops *ops, int command)
++static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+ {
+ bool hash_disable = true;
++ int ret;
+
+ if (unlikely(ftrace_disabled))
+- return;
++ return -ENODEV;
++
++ ret = __unregister_ftrace_function(ops);
++ if (ret)
++ return ret;
+
+ ftrace_start_up--;
+ /*
+@@ -1763,9 +1772,10 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
+ }
+
+ if (!command || !ftrace_enabled)
+- return;
++ return 0;
+
+ ftrace_run_update_code(command);
++ return 0;
+ }
+
+ static void ftrace_startup_sysctl(void)
+@@ -1794,12 +1804,57 @@ static cycle_t ftrace_update_time;
+ static unsigned long ftrace_update_cnt;
+ unsigned long ftrace_update_tot_cnt;
+
+-static int ops_traces_mod(struct ftrace_ops *ops)
++static inline int ops_traces_mod(struct ftrace_ops *ops)
+ {
+- struct ftrace_hash *hash;
++ /*
++ * Filter_hash being empty will default to trace module.
++ * But notrace hash requires a test of individual module functions.
++ */
++ return ftrace_hash_empty(ops->filter_hash) &&
++ ftrace_hash_empty(ops->notrace_hash);
++}
+
+- hash = ops->filter_hash;
+- return !!(!hash || !hash->count);
++/*
++ * Check if the current ops references the record.
++ *
++ * If the ops traces all functions, then it was already accounted for.
++ * If the ops does not trace the current record function, skip it.
++ * If the ops ignores the function via notrace filter, skip it.
++ */
++static inline bool
++ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
++{
++ /* If ops isn't enabled, ignore it */
++ if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
++ return 0;
++
++ /* If ops traces all mods, we already accounted for it */
++ if (ops_traces_mod(ops))
++ return 0;
++
++ /* The function must be in the filter */
++ if (!ftrace_hash_empty(ops->filter_hash) &&
++ !ftrace_lookup_ip(ops->filter_hash, rec->ip))
++ return 0;
++
++ /* If in notrace hash, we ignore it too */
++ if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
++ return 0;
++
++ return 1;
++}
++
++static int referenced_filters(struct dyn_ftrace *rec)
++{
++ struct ftrace_ops *ops;
++ int cnt = 0;
++
++ for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
++ if (ops_references_rec(ops, rec))
++ cnt++;
++ }
++
++ return cnt;
+ }
+
+ static int ftrace_update_code(struct module *mod)
+@@ -1807,6 +1862,7 @@ static int ftrace_update_code(struct module *mod)
+ struct dyn_ftrace *p;
+ cycle_t start, stop;
+ unsigned long ref = 0;
++ bool test = false;
+
+ /*
+ * When adding a module, we need to check if tracers are
+@@ -1819,9 +1875,12 @@ static int ftrace_update_code(struct module *mod)
+
+ for (ops = ftrace_ops_list;
+ ops != &ftrace_list_end; ops = ops->next) {
+- if (ops->flags & FTRACE_OPS_FL_ENABLED &&
+- ops_traces_mod(ops))
+- ref++;
++ if (ops->flags & FTRACE_OPS_FL_ENABLED) {
++ if (ops_traces_mod(ops))
++ ref++;
++ else
++ test = true;
++ }
+ }
+ }
+
+@@ -1829,6 +1888,7 @@ static int ftrace_update_code(struct module *mod)
+ ftrace_update_cnt = 0;
+
+ while (ftrace_new_addrs) {
++ int cnt = ref;
+
+ /* If something went wrong, bail without enabling anything */
+ if (unlikely(ftrace_disabled))
+@@ -1836,7 +1896,9 @@ static int ftrace_update_code(struct module *mod)
+
+ p = ftrace_new_addrs;
+ ftrace_new_addrs = p->newlist;
+- p->flags = ref;
++ if (test)
++ cnt += referenced_filters(p);
++ p->flags = cnt;
+
+ /*
+ * Do the initial record conversion from mcount jump
+@@ -1859,7 +1921,7 @@ static int ftrace_update_code(struct module *mod)
+ * conversion puts the module to the correct state, thus
+ * passing the ftrace_make_call check.
+ */
+- if (ftrace_start_up && ref) {
++ if (ftrace_start_up && cnt) {
+ int failed = __ftrace_replace_code(p, 1);
+ if (failed) {
+ ftrace_bug(failed, p->ip);
+@@ -2112,7 +2174,8 @@ static void *t_start(struct seq_file *m, loff_t *pos)
+ * off, we can short cut and just print out that all
+ * functions are enabled.
+ */
+- if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
++ if (iter->flags & FTRACE_ITER_FILTER &&
++ ftrace_hash_empty(ops->filter_hash)) {
+ if (*pos > 0)
+ return t_hash_start(m, pos);
+ iter->flags |= FTRACE_ITER_PRINTALL;
+@@ -2564,16 +2627,13 @@ static void __enable_ftrace_function_probe(void)
+ if (i == FTRACE_FUNC_HASHSIZE)
+ return;
+
+- ret = __register_ftrace_function(&trace_probe_ops);
+- if (!ret)
+- ret = ftrace_startup(&trace_probe_ops, 0);
++ ret = ftrace_startup(&trace_probe_ops, 0);
+
+ ftrace_probe_registered = 1;
+ }
+
+ static void __disable_ftrace_function_probe(void)
+ {
+- int ret;
+ int i;
+
+ if (!ftrace_probe_registered)
+@@ -2586,9 +2646,7 @@ static void __disable_ftrace_function_probe(void)
+ }
+
+ /* no more funcs left */
+- ret = __unregister_ftrace_function(&trace_probe_ops);
+- if (!ret)
+- ftrace_shutdown(&trace_probe_ops, 0);
++ ftrace_shutdown(&trace_probe_ops, 0);
+
+ ftrace_probe_registered = 0;
+ }
+@@ -3561,12 +3619,15 @@ device_initcall(ftrace_nodyn_init);
+ static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
+ static inline void ftrace_startup_enable(int command) { }
+ /* Keep as macros so we do not need to define the commands */
+-# define ftrace_startup(ops, command) \
+- ({ \
+- (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
+- 0; \
++# define ftrace_startup(ops, command) \
++ ({ \
++ int ___ret = __register_ftrace_function(ops); \
++ if (!___ret) \
++ (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
++ ___ret; \
+ })
+-# define ftrace_shutdown(ops, command) do { } while (0)
++# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
++
+ # define ftrace_startup_sysctl() do { } while (0)
+ # define ftrace_shutdown_sysctl() do { } while (0)
+
+@@ -3906,15 +3967,8 @@ int register_ftrace_function(struct ftrace_ops *ops)
+
+ mutex_lock(&ftrace_lock);
+
+- if (unlikely(ftrace_disabled))
+- goto out_unlock;
+-
+- ret = __register_ftrace_function(ops);
+- if (!ret)
+- ret = ftrace_startup(ops, 0);
+-
++ ret = ftrace_startup(ops, 0);
+
+- out_unlock:
+ mutex_unlock(&ftrace_lock);
+ return ret;
+ }
+@@ -3931,9 +3985,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
+ int ret;
+
+ mutex_lock(&ftrace_lock);
+- ret = __unregister_ftrace_function(ops);
+- if (!ret)
+- ftrace_shutdown(ops, 0);
++ ret = ftrace_shutdown(ops, 0);
+ mutex_unlock(&ftrace_lock);
+
+ return ret;
+@@ -4127,6 +4179,12 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
+ return NOTIFY_DONE;
+ }
+
++/* Just a place holder for function graph */
++static struct ftrace_ops fgraph_ops __read_mostly = {
++ .func = ftrace_stub,
++ .flags = FTRACE_OPS_FL_GLOBAL,
++};
++
+ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ trace_func_graph_ent_t entryfunc)
+ {
+@@ -4153,7 +4211,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ ftrace_graph_return = retfunc;
+ ftrace_graph_entry = entryfunc;
+
+- ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
++ ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
+
+ out:
+ mutex_unlock(&ftrace_lock);
+@@ -4170,7 +4228,7 @@ void unregister_ftrace_graph(void)
+ ftrace_graph_active--;
+ ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+ ftrace_graph_entry = ftrace_graph_entry_stub;
+- ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
++ ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
+ unregister_pm_notifier(&ftrace_suspend_notifier);
+ unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
+
+diff --git a/lib/random32.c b/lib/random32.c
+index fc3545a32771..1f44bdc6e8d3 100644
+--- a/lib/random32.c
++++ b/lib/random32.c
+@@ -92,7 +92,7 @@ void srandom32(u32 entropy)
+ */
+ for_each_possible_cpu (i) {
+ struct rnd_state *state = &per_cpu(net_rand_state, i);
+- state->s1 = __seed(state->s1 ^ entropy, 1);
++ state->s1 = __seed(state->s1 ^ entropy, 2);
+ }
+ }
+ EXPORT_SYMBOL(srandom32);
+@@ -109,9 +109,9 @@ static int __init random32_init(void)
+ struct rnd_state *state = &per_cpu(net_rand_state,i);
+
+ #define LCG(x) ((x) * 69069) /* super-duper LCG */
+- state->s1 = __seed(LCG(i + jiffies), 1);
+- state->s2 = __seed(LCG(state->s1), 7);
+- state->s3 = __seed(LCG(state->s2), 15);
++ state->s1 = __seed(LCG(i + jiffies), 2);
++ state->s2 = __seed(LCG(state->s1), 8);
++ state->s3 = __seed(LCG(state->s2), 16);
+
+ /* "warm it up" */
+ prandom32(state);
+@@ -138,9 +138,9 @@ static int __init random32_reseed(void)
+ u32 seeds[3];
+
+ get_random_bytes(&seeds, sizeof(seeds));
+- state->s1 = __seed(seeds[0], 1);
+- state->s2 = __seed(seeds[1], 7);
+- state->s3 = __seed(seeds[2], 15);
++ state->s1 = __seed(seeds[0], 2);
++ state->s2 = __seed(seeds[1], 8);
++ state->s3 = __seed(seeds[2], 16);
+
+ /* mix it in */
+ prandom32(state);
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index d74c317f4b29..ae02e421b75f 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -25,6 +25,7 @@
+ #include <linux/kallsyms.h>
+ #include <linux/uaccess.h>
+ #include <linux/ioport.h>
++#include <linux/cred.h>
+ #include <net/addrconf.h>
+
+ #include <asm/page.h> /* for PAGE_SIZE */
+@@ -892,10 +893,35 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
+ spec.field_width = 2 * sizeof(void *);
+ return string(buf, end, "pK-error", spec);
+ }
+- if (!((kptr_restrict == 0) ||
+- (kptr_restrict == 1 &&
+- has_capability_noaudit(current, CAP_SYSLOG))))
++
++ switch (kptr_restrict) {
++ case 0:
++ /* Always print %pK values */
++ break;
++ case 1: {
++ /*
++ * Only print the real pointer value if the current
++ * process has CAP_SYSLOG and is running with the
++ * same credentials it started with. This is because
++ * access to files is checked at open() time, but %pK
++ * checks permission at read() time. We don't want to
++ * leak pointer values if a binary opens a file using
++ * %pK and then elevates privileges before reading it.
++ */
++ const struct cred *cred = current_cred();
++
++ if (!has_capability_noaudit(current, CAP_SYSLOG) ||
++ cred->euid != cred->uid ||
++ cred->egid != cred->gid)
++ ptr = NULL;
++ break;
++ }
++ case 2:
++ default:
++ /* Always print 0's for %pK */
+ ptr = NULL;
++ break;
++ }
+ break;
+ }
+ spec.flags |= SMALL;
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index bfa9ab93eda5..334d4cd7612f 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1740,7 +1740,6 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
+ size_t size, int flags)
+ {
+ struct sock *sk = sock->sk;
+- struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name;
+ struct ddpehdr *ddp;
+ int copied = 0;
+ int offset = 0;
+@@ -1769,14 +1768,13 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
+ }
+ err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);
+
+- if (!err) {
+- if (sat) {
+- sat->sat_family = AF_APPLETALK;
+- sat->sat_port = ddp->deh_sport;
+- sat->sat_addr.s_node = ddp->deh_snode;
+- sat->sat_addr.s_net = ddp->deh_snet;
+- }
+- msg->msg_namelen = sizeof(*sat);
++ if (!err && msg->msg_name) {
++ struct sockaddr_at *sat = msg->msg_name;
++ sat->sat_family = AF_APPLETALK;
++ sat->sat_port = ddp->deh_sport;
++ sat->sat_addr.s_node = ddp->deh_snode;
++ sat->sat_addr.s_net = ddp->deh_snet;
++ msg->msg_namelen = sizeof(*sat);
+ }
+
+ skb_free_datagram(sk, skb); /* Free the datagram. */
+diff --git a/net/atm/common.c b/net/atm/common.c
+index 43b6bfed036c..0ca06e851e7c 100644
+--- a/net/atm/common.c
++++ b/net/atm/common.c
+@@ -500,8 +500,6 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+ struct sk_buff *skb;
+ int copied, error = -EINVAL;
+
+- msg->msg_namelen = 0;
+-
+ if (sock->state != SS_CONNECTED)
+ return -ENOTCONN;
+ if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index 86ac37f8e227..7b8db0ede163 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -1635,11 +1635,11 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+
+- if (msg->msg_namelen != 0) {
+- struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
++ if (msg->msg_name) {
+ ax25_digi digi;
+ ax25_address src;
+ const unsigned char *mac = skb_mac_header(skb);
++ struct sockaddr_ax25 *sax = msg->msg_name;
+
+ memset(sax, 0, sizeof(struct full_sockaddr_ax25));
+ ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index 838f113bfdd9..0938f6b505e1 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -245,8 +245,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (flags & (MSG_OOB))
+ return -EOPNOTSUPP;
+
+- msg->msg_namelen = 0;
+-
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+@@ -311,8 +309,6 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+- msg->msg_namelen = 0;
+-
+ BT_DBG("sk %p size %zu", sk, size);
+
+ lock_sock(sk);
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 8361ee40aec4..bb78c754a5ae 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -448,8 +448,6 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (!skb)
+ return err;
+
+- msg->msg_namelen = 0;
+-
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 82ce164c29fa..14c48647958b 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -627,7 +627,6 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
+ rfcomm_dlc_accept(d);
+- msg->msg_namelen = 0;
+ return 0;
+ }
+
+diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
+index f3f75ad93c89..56693c3dbe7f 100644
+--- a/net/bridge/br_if.c
++++ b/net/bridge/br_if.c
+@@ -170,6 +170,8 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
+ del_nbp(p);
+ }
+
++ br_fdb_delete_by_port(br, NULL, 1);
++
+ del_timer_sync(&br->gc_timer);
+
+ br_sysfs_delbr(br->dev);
+diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
+index 53a8e37d020a..7fac75f6fcfb 100644
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -320,8 +320,6 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (m->msg_flags&MSG_OOB)
+ goto read_error;
+
+- m->msg_namelen = 0;
+-
+ skb = skb_recv_datagram(sk, flags, 0 , &ret);
+ if (!skb)
+ goto read_error;
+@@ -395,8 +393,6 @@ static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (flags&MSG_OOB)
+ goto out;
+
+- msg->msg_namelen = 0;
+-
+ /*
+ * Lock the socket to prevent queue disordering
+ * while sleeps in memcpy_tomsg
+diff --git a/net/compat.c b/net/compat.c
+index 3139ef298145..41724c9d32d5 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -72,7 +72,7 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
+ __get_user(kmsg->msg_flags, &umsg->msg_flags))
+ return -EFAULT;
+ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+- return -EINVAL;
++ kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+ kmsg->msg_name = compat_ptr(tmp1);
+ kmsg->msg_iov = compat_ptr(tmp2);
+ kmsg->msg_control = compat_ptr(tmp3);
+@@ -93,7 +93,8 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
+ if (err < 0)
+ return err;
+ }
+- kern_msg->msg_name = kern_address;
++ if (kern_msg->msg_name)
++ kern_msg->msg_name = kern_address;
+ } else
+ kern_msg->msg_name = NULL;
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 8e455b84ecaa..7bcf37df0ce9 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2609,6 +2609,8 @@ ip:
+ goto done;
+
+ ip = (const struct iphdr *) (skb->data + nhoff);
++ if (ip->ihl < 5)
++ goto done;
+ if (ip_is_fragment(ip))
+ ip_proto = 0;
+ else
+@@ -4515,7 +4517,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
+ {
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+- if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
++ if (ops->ndo_change_rx_flags)
+ ops->ndo_change_rx_flags(dev, flags);
+ }
+
+diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
+index c02e63c908da..c0c21b1ce1ec 100644
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -443,7 +443,8 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+ if (frh->action && (frh->action != rule->action))
+ continue;
+
+- if (frh->table && (frh_get_table(frh, tb) != rule->table))
++ if (frh_get_table(frh, tb) &&
++ (frh_get_table(frh, tb) != rule->table))
+ continue;
+
+ if (tb[FRA_PRIORITY] &&
+diff --git a/net/core/iovec.c b/net/core/iovec.c
+index c40f27e7d208..139ef937a909 100644
+--- a/net/core/iovec.c
++++ b/net/core/iovec.c
+@@ -48,7 +48,8 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
+ if (err < 0)
+ return err;
+ }
+- m->msg_name = address;
++ if (m->msg_name)
++ m->msg_name = address;
+ } else {
+ m->msg_name = NULL;
+ }
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 2ef7da003865..80aeac9b20d2 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -2524,6 +2524,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
+ if (x) {
+ int ret;
+ __u8 *eth;
++ struct iphdr *iph;
++
+ nhead = x->props.header_len - skb_headroom(skb);
+ if (nhead > 0) {
+ ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
+@@ -2545,6 +2547,11 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
+ eth = (__u8 *) skb_push(skb, ETH_HLEN);
+ memcpy(eth, pkt_dev->hh, 12);
+ *(u16 *) &eth[12] = protocol;
++
++ /* Update IPv4 header len as well as checksum value */
++ iph = ip_hdr(skb);
++ iph->tot_len = htons(skb->len - ETH_HLEN);
++ ip_send_check(iph);
+ }
+ }
+ return 1;
+diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
+index 19d6aefe97d4..5d42df22c3c5 100644
+--- a/net/ieee802154/6lowpan.c
++++ b/net/ieee802154/6lowpan.c
+@@ -563,7 +563,7 @@ lowpan_process_data(struct sk_buff *skb)
+ * Traffic class carried in-line
+ * ECN + DSCP (1 byte), Flow Label is elided
+ */
+- case 1: /* 10b */
++ case 2: /* 10b */
+ if (!skb->len)
+ goto drop;
+ tmp = lowpan_fetch_skb_u8(skb);
+@@ -576,7 +576,7 @@ lowpan_process_data(struct sk_buff *skb)
+ * Flow Label carried in-line
+ * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
+ */
+- case 2: /* 01b */
++ case 1: /* 01b */
+ if (!skb->len)
+ goto drop;
+ tmp = lowpan_fetch_skb_u8(skb);
+diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
+index 424fafbc8cb0..ec0751051db7 100644
+--- a/net/ipv4/datagram.c
++++ b/net/ipv4/datagram.c
+@@ -57,7 +57,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ if (err == -ENETUNREACH)
+- IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
++ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+ goto out;
+ }
+
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 3b360023a821..542a9c14cc34 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -374,7 +374,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
+ /*
+ * Handle MSG_ERRQUEUE
+ */
+-int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
++int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ {
+ struct sock_exterr_skb *serr;
+ struct sk_buff *skb, *skb2;
+@@ -411,6 +411,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
+ serr->addr_offset);
+ sin->sin_port = serr->port;
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+
+ memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 294a380c6a09..00975b6db7e9 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -567,7 +567,7 @@ static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ err = PTR_ERR(rt);
+ rt = NULL;
+ if (err == -ENETUNREACH)
+- IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
++ IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+ goto out;
+ }
+
+@@ -623,7 +623,6 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len, int noblock, int flags, int *addr_len)
+ {
+ struct inet_sock *isk = inet_sk(sk);
+- struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
+ struct sk_buff *skb;
+ int copied, err;
+
+@@ -632,11 +631,8 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ if (flags & MSG_OOB)
+ goto out;
+
+- if (addr_len)
+- *addr_len = sizeof(*sin);
+-
+ if (flags & MSG_ERRQUEUE)
+- return ip_recv_error(sk, msg, len);
++ return ip_recv_error(sk, msg, len, addr_len);
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+@@ -656,11 +652,14 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ sock_recv_timestamp(msg, sk, skb);
+
+ /* Copy the address. */
+- if (sin) {
++ if (msg->msg_name) {
++ struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
++
+ sin->sin_family = AF_INET;
+ sin->sin_port = 0 /* skb->h.uh->source */;
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+ if (isk->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 28150145ce02..cfded931accc 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -686,11 +686,8 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ if (flags & MSG_OOB)
+ goto out;
+
+- if (addr_len)
+- *addr_len = sizeof(*sin);
+-
+ if (flags & MSG_ERRQUEUE) {
+- err = ip_recv_error(sk, msg, len);
++ err = ip_recv_error(sk, msg, len, addr_len);
+ goto out;
+ }
+
+@@ -716,6 +713,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sin->sin_port = 0;
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+ if (inet->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index a97c9ad20db7..92d71388bc23 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -182,7 +182,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ if (err == -ENETUNREACH)
+- IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
++ IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+ return err;
+ }
+
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 5decc93a17d7..8c2e25902f0b 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -937,7 +937,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ err = PTR_ERR(rt);
+ rt = NULL;
+ if (err == -ENETUNREACH)
+- IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
++ IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+ goto out;
+ }
+
+@@ -1036,6 +1036,9 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
+ struct udp_sock *up = udp_sk(sk);
+ int ret;
+
++ if (flags & MSG_SENDPAGE_NOTLAST)
++ flags |= MSG_MORE;
++
+ if (!up->pending) {
+ struct msghdr msg = { .msg_flags = flags|MSG_MORE };
+
+@@ -1171,14 +1174,8 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ int is_udplite = IS_UDPLITE(sk);
+ bool slow;
+
+- /*
+- * Check any passed addresses
+- */
+- if (addr_len)
+- *addr_len = sizeof(*sin);
+-
+ if (flags & MSG_ERRQUEUE)
+- return ip_recv_error(sk, msg, len);
++ return ip_recv_error(sk, msg, len, addr_len);
+
+ try_again:
+ skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
+@@ -1231,6 +1228,7 @@ try_again:
+ sin->sin_port = udp_hdr(skb)->source;
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+ if (inet->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index e2480691c220..3c7c9481b74c 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -315,7 +315,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
+ /*
+ * Handle MSG_ERRQUEUE
+ */
+-int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
++int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct sock_exterr_skb *serr;
+@@ -366,6 +366,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
+ ipv6_addr_set_v4mapped(*(__be32 *)(nh + serr->addr_offset),
+ &sin->sin6_addr);
+ }
++ *addr_len = sizeof(*sin);
+ }
+
+ memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
+@@ -374,6 +375,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
+ if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
+ sin->sin6_family = AF_INET6;
+ sin->sin6_flowinfo = 0;
++ sin->sin6_port = 0;
+ sin->sin6_scope_id = 0;
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr);
+@@ -418,7 +420,8 @@ out:
+ /*
+ * Handle IPV6_RECVPATHMTU
+ */
+-int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
++int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
++ int *addr_len)
+ {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct sk_buff *skb;
+@@ -452,6 +455,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
+ sin->sin6_port = 0;
+ sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
+ ipv6_addr_copy(&sin->sin6_addr, &mtu_info.ip6m_addr.sin6_addr);
++ *addr_len = sizeof(*sin);
+ }
+
+ put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 97675bfcab9f..d3fde7eb5718 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -144,8 +144,8 @@ static int ip6_finish_output2(struct sk_buff *skb)
+ return res;
+ }
+ rcu_read_unlock();
+- IP6_INC_STATS_BH(dev_net(dst->dev),
+- ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
++ IP6_INC_STATS(dev_net(dst->dev),
++ ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 6e6c2c4db2cf..9ecbc846b31d 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -456,14 +456,11 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
+ if (flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+- if (addr_len)
+- *addr_len=sizeof(*sin6);
+-
+ if (flags & MSG_ERRQUEUE)
+- return ipv6_recv_error(sk, msg, len);
++ return ipv6_recv_error(sk, msg, len, addr_len);
+
+ if (np->rxpmtu && np->rxopt.bits.rxpmtu)
+- return ipv6_recv_rxpmtu(sk, msg, len);
++ return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+@@ -498,6 +495,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
+ sin6->sin6_scope_id = 0;
+ if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ sin6->sin6_scope_id = IP6CB(skb)->iif;
++ *addr_len = sizeof(*sin6);
+ }
+
+ sock_recv_ts_and_drops(msg, sk, skb);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index bc9103de753b..1768238c855d 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -592,8 +592,11 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
+ prefix = &prefix_buf;
+ }
+
+- rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
+- dev->ifindex);
++ if (rinfo->prefix_len == 0)
++ rt = rt6_get_dflt_router(gwaddr, dev);
++ else
++ rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
++ gwaddr, dev->ifindex);
+
+ if (rt && !lifetime) {
+ ip6_del_rt(rt);
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index f9e496bde754..f8bec1e5fd4a 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -347,14 +347,11 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
+ int is_udp4;
+ bool slow;
+
+- if (addr_len)
+- *addr_len=sizeof(struct sockaddr_in6);
+-
+ if (flags & MSG_ERRQUEUE)
+- return ipv6_recv_error(sk, msg, len);
++ return ipv6_recv_error(sk, msg, len, addr_len);
+
+ if (np->rxpmtu && np->rxopt.bits.rxpmtu)
+- return ipv6_recv_rxpmtu(sk, msg, len);
++ return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
+
+ try_again:
+ skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
+@@ -423,7 +420,7 @@ try_again:
+ if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ sin6->sin6_scope_id = IP6CB(skb)->iif;
+ }
+-
++ *addr_len = sizeof(*sin6);
+ }
+ if (is_udp4) {
+ if (inet->cmsg_flags)
+diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
+index 9680226640ef..8c06a5065772 100644
+--- a/net/ipx/af_ipx.c
++++ b/net/ipx/af_ipx.c
+@@ -1835,8 +1835,6 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (skb->tstamp.tv64)
+ sk->sk_stamp = skb->tstamp;
+
+- msg->msg_namelen = sizeof(*sipx);
+-
+ if (sipx) {
+ sipx->sipx_family = AF_IPX;
+ sipx->sipx_port = ipx->ipx_source.sock;
+@@ -1844,6 +1842,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
+ sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net;
+ sipx->sipx_type = ipx->ipx_type;
+ sipx->sipx_zero = 0;
++ msg->msg_namelen = sizeof(*sipx);
+ }
+ rc = copied;
+
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index 91821e96dcec..f5d011ad4694 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -1386,8 +1386,6 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
+
+ IRDA_DEBUG(4, "%s()\n", __func__);
+
+- msg->msg_namelen = 0;
+-
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+ if (!skb)
+@@ -1452,8 +1450,6 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
+ timeo = sock_rcvtimeo(sk, noblock);
+
+- msg->msg_namelen = 0;
+-
+ do {
+ int chunk;
+ struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index e836140c75fb..cf98d625d958 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -1356,8 +1356,6 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ int blen;
+ int err = 0;
+
+- msg->msg_namelen = 0;
+-
+ if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
+ skb_queue_empty(&iucv->backlog_skb_q) &&
+ skb_queue_empty(&sk->sk_receive_queue) &&
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 8dbdb8e7dbe1..dc8d7efefa6c 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -3595,7 +3595,6 @@ static int pfkey_recvmsg(struct kiocb *kiocb,
+ if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
+ goto out;
+
+- msg->msg_namelen = 0;
+ skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
+ if (skb == NULL)
+ goto out;
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 6c7e6095bab1..334a93d1a8ed 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -568,9 +568,6 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
+ if (flags & MSG_OOB)
+ goto out;
+
+- if (addr_len)
+- *addr_len = sizeof(*sin);
+-
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+ goto out;
+@@ -593,6 +590,7 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sin->sin_port = 0;
+ memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
++ *addr_len = sizeof(*sin);
+ }
+ if (inet->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 85700795af03..969cd3eacd2b 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -200,8 +200,6 @@ static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (sk->sk_state & PPPOX_BOUND)
+ goto end;
+
+- msg->msg_namelen = 0;
+-
+ err = 0;
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ flags & MSG_DONTWAIT, &err);
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index e5565c71cbbb..99a60d545b24 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -720,8 +720,6 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
+ int target; /* Read at least this many bytes */
+ long timeo;
+
+- msg->msg_namelen = 0;
+-
+ lock_sock(sk);
+ copied = -ENOTCONN;
+ if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index db01d02ef372..71d85647c9df 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -764,7 +764,8 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
+ u16 sc;
+ int tid;
+
+- if (!ieee80211_is_data_qos(hdr->frame_control))
++ if (!ieee80211_is_data_qos(hdr->frame_control) ||
++ is_multicast_ether_addr(hdr->addr1))
+ goto dont_reorder;
+
+ /*
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 3d1d55ded4f9..2369e9620aa4 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1445,8 +1445,6 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
+ }
+ #endif
+
+- msg->msg_namelen = 0;
+-
+ copied = data_skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index 3df7c5a4ac8e..b4d889b589b0 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -1182,10 +1182,9 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
+ sax->sax25_family = AF_NETROM;
+ skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
+ AX25_ADDR_LEN);
++ msg->msg_namelen = sizeof(*sax);
+ }
+
+- msg->msg_namelen = sizeof(*sax);
+-
+ skb_free_datagram(sk, skb);
+
+ release_sock(sk);
+diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
+index 7a167fce319c..0d570d304da2 100644
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -248,8 +248,6 @@ static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (!skb)
+ return rc;
+
+- msg->msg_namelen = 0;
+-
+ copied = skb->len;
+ if (len < copied) {
+ msg->msg_flags |= MSG_TRUNC;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index a2ac2c37939c..4f19bf248fcd 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -295,6 +295,7 @@ struct packet_sock {
+ unsigned int tp_reserve;
+ unsigned int tp_loss:1;
+ unsigned int tp_tstamp;
++ struct net_device __rcu *cached_dev;
+ struct packet_type prot_hook ____cacheline_aligned_in_smp;
+ };
+
+@@ -350,11 +351,15 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
+ static void register_prot_hook(struct sock *sk)
+ {
+ struct packet_sock *po = pkt_sk(sk);
++
+ if (!po->running) {
+- if (po->fanout)
++ if (po->fanout) {
+ __fanout_link(sk, po);
+- else
++ } else {
+ dev_add_pack(&po->prot_hook);
++ rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
++ }
++
+ sock_hold(sk);
+ po->running = 1;
+ }
+@@ -372,10 +377,13 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
+ struct packet_sock *po = pkt_sk(sk);
+
+ po->running = 0;
+- if (po->fanout)
++ if (po->fanout) {
+ __fanout_unlink(sk, po);
+- else
++ } else {
+ __dev_remove_pack(&po->prot_hook);
++ RCU_INIT_POINTER(po->cached_dev, NULL);
++ }
++
+ __sock_put(sk);
+
+ if (sync) {
+@@ -497,9 +505,9 @@ static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
+
+ pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
+
+- spin_lock(&rb_queue->lock);
++ spin_lock_bh(&rb_queue->lock);
+ pkc->delete_blk_timer = 1;
+- spin_unlock(&rb_queue->lock);
++ spin_unlock_bh(&rb_queue->lock);
+
+ prb_del_retire_blk_timer(pkc);
+ }
+@@ -2032,12 +2040,24 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ return tp_len;
+ }
+
++static struct net_device *packet_cached_dev_get(struct packet_sock *po)
++{
++ struct net_device *dev;
++
++ rcu_read_lock();
++ dev = rcu_dereference(po->cached_dev);
++ if (dev)
++ dev_hold(dev);
++ rcu_read_unlock();
++
++ return dev;
++}
++
+ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ {
+ struct sk_buff *skb;
+ struct net_device *dev;
+ __be16 proto;
+- bool need_rls_dev = false;
+ int err, reserve = 0;
+ void *ph;
+ struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
+@@ -2050,7 +2070,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+
+ err = -EBUSY;
+ if (saddr == NULL) {
+- dev = po->prot_hook.dev;
++ dev = packet_cached_dev_get(po);
+ proto = po->num;
+ addr = NULL;
+ } else {
+@@ -2064,19 +2084,17 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ proto = saddr->sll_protocol;
+ addr = saddr->sll_addr;
+ dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
+- need_rls_dev = true;
+ }
+
+ err = -ENXIO;
+ if (unlikely(dev == NULL))
+ goto out;
+-
+- reserve = dev->hard_header_len;
+-
+ err = -ENETDOWN;
+ if (unlikely(!(dev->flags & IFF_UP)))
+ goto out_put;
+
++ reserve = dev->hard_header_len;
++
+ size_max = po->tx_ring.frame_size
+ - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
+
+@@ -2152,8 +2170,7 @@ out_status:
+ __packet_set_status(po, ph, status);
+ kfree_skb(skb);
+ out_put:
+- if (need_rls_dev)
+- dev_put(dev);
++ dev_put(dev);
+ out:
+ mutex_unlock(&po->pg_vec_lock);
+ return err;
+@@ -2191,7 +2208,6 @@ static int packet_snd(struct socket *sock,
+ struct sk_buff *skb;
+ struct net_device *dev;
+ __be16 proto;
+- bool need_rls_dev = false;
+ unsigned char *addr;
+ int err, reserve = 0;
+ struct virtio_net_hdr vnet_hdr = { 0 };
+@@ -2205,7 +2221,7 @@ static int packet_snd(struct socket *sock,
+ */
+
+ if (saddr == NULL) {
+- dev = po->prot_hook.dev;
++ dev = packet_cached_dev_get(po);
+ proto = po->num;
+ addr = NULL;
+ } else {
+@@ -2217,19 +2233,17 @@ static int packet_snd(struct socket *sock,
+ proto = saddr->sll_protocol;
+ addr = saddr->sll_addr;
+ dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
+- need_rls_dev = true;
+ }
+
+ err = -ENXIO;
+- if (dev == NULL)
++ if (unlikely(dev == NULL))
+ goto out_unlock;
+- if (sock->type == SOCK_RAW)
+- reserve = dev->hard_header_len;
+-
+ err = -ENETDOWN;
+- if (!(dev->flags & IFF_UP))
++ if (unlikely(!(dev->flags & IFF_UP)))
+ goto out_unlock;
+
++ if (sock->type == SOCK_RAW)
++ reserve = dev->hard_header_len;
+ if (po->has_vnet_hdr) {
+ vnet_hdr_len = sizeof(vnet_hdr);
+
+@@ -2350,15 +2364,14 @@ static int packet_snd(struct socket *sock,
+ if (err > 0 && (err = net_xmit_errno(err)) != 0)
+ goto out_unlock;
+
+- if (need_rls_dev)
+- dev_put(dev);
++ dev_put(dev);
+
+ return len;
+
+ out_free:
+ kfree_skb(skb);
+ out_unlock:
+- if (dev && need_rls_dev)
++ if (dev)
+ dev_put(dev);
+ out:
+ return err;
+@@ -2575,6 +2588,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
+ po = pkt_sk(sk);
+ sk->sk_family = PF_PACKET;
+ po->num = proto;
++ RCU_INIT_POINTER(po->cached_dev, NULL);
+
+ sk->sk_destruct = packet_sock_destruct;
+ sk_refcnt_debug_inc(sk);
+@@ -2663,7 +2677,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct sock *sk = sock->sk;
+ struct sk_buff *skb;
+ int copied, err;
+- struct sockaddr_ll *sll;
+ int vnet_hdr_len = 0;
+
+ err = -EINVAL;
+@@ -2746,22 +2759,10 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
+ goto out_free;
+ }
+
+- /*
+- * If the address length field is there to be filled in, we fill
+- * it in now.
++ /* You lose any data beyond the buffer you gave. If it worries
++ * a user program they can ask the device for its MTU
++ * anyway.
+ */
+-
+- sll = &PACKET_SKB_CB(skb)->sa.ll;
+- if (sock->type == SOCK_PACKET)
+- msg->msg_namelen = sizeof(struct sockaddr_pkt);
+- else
+- msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
+-
+- /*
+- * You lose any data beyond the buffer you gave. If it worries a
+- * user program they can ask the device for its MTU anyway.
+- */
+-
+ copied = skb->len;
+ if (copied > len) {
+ copied = len;
+@@ -2774,9 +2775,20 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ sock_recv_ts_and_drops(msg, sk, skb);
+
+- if (msg->msg_name)
++ if (msg->msg_name) {
++ /* If the address length field is there to be filled
++ * in, we fill it in now.
++ */
++ if (sock->type == SOCK_PACKET) {
++ msg->msg_namelen = sizeof(struct sockaddr_pkt);
++ } else {
++ struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
++ msg->msg_namelen = sll->sll_halen +
++ offsetof(struct sockaddr_ll, sll_addr);
++ }
+ memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
+ msg->msg_namelen);
++ }
+
+ if (pkt_sk(sk)->auxdata) {
+ struct tpacket_auxdata aux;
+diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
+index bf35b4e1a14c..b25f2d321e05 100644
+--- a/net/phonet/datagram.c
++++ b/net/phonet/datagram.c
+@@ -139,9 +139,6 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
+ MSG_CMSG_COMPAT))
+ goto out_nofree;
+
+- if (addr_len)
+- *addr_len = sizeof(sa);
+-
+ skb = skb_recv_datagram(sk, flags, noblock, &rval);
+ if (skb == NULL)
+ goto out_nofree;
+@@ -162,8 +159,10 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
+
+ rval = (flags & MSG_TRUNC) ? skb->len : copylen;
+
+- if (msg->msg_name != NULL)
+- memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn));
++ if (msg->msg_name != NULL) {
++ memcpy(msg->msg_name, &sa, sizeof(sa));
++ *addr_len = sizeof(sa);
++ }
+
+ out:
+ skb_free_datagram(sk, skb);
+diff --git a/net/rds/recv.c b/net/rds/recv.c
+index fc57d31676a1..96a1239c05c2 100644
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -410,8 +410,6 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+
+ rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
+
+- msg->msg_namelen = 0;
+-
+ if (msg_flags & MSG_OOB)
+ goto out;
+
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 1f96fb9743b5..bf76dec76e00 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -1221,7 +1221,6 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
+ {
+ struct sock *sk = sock->sk;
+ struct rose_sock *rose = rose_sk(sk);
+- struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
+ size_t copied;
+ unsigned char *asmptr;
+ struct sk_buff *skb;
+@@ -1257,8 +1256,11 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+
+- if (srose != NULL) {
+- memset(srose, 0, msg->msg_namelen);
++ if (msg->msg_name) {
++ struct sockaddr_rose *srose;
++
++ memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
++ srose = msg->msg_name;
+ srose->srose_family = AF_ROSE;
+ srose->srose_addr = rose->dest_addr;
+ srose->srose_call = rose->dest_call;
+diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
+index 4b48687c3890..898492a8d61b 100644
+--- a/net/rxrpc/ar-recvmsg.c
++++ b/net/rxrpc/ar-recvmsg.c
+@@ -143,10 +143,13 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ /* copy the peer address and timestamp */
+ if (!continue_call) {
+- if (msg->msg_name && msg->msg_namelen > 0)
++ if (msg->msg_name) {
++ size_t len =
++ sizeof(call->conn->trans->peer->srx);
+ memcpy(msg->msg_name,
+- &call->conn->trans->peer->srx,
+- sizeof(call->conn->trans->peer->srx));
++ &call->conn->trans->peer->srx, len);
++ msg->msg_namelen = len;
++ }
+ sock_recv_ts_and_drops(msg, &rx->sk, skb);
+ }
+
+diff --git a/net/socket.c b/net/socket.c
+index bf7adaae4b7f..d4faaded1a9d 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -215,12 +215,13 @@ static int move_addr_to_user(struct sockaddr *kaddr, int klen,
+ int err;
+ int len;
+
++ BUG_ON(klen > sizeof(struct sockaddr_storage));
+ err = get_user(len, ulen);
+ if (err)
+ return err;
+ if (len > klen)
+ len = klen;
+- if (len < 0 || len > sizeof(struct sockaddr_storage))
++ if (len < 0)
+ return -EINVAL;
+ if (len) {
+ if (audit_sockaddr(klen, kaddr))
+@@ -1752,8 +1753,10 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
+ msg.msg_iov = &iov;
+ iov.iov_len = size;
+ iov.iov_base = ubuf;
+- msg.msg_name = (struct sockaddr *)&address;
+- msg.msg_namelen = sizeof(address);
++ /* Save some cycles and don't copy the address if not needed */
++ msg.msg_name = addr ? (struct sockaddr *)&address : NULL;
++ /* We assume all kernel code knows the size of sockaddr_storage */
++ msg.msg_namelen = 0;
+ if (sock->file->f_flags & O_NONBLOCK)
+ flags |= MSG_DONTWAIT;
+ err = sock_recvmsg(sock, &msg, size, flags);
+@@ -1882,7 +1885,7 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
+ if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+ return -EFAULT;
+ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+- return -EINVAL;
++ kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+ return 0;
+ }
+
+@@ -2142,18 +2145,16 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+ goto out;
+ }
+
+- /*
+- * Save the user-mode address (verify_iovec will change the
+- * kernel msghdr to use the kernel address space)
++ /* Save the user-mode address (verify_iovec will change the
++ * kernel msghdr to use the kernel address space)
+ */
+-
+ uaddr = (__force void __user *)msg_sys->msg_name;
+ uaddr_len = COMPAT_NAMELEN(msg);
+- if (MSG_CMSG_COMPAT & flags) {
++ if (MSG_CMSG_COMPAT & flags)
+ err = verify_compat_iovec(msg_sys, iov,
+ (struct sockaddr *)&addr,
+ VERIFY_WRITE);
+- } else
++ else
+ err = verify_iovec(msg_sys, iov,
+ (struct sockaddr *)&addr,
+ VERIFY_WRITE);
+@@ -2164,6 +2165,9 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+ cmsg_ptr = (unsigned long)msg_sys->msg_control;
+ msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
+
++ /* We assume all kernel code knows the size of sockaddr_storage */
++ msg_sys->msg_namelen = 0;
++
+ if (sock->file->f_flags & O_NONBLOCK)
+ flags |= MSG_DONTWAIT;
+ err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys,
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 65fe23b286f4..bfb78fab06ea 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -390,8 +390,10 @@ static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen,
+ return kernel_sendmsg(sock, &msg, NULL, 0, 0);
+ }
+
+-static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more)
++static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy)
+ {
++ ssize_t (*do_sendpage)(struct socket *sock, struct page *page,
++ int offset, size_t size, int flags);
+ struct page **ppage;
+ unsigned int remainder;
+ int err, sent = 0;
+@@ -400,6 +402,9 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i
+ base += xdr->page_base;
+ ppage = xdr->pages + (base >> PAGE_SHIFT);
+ base &= ~PAGE_MASK;
++ do_sendpage = sock->ops->sendpage;
++ if (!zerocopy)
++ do_sendpage = sock_no_sendpage;
+ for(;;) {
+ unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
+ int flags = XS_SENDMSG_FLAGS;
+@@ -407,7 +412,7 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i
+ remainder -= len;
+ if (remainder != 0 || more)
+ flags |= MSG_MORE;
+- err = sock->ops->sendpage(sock, *ppage, base, len, flags);
++ err = do_sendpage(sock, *ppage, base, len, flags);
+ if (remainder == 0 || err != len)
+ break;
+ sent += err;
+@@ -428,9 +433,10 @@ static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned i
+ * @addrlen: UDP only -- length of destination address
+ * @xdr: buffer containing this request
+ * @base: starting position in the buffer
++ * @zerocopy: true if it is safe to use sendpage()
+ *
+ */
+-static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
++static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy)
+ {
+ unsigned int remainder = xdr->len - base;
+ int err, sent = 0;
+@@ -458,7 +464,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
+ if (base < xdr->page_len) {
+ unsigned int len = xdr->page_len - base;
+ remainder -= len;
+- err = xs_send_pagedata(sock, xdr, base, remainder != 0);
++ err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy);
+ if (remainder == 0 || err != len)
+ goto out;
+ sent += err;
+@@ -561,7 +567,7 @@ static int xs_local_send_request(struct rpc_task *task)
+ req->rq_svec->iov_base, req->rq_svec->iov_len);
+
+ status = xs_sendpages(transport->sock, NULL, 0,
+- xdr, req->rq_bytes_sent);
++ xdr, req->rq_bytes_sent, true);
+ dprintk("RPC: %s(%u) = %d\n",
+ __func__, xdr->len - req->rq_bytes_sent, status);
+ if (likely(status >= 0)) {
+@@ -617,7 +623,7 @@ static int xs_udp_send_request(struct rpc_task *task)
+ status = xs_sendpages(transport->sock,
+ xs_addr(xprt),
+ xprt->addrlen, xdr,
+- req->rq_bytes_sent);
++ req->rq_bytes_sent, true);
+
+ dprintk("RPC: xs_udp_send_request(%u) = %d\n",
+ xdr->len - req->rq_bytes_sent, status);
+@@ -688,6 +694,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
+ struct rpc_xprt *xprt = req->rq_xprt;
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+ struct xdr_buf *xdr = &req->rq_snd_buf;
++ bool zerocopy = true;
+ int status;
+
+ xs_encode_stream_record_marker(&req->rq_snd_buf);
+@@ -695,13 +702,20 @@ static int xs_tcp_send_request(struct rpc_task *task)
+ xs_pktdump("packet data:",
+ req->rq_svec->iov_base,
+ req->rq_svec->iov_len);
++ /* Don't use zero copy if this is a resend. If the RPC call
++ * completes while the socket holds a reference to the pages,
++ * then we may end up resending corrupted data.
++ */
++ if (task->tk_flags & RPC_TASK_SENT)
++ zerocopy = false;
+
+ /* Continue transmitting the packet/record. We must be careful
+ * to cope with writespace callbacks arriving _after_ we have
+ * called sendmsg(). */
+ while (1) {
+ status = xs_sendpages(transport->sock,
+- NULL, 0, xdr, req->rq_bytes_sent);
++ NULL, 0, xdr, req->rq_bytes_sent,
++ zerocopy);
+
+ dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
+ xdr->len - req->rq_bytes_sent, status);
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index fdf34af436eb..058941e6c936 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -949,9 +949,6 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
+ goto exit;
+ }
+
+- /* will be updated in set_orig_addr() if needed */
+- m->msg_namelen = 0;
+-
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ restart:
+
+@@ -1078,9 +1075,6 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
+ goto exit;
+ }
+
+- /* will be updated in set_orig_addr() if needed */
+- m->msg_namelen = 0;
+-
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ restart:
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 5122b22540be..9338ccc1a00b 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1744,7 +1744,6 @@ static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
+ {
+ struct unix_sock *u = unix_sk(sk);
+
+- msg->msg_namelen = 0;
+ if (u->addr) {
+ msg->msg_namelen = u->addr->len;
+ memcpy(msg->msg_name, u->addr->name, u->addr->len);
+@@ -1767,8 +1766,6 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (flags&MSG_OOB)
+ goto out;
+
+- msg->msg_namelen = 0;
+-
+ err = mutex_lock_interruptible(&u->readlock);
+ if (err) {
+ err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
+@@ -1902,8 +1899,6 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
+ timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
+
+- msg->msg_namelen = 0;
+-
+ /* Lock the socket to prevent queue disordering
+ * while sleeps in memcpy_tomsg
+ */
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index dc24ba9d5ccc..07b9973af6a7 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -1343,10 +1343,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
+ if (sx25) {
+ sx25->sx25_family = AF_X25;
+ sx25->sx25_addr = x25->dest_addr;
++ msg->msg_namelen = sizeof(*sx25);
+ }
+
+- msg->msg_namelen = sizeof(struct sockaddr_x25);
+-
+ x25_check_rbuf(sk);
+ rc = copied;
+ out_free_dgram:
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 1126c10a5e82..5898f342ee47 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -53,6 +53,7 @@
+ #include <net/icmp.h>
+ #include <net/ip.h> /* for local_port_range[] */
+ #include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
++#include <net/inet_connection_sock.h>
+ #include <net/net_namespace.h>
+ #include <net/netlabel.h>
+ #include <linux/uaccess.h>
+@@ -3704,6 +3705,30 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
+ return 0;
+ }
+
++/**
++ * selinux_conn_sid - Determine the child socket label for a connection
++ * @sk_sid: the parent socket's SID
++ * @skb_sid: the packet's SID
++ * @conn_sid: the resulting connection SID
++ *
++ * If @skb_sid is valid then the user:role:type information from @sk_sid is
++ * combined with the MLS information from @skb_sid in order to create
++ * @conn_sid. If @skb_sid is not valid then then @conn_sid is simply a copy
++ * of @sk_sid. Returns zero on success, negative values on failure.
++ *
++ */
++static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid)
++{
++ int err = 0;
++
++ if (skb_sid != SECSID_NULL)
++ err = security_sid_mls_copy(sk_sid, skb_sid, conn_sid);
++ else
++ *conn_sid = sk_sid;
++
++ return err;
++}
++
+ /* socket security operations */
+
+ static int socket_sockcreate_sid(const struct task_security_struct *tsec,
+@@ -4295,7 +4320,7 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
+ struct sk_security_struct *sksec = sk->sk_security;
+ int err;
+ u16 family = sk->sk_family;
+- u32 newsid;
++ u32 connsid;
+ u32 peersid;
+
+ /* handle mapped IPv4 packets arriving via IPv6 sockets */
+@@ -4305,16 +4330,11 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
+ err = selinux_skb_peerlbl_sid(skb, family, &peersid);
+ if (err)
+ return err;
+- if (peersid == SECSID_NULL) {
+- req->secid = sksec->sid;
+- req->peer_secid = SECSID_NULL;
+- } else {
+- err = security_sid_mls_copy(sksec->sid, peersid, &newsid);
+- if (err)
+- return err;
+- req->secid = newsid;
+- req->peer_secid = peersid;
+- }
++ err = selinux_conn_sid(sksec->sid, peersid, &connsid);
++ if (err)
++ return err;
++ req->secid = connsid;
++ req->peer_secid = peersid;
+
+ return selinux_netlbl_inet_conn_request(req, family);
+ }
+@@ -4542,6 +4562,7 @@ static unsigned int selinux_ipv6_forward(unsigned int hooknum,
+ static unsigned int selinux_ip_output(struct sk_buff *skb,
+ u16 family)
+ {
++ struct sock *sk;
+ u32 sid;
+
+ if (!netlbl_enabled())
+@@ -4550,8 +4571,27 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
+ /* we do this in the LOCAL_OUT path and not the POST_ROUTING path
+ * because we want to make sure we apply the necessary labeling
+ * before IPsec is applied so we can leverage AH protection */
+- if (skb->sk) {
+- struct sk_security_struct *sksec = skb->sk->sk_security;
++ sk = skb->sk;
++ if (sk) {
++ struct sk_security_struct *sksec;
++
++ if (sk->sk_state == TCP_LISTEN)
++ /* if the socket is the listening state then this
++ * packet is a SYN-ACK packet which means it needs to
++ * be labeled based on the connection/request_sock and
++ * not the parent socket. unfortunately, we can't
++ * lookup the request_sock yet as it isn't queued on
++ * the parent socket until after the SYN-ACK is sent.
++ * the "solution" is to simply pass the packet as-is
++ * as any IP option based labeling should be copied
++ * from the initial connection request (in the IP
++ * layer). it is far from ideal, but until we get a
++ * security label in the packet itself this is the
++ * best we can do. */
++ return NF_ACCEPT;
++
++ /* standard practice, label using the parent socket */
++ sksec = sk->sk_security;
+ sid = sksec->sid;
+ } else
+ sid = SECINITSID_KERNEL;
+@@ -4633,12 +4673,12 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
+ if (!secmark_active && !peerlbl_active)
+ return NF_ACCEPT;
+
+- /* if the packet is being forwarded then get the peer label from the
+- * packet itself; otherwise check to see if it is from a local
+- * application or the kernel, if from an application get the peer label
+- * from the sending socket, otherwise use the kernel's sid */
+ sk = skb->sk;
+ if (sk == NULL) {
++ /* Without an associated socket the packet is either coming
++ * from the kernel or it is being forwarded; check the packet
++ * to determine which and if the packet is being forwarded
++ * query the packet directly to determine the security label. */
+ if (skb->skb_iif) {
+ secmark_perm = PACKET__FORWARD_OUT;
+ if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
+@@ -4647,7 +4687,26 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
+ secmark_perm = PACKET__SEND;
+ peer_sid = SECINITSID_KERNEL;
+ }
++ } else if (sk->sk_state == TCP_LISTEN) {
++ /* Locally generated packet but the associated socket is in the
++ * listening state which means this is a SYN-ACK packet. In
++ * this particular case the correct security label is assigned
++ * to the connection/request_sock but unfortunately we can't
++ * query the request_sock as it isn't queued on the parent
++ * socket until after the SYN-ACK packet is sent; the only
++ * viable choice is to regenerate the label like we do in
++ * selinux_inet_conn_request(). See also selinux_ip_output()
++ * for similar problems. */
++ u32 skb_sid;
++ struct sk_security_struct *sksec = sk->sk_security;
++ if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
++ return NF_DROP;
++ if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid))
++ return NF_DROP;
++ secmark_perm = PACKET__SEND;
+ } else {
++ /* Locally generated packet, fetch the security label from the
++ * associated socket. */
+ struct sk_security_struct *sksec = sk->sk_security;
+ peer_sid = sksec->sid;
+ secmark_perm = PACKET__SEND;
+diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
+index da4b8b233280..6235d052338b 100644
+--- a/security/selinux/netlabel.c
++++ b/security/selinux/netlabel.c
+@@ -442,8 +442,7 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
+ sksec->nlbl_state != NLBL_CONNLABELED)
+ return 0;
+
+- local_bh_disable();
+- bh_lock_sock_nested(sk);
++ lock_sock(sk);
+
+ /* connected sockets are allowed to disconnect when the address family
+ * is set to AF_UNSPEC, if that is what is happening we want to reset
+@@ -464,7 +463,6 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
+ sksec->nlbl_state = NLBL_CONNLABELED;
+
+ socket_connect_return:
+- bh_unlock_sock(sk);
+- local_bh_enable();
++ release_sock(sk);
+ return rc;
+ }
+diff --git a/sound/drivers/pcsp/pcsp.c b/sound/drivers/pcsp/pcsp.c
+index 946a0cb996a9..e6ad8d434f80 100644
+--- a/sound/drivers/pcsp/pcsp.c
++++ b/sound/drivers/pcsp/pcsp.c
+@@ -187,8 +187,8 @@ static int __devinit pcsp_probe(struct platform_device *dev)
+ static int __devexit pcsp_remove(struct platform_device *dev)
+ {
+ struct snd_pcsp *chip = platform_get_drvdata(dev);
+- alsa_card_pcsp_exit(chip);
+ pcspkr_input_remove(chip->input_dev);
++ alsa_card_pcsp_exit(chip);
+ platform_set_drvdata(dev, NULL);
+ return 0;
+ }
+diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
+index 0961e2cf20ca..a9cc687fa21a 100644
+--- a/sound/isa/msnd/msnd_pinnacle.c
++++ b/sound/isa/msnd/msnd_pinnacle.c
+@@ -73,9 +73,11 @@
+ #ifdef MSND_CLASSIC
+ # include "msnd_classic.h"
+ # define LOGNAME "msnd_classic"
++# define DEV_NAME "msnd-classic"
+ #else
+ # include "msnd_pinnacle.h"
+ # define LOGNAME "snd_msnd_pinnacle"
++# define DEV_NAME "msnd-pinnacle"
+ #endif
+
+ static void __devinit set_default_audio_parameters(struct snd_msnd *chip)
+@@ -1068,8 +1070,6 @@ static int __devexit snd_msnd_isa_remove(struct device *pdev, unsigned int dev)
+ return 0;
+ }
+
+-#define DEV_NAME "msnd-pinnacle"
+-
+ static struct isa_driver snd_msnd_driver = {
+ .match = snd_msnd_isa_match,
+ .probe = snd_msnd_isa_probe,
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 984b5b1f0b32..843d9f33f49c 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -4610,6 +4610,8 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
+ .patch = patch_conexant_auto },
+ { .id = 0x14f15115, .name = "CX20757",
+ .patch = patch_conexant_auto },
++ { .id = 0x14f151d7, .name = "CX20952",
++ .patch = patch_conexant_auto },
+ {} /* terminator */
+ };
+
+@@ -4636,6 +4638,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15111");
+ MODULE_ALIAS("snd-hda-codec-id:14f15113");
+ MODULE_ALIAS("snd-hda-codec-id:14f15114");
+ MODULE_ALIAS("snd-hda-codec-id:14f15115");
++MODULE_ALIAS("snd-hda-codec-id:14f151d7");
+
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Conexant HD-audio codec");
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 92c913d2aa3d..1f78ca68398f 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5943,6 +5943,7 @@ static int patch_alc662(struct hda_codec *codec)
+ case 0x10ec0272:
+ case 0x10ec0663:
+ case 0x10ec0665:
++ case 0x10ec0668:
+ set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
+ break;
+ case 0x10ec0273:
+@@ -6019,6 +6020,7 @@ static int patch_alc680(struct hda_codec *codec)
+ */
+ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ { .id = 0x10ec0221, .name = "ALC221", .patch = patch_alc269 },
++ { .id = 0x10ec0231, .name = "ALC231", .patch = patch_alc269 },
+ { .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 },
+ { .id = 0x10ec0262, .name = "ALC262", .patch = patch_alc262 },
+ { .id = 0x10ec0267, .name = "ALC267", .patch = patch_alc268 },
+diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
+index 1c4999d1758b..f2dac5cdd670 100644
+--- a/sound/soc/codecs/ak4642.c
++++ b/sound/soc/codecs/ak4642.c
+@@ -214,7 +214,7 @@ static int ak4642_dai_startup(struct snd_pcm_substream *substream,
+ * This operation came from example code of
+ * "ASAHI KASEI AK4642" (japanese) manual p94.
+ */
+- snd_soc_write(codec, SG_SL1, PMMP | MGAIN0);
++ snd_soc_update_bits(codec, SG_SL1, PMMP | MGAIN0, PMMP | MGAIN0);
+ snd_soc_write(codec, TIMER, ZTM(0x3) | WTM(0x3));
+ snd_soc_write(codec, ALC_CTL1, ALC | LMTH0);
+ snd_soc_update_bits(codec, PW_MGMT1, PMVCM | PMADL,
+diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
+index a7c9ae17fc7e..6cfd4f7b6824 100644
+--- a/sound/soc/codecs/wm8731.c
++++ b/sound/soc/codecs/wm8731.c
+@@ -392,10 +392,10 @@ static int wm8731_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ iface |= 0x0001;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+- iface |= 0x0003;
++ iface |= 0x0013;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+- iface |= 0x0013;
++ iface |= 0x0003;
+ break;
+ default:
+ return -EINVAL;
+diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
+index d29a9622964c..cfa5bea8e44d 100644
+--- a/sound/soc/codecs/wm8990.c
++++ b/sound/soc/codecs/wm8990.c
+@@ -1266,6 +1266,8 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
+
+ /* disable POBCTRL, SOFT_ST and BUFDCOPEN */
+ snd_soc_write(codec, WM8990_ANTIPOP2, 0x0);
++
++ codec->cache_sync = 1;
+ break;
+ }
+
+diff --git a/sound/usb/6fire/chip.c b/sound/usb/6fire/chip.c
+index c7dca7b0b9fe..46a281674725 100644
+--- a/sound/usb/6fire/chip.c
++++ b/sound/usb/6fire/chip.c
+@@ -102,7 +102,7 @@ static int __devinit usb6fire_chip_probe(struct usb_interface *intf,
+ usb_set_intfdata(intf, chips[i]);
+ mutex_unlock(&register_mutex);
+ return 0;
+- } else if (regidx < 0)
++ } else if (!devices[i] && regidx < 0)
+ regidx = i;
+ }
+ if (regidx < 0) {
+diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
+index 533db333fc8d..5dbb35d02af4 100644
+--- a/virt/kvm/iommu.c
++++ b/virt/kvm/iommu.c
+@@ -101,6 +101,10 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ while ((gfn << PAGE_SHIFT) & (page_size - 1))
+ page_size >>= 1;
+
++ /* Make sure hva is aligned to the page size we want to map */
++ while (gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
++ page_size >>= 1;
++
+ /*
+ * Pin all pages we are about to map in memory. This is
+ * important because we unmap and unpin in 4kb steps later.
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 8bf05f055c0a..d83aa5e700d5 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -774,7 +774,7 @@ skip_lpage:
+ new.userspace_addr = mem->userspace_addr;
+ #endif /* not defined CONFIG_S390 */
+
+- if (!npages) {
++ if (!npages || base_gfn != old.base_gfn) {
+ r = -ENOMEM;
+ slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!slots)
+@@ -788,8 +788,10 @@ skip_lpage:
+ old_memslots = kvm->memslots;
+ rcu_assign_pointer(kvm->memslots, slots);
+ synchronize_srcu_expedited(&kvm->srcu);
+- /* From this point no new shadow pages pointing to a deleted
+- * memslot will be created.
++ /* slot was deleted or moved, clear iommu mapping */
++ kvm_iommu_unmap_pages(kvm, &old);
++ /* From this point no new shadow pages pointing to a deleted,
++ * or moved, memslot will be created.
+ *
+ * validation of sp->gfn happens in:
+ * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
+@@ -803,14 +805,6 @@ skip_lpage:
+ if (r)
+ goto out_free;
+
+- /* map/unmap the pages in iommu page table */
+- if (npages) {
+- r = kvm_iommu_map_pages(kvm, &new);
+- if (r)
+- goto out_free;
+- } else
+- kvm_iommu_unmap_pages(kvm, &old);
+-
+ r = -ENOMEM;
+ slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!slots)
+@@ -820,6 +814,13 @@ skip_lpage:
+ slots->nmemslots = mem->slot + 1;
+ slots->generation++;
+
++ /* map new memory slot into the iommu */
++ if (npages) {
++ r = kvm_iommu_map_pages(kvm, &new);
++ if (r)
++ goto out_slots;
++ }
++
+ /* actual memory is freed via old in kvm_free_physmem_slot below */
+ if (!npages) {
+ new.rmap = NULL;
+@@ -847,6 +848,8 @@ skip_lpage:
+
+ return 0;
+
++out_slots:
++ kfree(slots);
+ out_free:
+ kvm_free_physmem_slot(&new, &old);
+ out:
+@@ -1683,6 +1686,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
+ int r;
+ struct kvm_vcpu *vcpu, *v;
+
++ if (id >= KVM_MAX_VCPUS)
++ return -EINVAL;
++
+ vcpu = kvm_arch_vcpu_create(kvm, id);
+ if (IS_ERR(vcpu))
+ return PTR_ERR(vcpu);
diff --git a/1054_linux-3.2.55.patch b/1054_linux-3.2.55.patch
new file mode 100644
index 00000000..d23fda60
--- /dev/null
+++ b/1054_linux-3.2.55.patch
@@ -0,0 +1,2495 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 2ba8272a4b82..1b196ea24a7c 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1305,6 +1305,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+
+ * dump_id: dump IDENTIFY data.
+
++ * disable: Disable this device.
++
+ If there are multiple matching configurations changing
+ the same attribute, the last one is used.
+
+diff --git a/Makefile b/Makefile
+index 848be2634e15..538463ed314e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 54
++SUBLEVEL = 55
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 7ac5dfd8afd6..d45fd2269e79 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -37,7 +37,13 @@
+
+ #include "signal.h"
+
+-static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
++static const char *handler[]= {
++ "prefetch abort",
++ "data abort",
++ "address exception",
++ "interrupt",
++ "undefined instruction",
++};
+
+ void *vectors_page;
+
+diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
+index 121ad1d4fa39..547731b817b8 100644
+--- a/arch/arm/mach-footbridge/dc21285-timer.c
++++ b/arch/arm/mach-footbridge/dc21285-timer.c
+@@ -95,17 +95,14 @@ static struct irqaction footbridge_timer_irq = {
+ static void __init footbridge_timer_init(void)
+ {
+ struct clock_event_device *ce = &ckevt_dc21285;
++ unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16);
+
+- clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16);
++ clocksource_register_hz(&cksrc_dc21285, rate);
+
+ setup_irq(ce->irq, &footbridge_timer_irq);
+
+- clockevents_calc_mult_shift(ce, mem_fclk_21285, 5);
+- ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce);
+- ce->min_delta_ns = clockevent_delta2ns(0x000004, ce);
+ ce->cpumask = cpumask_of(smp_processor_id());
+-
+- clockevents_register_device(ce);
++ clockevents_config_and_register(ce, rate, 0x4, 0xffffff);
+ }
+
+ struct sys_timer footbridge_timer = {
+diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
+index 3d3aeef46947..abd6bad9a2d8 100644
+--- a/arch/ia64/kernel/machine_kexec.c
++++ b/arch/ia64/kernel/machine_kexec.c
+@@ -157,7 +157,7 @@ void arch_crash_save_vmcoreinfo(void)
+ #endif
+ #ifdef CONFIG_PGTABLE_3
+ VMCOREINFO_CONFIG(PGTABLE_3);
+-#elif CONFIG_PGTABLE_4
++#elif defined(CONFIG_PGTABLE_4)
+ VMCOREINFO_CONFIG(PGTABLE_4);
+ #endif
+ }
+diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
+index 8057f4f6980f..da85ad4254f4 100644
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -163,7 +163,7 @@ do_kvm_##n: \
+ subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
+ beq- 1f; \
+ ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
+-1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
++1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
+ blt+ cr1,3f; /* abort if it is */ \
+ li r1,(n); /* will be reloaded later */ \
+ sth r1,PACA_TRAP_SAVE(r13); \
+diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
+index 7b95f29e3174..3baff31e58cf 100644
+--- a/arch/sh/lib/Makefile
++++ b/arch/sh/lib/Makefile
+@@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \
+ checksum.o strlen.o div64.o div64-generic.o
+
+ # Extracted from libgcc
+-lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
++obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
+ ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
+ udiv_qrnnd.o
+
+diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+index 3b8a2d30d14e..ea34253cb9c6 100644
+--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
++++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+@@ -9,6 +9,7 @@
+ #include <linux/perf_event.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
++#include <linux/syscore_ops.h>
+
+ #include <asm/apic.h>
+
+@@ -209,6 +210,18 @@ out:
+ return ret;
+ }
+
++static void ibs_eilvt_setup(void)
++{
++ /*
++ * Force LVT offset assignment for family 10h: The offsets are
++ * not assigned by the BIOS for this family, so the OS is
++ * responsible for doing it. If the OS assignment fails, fall
++ * back to BIOS settings and try to setup this.
++ */
++ if (boot_cpu_data.x86 == 0x10)
++ force_ibs_eilvt_setup();
++}
++
+ static inline int get_ibs_lvt_offset(void)
+ {
+ u64 val;
+@@ -244,6 +257,36 @@ static void clear_APIC_ibs(void *dummy)
+ setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
+ }
+
++#ifdef CONFIG_PM
++
++static int perf_ibs_suspend(void)
++{
++ clear_APIC_ibs(NULL);
++ return 0;
++}
++
++static void perf_ibs_resume(void)
++{
++ ibs_eilvt_setup();
++ setup_APIC_ibs(NULL);
++}
++
++static struct syscore_ops perf_ibs_syscore_ops = {
++ .resume = perf_ibs_resume,
++ .suspend = perf_ibs_suspend,
++};
++
++static void perf_ibs_pm_init(void)
++{
++ register_syscore_ops(&perf_ibs_syscore_ops);
++}
++
++#else
++
++static inline void perf_ibs_pm_init(void) { }
++
++#endif
++
+ static int __cpuinit
+ perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+ {
+@@ -270,18 +313,12 @@ static __init int amd_ibs_init(void)
+ if (!caps)
+ return -ENODEV; /* ibs not supported by the cpu */
+
+- /*
+- * Force LVT offset assignment for family 10h: The offsets are
+- * not assigned by the BIOS for this family, so the OS is
+- * responsible for doing it. If the OS assignment fails, fall
+- * back to BIOS settings and try to setup this.
+- */
+- if (boot_cpu_data.x86 == 0x10)
+- force_ibs_eilvt_setup();
++ ibs_eilvt_setup();
+
+ if (!ibs_eilvt_valid())
+ goto out;
+
++ perf_ibs_pm_init();
+ get_online_cpus();
+ ibs_caps = caps;
+ /* make ibs_caps visible to other cpus: */
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index e6fbb948dacc..20061b9e0d64 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -582,12 +582,13 @@ void __math_state_restore(struct task_struct *tsk)
+ /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
+ is pending. Clear the x87 state here by setting it to fixed
+ values. safe_address is a random variable that should be in L1 */
+- alternative_input(
+- ASM_NOP8 ASM_NOP2,
+- "emms\n\t" /* clear stack tags */
+- "fildl %P[addr]", /* set F?P to defined value */
+- X86_FEATURE_FXSAVE_LEAK,
+- [addr] "m" (safe_address));
++ if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
++ asm volatile(
++ "fnclex\n\t"
++ "emms\n\t"
++ "fildl %P[addr]" /* set F?P to defined value */
++ : : [addr] "m" (safe_address));
++ }
+
+ /*
+ * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 43e775337816..757c71635346 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1278,14 +1278,12 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
+ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
+ {
+ u32 data;
+- void *vapic;
+
+ if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
+ return;
+
+- vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+- data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
+- kunmap_atomic(vapic, KM_USER0);
++ kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
++ sizeof(u32));
+
+ apic_set_tpr(vcpu->arch.apic, data & 0xff);
+ }
+@@ -1295,7 +1293,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
+ u32 data, tpr;
+ int max_irr, max_isr;
+ struct kvm_lapic *apic;
+- void *vapic;
+
+ if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
+ return;
+@@ -1310,17 +1307,22 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
+ max_isr = 0;
+ data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
+
+- vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+- *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
+- kunmap_atomic(vapic, KM_USER0);
++ kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
++ sizeof(u32));
+ }
+
+-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
++int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
+ {
+ if (!irqchip_in_kernel(vcpu->kvm))
+- return;
++ return 0;
++
++ if (vapic_addr && kvm_gfn_to_hva_cache_init(vcpu->kvm,
++ &vcpu->arch.apic->vapic_cache,
++ vapic_addr, sizeof(u32)))
++ return -EINVAL;
+
+ vcpu->arch.apic->vapic_addr = vapic_addr;
++ return 0;
+ }
+
+ int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
+index 138e8cc6fea6..62ae376fa393 100644
+--- a/arch/x86/kvm/lapic.h
++++ b/arch/x86/kvm/lapic.h
+@@ -15,7 +15,7 @@ struct kvm_lapic {
+ bool irr_pending;
+ void *regs;
+ gpa_t vapic_addr;
+- struct page *vapic_page;
++ struct gfn_to_hva_cache vapic_cache;
+ };
+ int kvm_create_lapic(struct kvm_vcpu *vcpu);
+ void kvm_free_lapic(struct kvm_vcpu *vcpu);
+@@ -45,7 +45,7 @@ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
+ u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
+ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
+
+-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
++int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
+ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
+ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 57867e48ff03..7774cca80118 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3140,8 +3140,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ r = -EFAULT;
+ if (copy_from_user(&va, argp, sizeof va))
+ goto out;
+- r = 0;
+- kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
++ r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+ break;
+ }
+ case KVM_X86_SETUP_MCE: {
+@@ -5537,33 +5536,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
+ !kvm_event_needs_reinjection(vcpu);
+ }
+
+-static void vapic_enter(struct kvm_vcpu *vcpu)
+-{
+- struct kvm_lapic *apic = vcpu->arch.apic;
+- struct page *page;
+-
+- if (!apic || !apic->vapic_addr)
+- return;
+-
+- page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+-
+- vcpu->arch.apic->vapic_page = page;
+-}
+-
+-static void vapic_exit(struct kvm_vcpu *vcpu)
+-{
+- struct kvm_lapic *apic = vcpu->arch.apic;
+- int idx;
+-
+- if (!apic || !apic->vapic_addr)
+- return;
+-
+- idx = srcu_read_lock(&vcpu->kvm->srcu);
+- kvm_release_page_dirty(apic->vapic_page);
+- mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+- srcu_read_unlock(&vcpu->kvm->srcu, idx);
+-}
+-
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+ {
+ int max_irr, tpr;
+@@ -5836,7 +5808,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
+ }
+
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+- vapic_enter(vcpu);
+
+ r = 1;
+ while (r > 0) {
+@@ -5893,8 +5864,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
+
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+
+- vapic_exit(vcpu);
+-
+ return r;
+ }
+
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index d29f6d5abf05..f4000eea3d09 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -429,17 +429,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ /* Marvell */
+ { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
+ { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
+- { PCI_DEVICE(0x1b4b, 0x9123),
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9123),
+ .class = PCI_CLASS_STORAGE_SATA_AHCI,
+ .class_mask = 0xffffff,
+ .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
+- { PCI_DEVICE(0x1b4b, 0x9125),
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
+- { PCI_DEVICE(0x1b4b, 0x917a),
++ { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178,
++ PCI_VENDOR_ID_MARVELL_EXT, 0x9170),
++ .driver_data = board_ahci_yes_fbs }, /* 88se9170 */
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
+- { PCI_DEVICE(0x1b4b, 0x9192),
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
++ .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
+- { PCI_DEVICE(0x1b4b, 0x91a3),
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
+ .driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
+ .driver_data = board_ahci_yes_fbs },
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index a0a39872d612..72bbb5ed8096 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4097,6 +4097,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
+ ATA_HORKAGE_FIRMWARE_WARN },
+
++ /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
++ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
++
+ /* Blacklist entries taken from Silicon Image 3124/3132
+ Windows driver .inf file - also several Linux problem reports */
+ { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
+@@ -6328,6 +6331,7 @@ static int __init ata_parse_force_one(char **cur,
+ { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
+ { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
+ { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
++ { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
+ };
+ char *start = *cur, *p = *cur;
+ char *id, *val, *endp;
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index da85c0dfe265..a8423178db87 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -254,6 +254,7 @@ config NET_DMA
+ bool "Network: TCP receive copy offload"
+ depends on DMA_ENGINE && NET
+ default (INTEL_IOATDMA || FSL_DMA)
++ depends on BROKEN
+ help
+ This enables the use of DMA engines in the network stack to
+ offload receive copy-to-user operations, freeing CPU cycles.
+diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
+index 5cb1227d69cf..3104502611f1 100644
+--- a/drivers/gpio/gpio-msm-v2.c
++++ b/drivers/gpio/gpio-msm-v2.c
+@@ -249,7 +249,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
+
+ spin_lock_irqsave(&tlmm_lock, irq_flags);
+ writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
+- clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
++ clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
+ __clear_bit(gpio, msm_gpio.enabled_irqs);
+ spin_unlock_irqrestore(&tlmm_lock, irq_flags);
+ }
+@@ -261,7 +261,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
+
+ spin_lock_irqsave(&tlmm_lock, irq_flags);
+ __set_bit(gpio, msm_gpio.enabled_irqs);
+- set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
++ set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
+ writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
+ spin_unlock_irqrestore(&tlmm_lock, irq_flags);
+ }
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 97a050f6ea22..ddb22e7c8347 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -37,6 +37,7 @@
+ */
+ #define INTEL_GMCH_CTRL 0x52
+ #define INTEL_GMCH_VGA_DISABLE (1 << 1)
++#define SNB_GMCH_CTRL 0x50
+
+ /* PCI config space */
+
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 6d3669526de7..61b708b60faa 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -9141,14 +9141,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
+ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
++ unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ u16 gmch_ctrl;
+
+- pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
++ pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
+ if (state)
+ gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+ else
+ gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+- pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
++ pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
+index 93bce728cdb4..414a6812b5ba 100644
+--- a/drivers/gpu/drm/radeon/rs690.c
++++ b/drivers/gpu/drm/radeon/rs690.c
+@@ -160,6 +160,16 @@ void rs690_mc_init(struct radeon_device *rdev)
+ base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
+ base = G_000100_MC_FB_START(base) << 16;
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
++ /* Some boards seem to be configured for 128MB of sideport memory,
++ * but really only have 64MB. Just skip the sideport and use
++ * UMA memory.
++ */
++ if (rdev->mc.igp_sideport_enabled &&
++ (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
++ base += 128 * 1024 * 1024;
++ rdev->mc.real_vram_size -= 128 * 1024 * 1024;
++ rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
++ }
+ rs690_pm_info(rdev);
+ radeon_vram_location(rdev, &rdev->mc, base);
+ rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+index 221b924acebe..e223175a188d 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+@@ -144,9 +144,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ }
+
+ page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
+- bo->vm_node->start - vma->vm_pgoff;
+- page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
+- bo->vm_node->start - vma->vm_pgoff;
++ vma->vm_pgoff - bo->vm_node->start;
++ page_last = vma_pages(vma) + vma->vm_pgoff -
++ bo->vm_node->start;
+
+ if (unlikely(page_offset >= bo->num_pages)) {
+ retval = VM_FAULT_SIGBUS;
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index 3d630bbcd785..e6ec92009571 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -52,7 +52,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
+
+ #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
+ #define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
+-#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
++#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
+ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
+ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
+ #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 8bba438a328e..6d05e26268a4 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -997,7 +997,7 @@ read_again:
+ /* Could not read all from this device, so we will
+ * need another r10_bio.
+ */
+- sectors_handled = (r10_bio->sectors + max_sectors
++ sectors_handled = (r10_bio->sector + max_sectors
+ - bio->bi_sector);
+ r10_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+@@ -1005,7 +1005,7 @@ read_again:
+ bio->bi_phys_segments = 2;
+ else
+ bio->bi_phys_segments++;
+- spin_unlock(&conf->device_lock);
++ spin_unlock_irq(&conf->device_lock);
+ /* Cannot call generic_make_request directly
+ * as that will be queued in __generic_make_request
+ * and subsequent mempool_alloc might block
+@@ -2563,10 +2563,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+ if (j == conf->copies) {
+ /* Cannot recover, so abort the recovery or
+ * record a bad block */
+- put_buf(r10_bio);
+- if (rb2)
+- atomic_dec(&rb2->remaining);
+- r10_bio = rb2;
+ if (any_working) {
+ /* problem is that there are bad blocks
+ * on other device(s)
+@@ -2590,6 +2586,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+ conf->mirrors[i].recovery_disabled
+ = mddev->recovery_disabled;
+ }
++ put_buf(r10_bio);
++ if (rb2)
++ atomic_dec(&rb2->remaining);
++ r10_bio = rb2;
+ break;
+ }
+ }
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 26ef63a82b25..fb678330aaa7 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -3084,7 +3084,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
+ */
+ set_bit(R5_Insync, &dev->flags);
+
+- if (rdev && test_bit(R5_WriteError, &dev->flags)) {
++ if (test_bit(R5_WriteError, &dev->flags)) {
+ clear_bit(R5_Insync, &dev->flags);
+ if (!test_bit(Faulty, &rdev->flags)) {
+ s->handle_bad_blocks = 1;
+@@ -3092,7 +3092,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
+ } else
+ clear_bit(R5_WriteError, &dev->flags);
+ }
+- if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
++ if (test_bit(R5_MadeGood, &dev->flags)) {
+ if (!test_bit(Faulty, &rdev->flags)) {
+ s->handle_bad_blocks = 1;
+ atomic_inc(&rdev->nr_pending);
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index bbb669235fa2..e367ab11b385 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -108,6 +108,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
+ struct sk_buff *skb = tx_buf->skb;
+ u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
+ int nbd;
++ u16 split_bd_len = 0;
+
+ /* prefetch skb end pointer to speedup dev_kfree_skb() */
+ prefetch(&skb->end);
+@@ -115,11 +116,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
+ DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
+ txdata->txq_index, idx, tx_buf, skb);
+
+- /* unmap first bd */
+ DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
+ tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
+- dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+- BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
+
+
+ nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
+@@ -138,12 +136,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+- /* ...and the TSO split header bd since they have no mapping */
++ /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
+ if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
++ tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
++ split_bd_len = BD_UNMAP_LEN(tx_data_bd);
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+ }
+
++ /* unmap first bd */
++ dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
++ BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
++ DMA_TO_DEVICE);
++
+ /* now free frags */
+ while (nbd > 0) {
+
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 1bc927a768f1..d5793d3fd762 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -14537,6 +14537,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
+ /* Clear this out for sanity. */
+ tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
++ /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
++ tw32(TG3PCI_REG_BASE_ADDR, 0);
++
+ pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
+ &pci_state_reg);
+ if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
+diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+index bfeccbfde236..297f0b6818be 100644
+--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
++++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+@@ -3015,7 +3015,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_TSO
+ | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
+- dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
++ dev->features = NETIF_F_SG | NETIF_F_TSO
+ | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
+ | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
+ | NETIF_F_RXCSUM;
+diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
+index 3a90af6d111c..cc7e7def9b19 100644
+--- a/drivers/net/ethernet/tehuti/tehuti.c
++++ b/drivers/net/ethernet/tehuti/tehuti.c
+@@ -1995,7 +1995,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
+ | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM
+- /*| NETIF_F_FRAGLIST */
+ ;
+ ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_HW_VLAN_TX;
+diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
+index e26945d9dea4..1b7b3bed6b3d 100644
+--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
+@@ -1011,7 +1011,7 @@ static int __devinit temac_of_probe(struct platform_device *op)
+ dev_set_drvdata(&op->dev, ndev);
+ SET_NETDEV_DEV(ndev, &op->dev);
+ ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
+- ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
++ ndev->features = NETIF_F_SG;
+ ndev->netdev_ops = &temac_netdev_ops;
+ ndev->ethtool_ops = &temac_ethtool_ops;
+ #if 0
+diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
+index a4a3516b6bbf..3b3a7e07bbf1 100644
+--- a/drivers/net/hamradio/hdlcdrv.c
++++ b/drivers/net/hamradio/hdlcdrv.c
+@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ case HDLCDRVCTL_CALIBRATE:
+ if(!capable(CAP_SYS_RAWIO))
+ return -EPERM;
++ if (bi.data.calibrate > INT_MAX / s->par.bitrate)
++ return -EINVAL;
+ s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
+ return 0;
+
+diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
+index 96a98d2ff151..e4260ab421f1 100644
+--- a/drivers/net/hamradio/yam.c
++++ b/drivers/net/hamradio/yam.c
+@@ -1060,6 +1060,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ break;
+
+ case SIOCYAMGCFG:
++ memset(&yi, 0, sizeof(yi));
+ yi.cfg.mask = 0xffffffff;
+ yi.cfg.iobase = yp->iobase;
+ yi.cfg.irq = yp->irq;
+diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
+index 136ecf32a5d5..dc60aecc6d9b 100644
+--- a/drivers/net/usb/dm9601.c
++++ b/drivers/net/usb/dm9601.c
+@@ -445,7 +445,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
+ dev->net->ethtool_ops = &dm9601_ethtool_ops;
+ dev->net->hard_header_len += DM_TX_OVERHEAD;
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+- dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
++
++ /* dm9620/21a require room for 4 byte padding, even in dm9601
++ * mode, so we need +1 to be able to receive full size
++ * ethernet frames.
++ */
++ dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
+
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = dm9601_mdio_read;
+@@ -531,7 +536,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+ {
+- int len;
++ int len, pad;
+
+ /* format:
+ b1: packet length low
+@@ -539,12 +544,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ b3..n: packet data
+ */
+
+- len = skb->len;
++ len = skb->len + DM_TX_OVERHEAD;
+
+- if (skb_headroom(skb) < DM_TX_OVERHEAD) {
++ /* workaround for dm962x errata with tx fifo getting out of
++ * sync if a USB bulk transfer retry happens right after a
++ * packet with odd / maxpacket length by adding up to 3 bytes
++ * padding.
++ */
++ while ((len & 1) || !(len % dev->maxpacket))
++ len++;
++
++ len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
++ pad = len - skb->len;
++
++ if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
+ struct sk_buff *skb2;
+
+- skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags);
++ skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+@@ -553,10 +569,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+
+ __skb_push(skb, DM_TX_OVERHEAD);
+
+- /* usbnet adds padding if length is a multiple of packet size
+- if so, adjust length value in header */
+- if ((skb->len % dev->maxpacket) == 0)
+- len++;
++ if (pad) {
++ memset(skb->data + skb->len, 0, pad);
++ __skb_put(skb, pad);
++ }
+
+ skb->data[0] = len;
+ skb->data[1] = len >> 8;
+diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+index b5920168606d..f4caeb328985 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
++++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+@@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+ mask2 |= ATH9K_INT_CST;
+ if (isr2 & AR_ISR_S2_TSFOOR)
+ mask2 |= ATH9K_INT_TSFOOR;
++
++ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
++ REG_WRITE(ah, AR_ISR_S2, isr2);
++ isr &= ~AR_ISR_BCNMISC;
++ }
+ }
+
+- isr = REG_READ(ah, AR_ISR_RAC);
++ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
++ isr = REG_READ(ah, AR_ISR_RAC);
++
+ if (isr == 0xffffffff) {
+ *masked = 0;
+ return false;
+@@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+
+ *masked |= ATH9K_INT_TX;
+
+- s0_s = REG_READ(ah, AR_ISR_S0_S);
++ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
++ s0_s = REG_READ(ah, AR_ISR_S0_S);
++ s1_s = REG_READ(ah, AR_ISR_S1_S);
++ } else {
++ s0_s = REG_READ(ah, AR_ISR_S0);
++ REG_WRITE(ah, AR_ISR_S0, s0_s);
++ s1_s = REG_READ(ah, AR_ISR_S1);
++ REG_WRITE(ah, AR_ISR_S1, s1_s);
++
++ isr &= ~(AR_ISR_TXOK |
++ AR_ISR_TXDESC |
++ AR_ISR_TXERR |
++ AR_ISR_TXEOL);
++ }
++
+ ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
+ ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
+-
+- s1_s = REG_READ(ah, AR_ISR_S1_S);
+ ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
+ ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
+ }
+@@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+ *masked |= mask2;
+ }
+
+- if (AR_SREV_9100(ah))
+- return true;
+-
+- if (isr & AR_ISR_GENTMR) {
++ if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
+ u32 s5_s;
+
+- s5_s = REG_READ(ah, AR_ISR_S5_S);
++ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
++ s5_s = REG_READ(ah, AR_ISR_S5_S);
++ } else {
++ s5_s = REG_READ(ah, AR_ISR_S5);
++ }
++
+ ah->intr_gen_timer_trigger =
+ MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
+
+@@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
+ if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
+ !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
+ *masked |= ATH9K_INT_TIM_TIMER;
++
++ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
++ REG_WRITE(ah, AR_ISR_S5, s5_s);
++ isr &= ~AR_ISR_GENTMR;
++ }
+ }
+
++ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
++ REG_WRITE(ah, AR_ISR, isr);
++ REG_READ(ah, AR_ISR);
++ }
++
++ if (AR_SREV_9100(ah))
++ return true;
++
+ if (sync_cause) {
+ fatal_int =
+ (sync_cause &
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+index 0b9a0e8a4958..b6cd36c70823 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+@@ -139,21 +139,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+ struct ath9k_vif_iter_data *iter_data = data;
+ int i;
+
+- for (i = 0; i < ETH_ALEN; i++)
+- iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
++ if (iter_data->hw_macaddr != NULL) {
++ for (i = 0; i < ETH_ALEN; i++)
++ iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
++ } else {
++ iter_data->hw_macaddr = mac;
++ }
+ }
+
+-static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
++static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
+ struct ieee80211_vif *vif)
+ {
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_vif_iter_data iter_data;
+
+ /*
+- * Use the hardware MAC address as reference, the hardware uses it
+- * together with the BSSID mask when matching addresses.
++ * Pick the MAC address of the first interface as the new hardware
++ * MAC address. The hardware will use it together with the BSSID mask
++ * when matching addresses.
+ */
+- iter_data.hw_macaddr = common->macaddr;
++ iter_data.hw_macaddr = NULL;
+ memset(&iter_data.mask, 0xff, ETH_ALEN);
+
+ if (vif)
+@@ -164,6 +169,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
+ &iter_data);
+
+ memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
++
++ if (iter_data.hw_macaddr)
++ memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
++
+ ath_hw_setbssidmask(common);
+ }
+
+@@ -1100,7 +1109,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
+ goto out;
+ }
+
+- ath9k_htc_set_bssid_mask(priv, vif);
++ ath9k_htc_set_mac_bssid_mask(priv, vif);
+
+ priv->vif_slot |= (1 << avp->index);
+ priv->nvifs++;
+@@ -1163,7 +1172,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
+
+ ath9k_htc_set_opmode(priv);
+
+- ath9k_htc_set_bssid_mask(priv, vif);
++ ath9k_htc_set_mac_bssid_mask(priv, vif);
+
+ /*
+ * Stop ANI only if there are no associated station interfaces.
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index a59267aabfeb..ad331260a581 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -1357,8 +1357,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ /*
+- * Use the hardware MAC address as reference, the hardware uses it
+- * together with the BSSID mask when matching addresses.
++ * Pick the MAC address of the first interface as the new hardware
++ * MAC address. The hardware will use it together with the BSSID mask
++ * when matching addresses.
+ */
+ memset(iter_data, 0, sizeof(*iter_data));
+ iter_data->hw_macaddr = common->macaddr;
+diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
+index 47ba0f7c6007..38b793bca681 100644
+--- a/drivers/net/wireless/rtlwifi/pci.c
++++ b/drivers/net/wireless/rtlwifi/pci.c
+@@ -685,6 +685,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+ };
+ int index = rtlpci->rx_ring[rx_queue_idx].idx;
+
++ if (rtlpci->driver_is_goingto_unload)
++ return;
+ /*RX NORMAL PKT */
+ while (count--) {
+ /*rx descriptor */
+@@ -1563,6 +1565,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
+ */
+ set_hal_stop(rtlhal);
+
++ rtlpci->driver_is_goingto_unload = true;
+ rtlpriv->cfg->ops->disable_interrupt(hw);
+ tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
+
+@@ -1580,7 +1583,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
+ ppsc->rfchange_inprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
+
+- rtlpci->driver_is_goingto_unload = true;
+ rtlpriv->cfg->ops->hw_disable(hw);
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 363a5c60bf9e..9f1fec14723f 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1915,10 +1915,6 @@ void pci_enable_ari(struct pci_dev *dev)
+ if (!pci_is_pcie(dev) || dev->devfn)
+ return;
+
+- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
+- if (!pos)
+- return;
+-
+ bridge = dev->bus->self;
+ if (!bridge || !pci_is_pcie(bridge))
+ return;
+@@ -1937,10 +1933,14 @@ void pci_enable_ari(struct pci_dev *dev)
+ return;
+
+ pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
+- ctrl |= PCI_EXP_DEVCTL2_ARI;
++ if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
++ ctrl |= PCI_EXP_DEVCTL2_ARI;
++ bridge->ari_enabled = 1;
++ } else {
++ ctrl &= ~PCI_EXP_DEVCTL2_ARI;
++ bridge->ari_enabled = 0;
++ }
+ pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
+-
+- bridge->ari_enabled = 1;
+ }
+
+ /**
+diff --git a/drivers/staging/comedi/drivers/cb_pcidio.c b/drivers/staging/comedi/drivers/cb_pcidio.c
+index 79477a595ef9..b3c9c8f107ab 100644
+--- a/drivers/staging/comedi/drivers/cb_pcidio.c
++++ b/drivers/staging/comedi/drivers/cb_pcidio.c
+@@ -56,10 +56,6 @@ struct pcidio_board {
+ const char *name; /* name of the board */
+ int dev_id;
+ int n_8255; /* number of 8255 chips on board */
+-
+- /* indices of base address regions */
+- int pcicontroler_badrindex;
+- int dioregs_badrindex;
+ };
+
+ static const struct pcidio_board pcidio_boards[] = {
+@@ -67,22 +63,16 @@ static const struct pcidio_board pcidio_boards[] = {
+ .name = "pci-dio24",
+ .dev_id = 0x0028,
+ .n_8255 = 1,
+- .pcicontroler_badrindex = 1,
+- .dioregs_badrindex = 2,
+ },
+ {
+ .name = "pci-dio24h",
+ .dev_id = 0x0014,
+ .n_8255 = 1,
+- .pcicontroler_badrindex = 1,
+- .dioregs_badrindex = 2,
+ },
+ {
+ .name = "pci-dio48h",
+ .dev_id = 0x000b,
+ .n_8255 = 2,
+- .pcicontroler_badrindex = 0,
+- .dioregs_badrindex = 1,
+ },
+ };
+
+@@ -244,10 +234,15 @@ found:
+ ("cb_pcidio: failed to enable PCI device and request regions\n");
+ return -EIO;
+ }
+- devpriv->dio_reg_base
+- =
++ /*
++ * Use PCI BAR 2 region if non-zero length, else use PCI BAR 1 region.
++ * PCI BAR 1 is only used for older PCI-DIO48H boards. At some point
++ * the PCI-DIO48H was redesigned to use the same PCI interface chip
++ * (and same PCI BAR region) as the other boards.
++ */
++ devpriv->dio_reg_base =
+ pci_resource_start(devpriv->pci_dev,
+- pcidio_boards[index].dioregs_badrindex);
++ (pci_resource_len(pcidev, 2) ? 2 : 1));
+
+ /*
+ * Allocate the subdevice structures. alloc_subdevice() is a
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 3effde29c17a..45c13a659940 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -861,24 +861,22 @@ static int iscsit_handle_scsi_cmd(
+ if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
+ (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
+ /*
+- * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
+- * that adds support for RESERVE/RELEASE. There is a bug
+- * add with this new functionality that sets R/W bits when
+- * neither CDB carries any READ or WRITE datapayloads.
++ * From RFC-3720 Section 10.3.1:
++ *
++ * "Either or both of R and W MAY be 1 when either the
++ * Expected Data Transfer Length and/or Bidirectional Read
++ * Expected Data Transfer Length are 0"
++ *
++ * For this case, go ahead and clear the unnecssary bits
++ * to avoid any confusion with ->data_direction.
+ */
+- if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
+- hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+- hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
+- goto done;
+- }
++ hdr->flags &= ~ISCSI_FLAG_CMD_READ;
++ hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
+
+- pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
++ pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+ " set when Expected Data Transfer Length is 0 for"
+- " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
+- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+- buf, conn);
++ " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
+ }
+-done:
+
+ if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
+ !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 827f933666d3..15685c34b8fb 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -121,7 +121,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ * any other sleep) on Haswell machines with LPT and LPT-LP
+ * with the new Intel BIOS
+ */
+- xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
++ /* Limit the quirk to only known vendors, as this triggers
++ * yet another BIOS bug on some other machines
++ * https://bugzilla.kernel.org/show_bug.cgi?id=66171
++ */
++ if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
++ xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+ }
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 6203d805eb45..b24e2d332767 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -608,6 +608,8 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
+ req->r_unsafe_dir = NULL;
+ }
+
++ complete_all(&req->r_safe_completion);
++
+ ceph_mdsc_put_request(req);
+ }
+
+@@ -1815,8 +1817,11 @@ static int __do_request(struct ceph_mds_client *mdsc,
+ int mds = -1;
+ int err = -EAGAIN;
+
+- if (req->r_err || req->r_got_result)
++ if (req->r_err || req->r_got_result) {
++ if (req->r_aborted)
++ __unregister_request(mdsc, req);
+ goto out;
++ }
+
+ if (req->r_timeout &&
+ time_after_eq(jiffies, req->r_started + req->r_timeout)) {
+@@ -2124,7 +2129,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
+ if (head->safe) {
+ req->r_got_safe = true;
+ __unregister_request(mdsc, req);
+- complete_all(&req->r_safe_completion);
+
+ if (req->r_got_unsafe) {
+ /*
+diff --git a/fs/ext2/super.c b/fs/ext2/super.c
+index bd8ac164a3bf..94b9e329b712 100644
+--- a/fs/ext2/super.c
++++ b/fs/ext2/super.c
+@@ -1448,6 +1448,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
+ sb->s_blocksize - offset : towrite;
+
+ tmp_bh.b_state = 0;
++ tmp_bh.b_size = sb->s_blocksize;
+ err = ext2_get_block(inode, blk, &tmp_bh, 1);
+ if (err < 0)
+ goto out;
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 22c71b9dd727..68b160255b24 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -278,6 +278,16 @@ struct ext4_io_submit {
+ /* Translate # of blks to # of clusters */
+ #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \
+ (sbi)->s_cluster_bits)
++/* Mask out the low bits to get the starting block of the cluster */
++#define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \
++ ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
++#define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \
++ ~((ext4_lblk_t) (s)->s_cluster_ratio - 1))
++/* Get the cluster offset */
++#define EXT4_PBLK_COFF(s, pblk) ((pblk) & \
++ ((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
++#define EXT4_LBLK_COFF(s, lblk) ((lblk) & \
++ ((ext4_lblk_t) (s)->s_cluster_ratio - 1))
+
+ /*
+ * Structure of a blocks group descriptor
+diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
+index d0b8f9838e81..9995b990eebe 100644
+--- a/fs/ext4/ext4_jbd2.c
++++ b/fs/ext4/ext4_jbd2.c
+@@ -113,6 +113,10 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
+ if (WARN_ON_ONCE(err)) {
+ ext4_journal_abort_handle(where, line, __func__, bh,
+ handle, err);
++ ext4_error_inode(inode, where, line,
++ bh->b_blocknr,
++ "journal_dirty_metadata failed: "
++ "errcode %d", err);
+ }
+ } else {
+ if (inode)
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 3e8fc8029a38..bf35fe01a83f 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -318,8 +318,10 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
+ {
+ ext4_fsblk_t block = ext4_ext_pblock(ext);
+ int len = ext4_ext_get_actual_len(ext);
++ ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
++ ext4_lblk_t last = lblock + len - 1;
+
+- if (len == 0)
++ if (lblock > last)
+ return 0;
+ return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
+ }
+@@ -345,11 +347,26 @@ static int ext4_valid_extent_entries(struct inode *inode,
+ if (depth == 0) {
+ /* leaf entries */
+ struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
++ struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
++ ext4_fsblk_t pblock = 0;
++ ext4_lblk_t lblock = 0;
++ ext4_lblk_t prev = 0;
++ int len = 0;
+ while (entries) {
+ if (!ext4_valid_extent(inode, ext))
+ return 0;
++
++ /* Check for overlapping extents */
++ lblock = le32_to_cpu(ext->ee_block);
++ len = ext4_ext_get_actual_len(ext);
++ if ((lblock <= prev) && prev) {
++ pblock = ext4_ext_pblock(ext);
++ es->s_last_error_block = cpu_to_le64(pblock);
++ return 0;
++ }
+ ext++;
+ entries--;
++ prev = lblock + len - 1;
+ }
+ } else {
+ struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
+@@ -1642,8 +1659,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
+ depth = ext_depth(inode);
+ if (!path[depth].p_ext)
+ goto out;
+- b2 = le32_to_cpu(path[depth].p_ext->ee_block);
+- b2 &= ~(sbi->s_cluster_ratio - 1);
++ b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
+
+ /*
+ * get the next allocated block if the extent in the path
+@@ -1653,7 +1669,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
+ b2 = ext4_ext_next_allocated_block(path);
+ if (b2 == EXT_MAX_BLOCKS)
+ goto out;
+- b2 &= ~(sbi->s_cluster_ratio - 1);
++ b2 = EXT4_LBLK_CMASK(sbi, b2);
+ }
+
+ /* check for wrap through zero on extent logical start block*/
+@@ -2288,7 +2304,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
+ * truncate operation has removed all of the blocks in
+ * the cluster.
+ */
+- if (pblk & (sbi->s_cluster_ratio - 1) &&
++ if (EXT4_PBLK_COFF(sbi, pblk) &&
+ (ee_len == num))
+ *partial_cluster = EXT4_B2C(sbi, pblk);
+ else
+@@ -3491,7 +3507,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk,
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ ext4_lblk_t lblk_start, lblk_end;
+- lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
++ lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
+ lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
+
+ return ext4_find_delalloc_range(inode, lblk_start, lblk_end,
+@@ -3551,9 +3567,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
+ trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
+
+ /* Check towards left side */
+- c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
++ c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
+ if (c_offset) {
+- lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
++ lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
+ lblk_to = lblk_from + c_offset - 1;
+
+ if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
+@@ -3561,7 +3577,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
+ }
+
+ /* Now check towards right. */
+- c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
++ c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
+ if (allocated_clusters && c_offset) {
+ lblk_from = lblk_start + num_blks;
+ lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
+@@ -3754,7 +3770,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
+ struct ext4_ext_path *path)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+- ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
++ ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
+ ext4_lblk_t ex_cluster_start, ex_cluster_end;
+ ext4_lblk_t rr_cluster_start, rr_cluster_end;
+ ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
+@@ -3773,8 +3789,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
+ (rr_cluster_start == ex_cluster_start)) {
+ if (rr_cluster_start == ex_cluster_end)
+ ee_start += ee_len - 1;
+- map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
+- c_offset;
++ map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
+ map->m_len = min(map->m_len,
+ (unsigned) sbi->s_cluster_ratio - c_offset);
+ /*
+@@ -4052,7 +4067,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ */
+ map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
+ newex.ee_block = cpu_to_le32(map->m_lblk);
+- cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
++ cluster_offset = EXT4_LBLK_CMASK(sbi, map->m_lblk);
+
+ /*
+ * If we are doing bigalloc, check to see if the extent returned
+@@ -4120,7 +4135,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
+ * needed so that future calls to get_implied_cluster_alloc()
+ * work correctly.
+ */
+- offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
++ offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
+ ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
+ ar.goal -= offset;
+ ar.logical -= offset;
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 9b8c131ffa5a..81feb175c76e 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -3378,6 +3378,9 @@ static void ext4_mb_pa_callback(struct rcu_head *head)
+ {
+ struct ext4_prealloc_space *pa;
+ pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
++
++ BUG_ON(atomic_read(&pa->pa_count));
++ BUG_ON(pa->pa_deleted == 0);
+ kmem_cache_free(ext4_pspace_cachep, pa);
+ }
+
+@@ -3391,11 +3394,13 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
+ ext4_group_t grp;
+ ext4_fsblk_t grp_blk;
+
+- if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
+- return;
+-
+ /* in this short window concurrent discard can set pa_deleted */
+ spin_lock(&pa->pa_lock);
++ if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
++ spin_unlock(&pa->pa_lock);
++ return;
++ }
++
+ if (pa->pa_deleted == 1) {
+ spin_unlock(&pa->pa_lock);
+ return;
+@@ -4062,7 +4067,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
+
+ /* set up allocation goals */
+ memset(ac, 0, sizeof(struct ext4_allocation_context));
+- ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
++ ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
+ ac->ac_status = AC_STATUS_CONTINUE;
+ ac->ac_sb = sb;
+ ac->ac_inode = ar->inode;
+@@ -4600,7 +4605,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
+ * blocks at the beginning or the end unless we are explicitly
+ * requested to avoid doing so.
+ */
+- overflow = block & (sbi->s_cluster_ratio - 1);
++ overflow = EXT4_PBLK_COFF(sbi, block);
+ if (overflow) {
+ if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
+ overflow = sbi->s_cluster_ratio - overflow;
+@@ -4614,7 +4619,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
+ count += overflow;
+ }
+ }
+- overflow = count & (sbi->s_cluster_ratio - 1);
++ overflow = EXT4_LBLK_COFF(sbi, count);
+ if (overflow) {
+ if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
+ if (count > overflow)
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 84f84bfdbdf2..acf2baf73b63 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -819,7 +819,7 @@ static void ext4_put_super(struct super_block *sb)
+ ext4_abort(sb, "Couldn't clean up the journal");
+ }
+
+- del_timer(&sbi->s_err_report);
++ del_timer_sync(&sbi->s_err_report);
+ ext4_release_system_zone(sb);
+ ext4_mb_release(sb);
+ ext4_ext_release(sb);
+@@ -3961,7 +3961,7 @@ failed_mount_wq:
+ sbi->s_journal = NULL;
+ }
+ failed_mount3:
+- del_timer(&sbi->s_err_report);
++ del_timer_sync(&sbi->s_err_report);
+ if (sbi->s_flex_groups)
+ ext4_kvfree(sbi->s_flex_groups);
+ percpu_counter_destroy(&sbi->s_freeclusters_counter);
+diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
+index 89d2a5803ae3..5ecfffe25334 100644
+--- a/fs/hpfs/file.c
++++ b/fs/hpfs/file.c
+@@ -116,9 +116,12 @@ static int hpfs_write_begin(struct file *file, struct address_space *mapping,
+ hpfs_get_block,
+ &hpfs_i(mapping->host)->mmu_private);
+ if (unlikely(ret)) {
+- loff_t isize = mapping->host->i_size;
++ loff_t isize;
++ hpfs_lock(mapping->host->i_sb);
++ isize = mapping->host->i_size;
+ if (pos + len > isize)
+ vmtruncate(mapping->host, isize);
++ hpfs_unlock(mapping->host->i_sb);
+ }
+
+ return ret;
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 233d3ed264fa..3ceacedbf627 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1437,17 +1437,19 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
+
+ nilfs_clear_logs(&sci->sc_segbufs);
+
+- err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
+- if (unlikely(err))
+- return err;
+-
+ if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
+ err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
+ sci->sc_freesegs,
+ sci->sc_nfreesegs,
+ NULL);
+ WARN_ON(err); /* do not happen */
++ sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
+ }
++
++ err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
++ if (unlikely(err))
++ return err;
++
+ nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
+ sci->sc_stage = prev_stage;
+ }
+diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
+index 34817adf4b9e..10ca5e56351e 100644
+--- a/fs/xfs/xfs_log.c
++++ b/fs/xfs/xfs_log.c
+@@ -653,8 +653,9 @@ xfs_log_unmount_write(xfs_mount_t *mp)
+ .lv_iovecp = &reg,
+ };
+
+- /* remove inited flag */
++ /* remove inited flag, and account for space used */
+ tic->t_flags = 0;
++ tic->t_curr_res -= sizeof(magic);
+ error = xlog_write(log, &vec, tic, &lsn,
+ NULL, XLOG_UNMOUNT_TRANS);
+ /*
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index def807c5e510..c37fd8941c7e 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -495,7 +495,7 @@
+ {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+ {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+- {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
++ {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+ {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 32697c186c70..4bc9445222f2 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -24,6 +24,7 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
+ void hugepage_put_subpool(struct hugepage_subpool *spool);
+
+ int PageHuge(struct page *page);
++int PageHeadHuge(struct page *page_head);
+
+ void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
+ int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+@@ -88,6 +89,11 @@ static inline int PageHuge(struct page *page)
+ return 0;
+ }
+
++static inline int PageHeadHuge(struct page *page_head)
++{
++ return 0;
++}
++
+ static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
+ {
+ }
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 8c43fd17d140..4b04097c748c 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1719,6 +1719,15 @@ static inline int dev_parse_header(const struct sk_buff *skb,
+ return dev->header_ops->parse(skb, haddr);
+ }
+
++static inline int dev_rebuild_header(struct sk_buff *skb)
++{
++ const struct net_device *dev = skb->dev;
++
++ if (!dev->header_ops || !dev->header_ops->rebuild)
++ return 0;
++ return dev->header_ops->rebuild(skb);
++}
++
+ typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
+ extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
+ static inline int unregister_gifconf(unsigned int family)
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 7cda65b5f798..fe76a74ae0b3 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -590,6 +590,20 @@ struct pci_driver {
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+
+ /**
++ * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem
++ * @vend: the 16 bit PCI Vendor ID
++ * @dev: the 16 bit PCI Device ID
++ * @subvend: the 16 bit PCI Subvendor ID
++ * @subdev: the 16 bit PCI Subdevice ID
++ *
++ * This macro is used to create a struct pci_device_id that matches a
++ * specific device with subsystem information.
++ */
++#define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
++ .vendor = (vend), .device = (dev), \
++ .subvendor = (subvend), .subdevice = (subdev)
++
++/**
+ * PCI_DEVICE_CLASS - macro used to describe a specific pci device class
+ * @dev_class: the class, subclass, prog-if triple for this device
+ * @dev_class_mask: the class mask for this device
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 312d047eb918..c17fdfbd323f 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1200,6 +1200,7 @@ struct sched_entity {
+ struct sched_rt_entity {
+ struct list_head run_list;
+ unsigned long timeout;
++ unsigned long watchdog_stamp;
+ unsigned int time_slice;
+ int nr_cpus_allowed;
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index d93369a977d1..ea85b0d2bbf2 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -2189,6 +2189,10 @@ static int irqtime_account_si_update(void)
+
+ #endif
+
++#ifdef CONFIG_SMP
++static void unthrottle_offline_cfs_rqs(struct rq *rq);
++#endif
++
+ #include "sched_idletask.c"
+ #include "sched_fair.c"
+ #include "sched_rt.c"
+@@ -6566,8 +6570,6 @@ static void unthrottle_offline_cfs_rqs(struct rq *rq)
+ unthrottle_cfs_rq(cfs_rq);
+ }
+ }
+-#else
+-static void unthrottle_offline_cfs_rqs(struct rq *rq) {}
+ #endif
+
+ /*
+@@ -6595,9 +6597,6 @@ static void migrate_tasks(unsigned int dead_cpu)
+ */
+ rq->stop = NULL;
+
+- /* Ensure any throttled groups are reachable by pick_next_task */
+- unthrottle_offline_cfs_rqs(rq);
+-
+ for ( ; ; ) {
+ /*
+ * There's this thread running, bail when that's the only
+@@ -6624,6 +6623,10 @@ static void migrate_tasks(unsigned int dead_cpu)
+
+ #endif /* CONFIG_HOTPLUG_CPU */
+
++#if !defined(CONFIG_HOTPLUG_CPU) || !defined(CONFIG_CFS_BANDWIDTH)
++static void unthrottle_offline_cfs_rqs(struct rq *rq) {}
++#endif
++
+ #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
+
+ static struct ctl_table sd_ctl_dir[] = {
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index 5b9e456ea98c..37f3f3925d3d 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -4848,6 +4848,9 @@ static void rq_online_fair(struct rq *rq)
+ static void rq_offline_fair(struct rq *rq)
+ {
+ update_sysctl();
++
++ /* Ensure any throttled groups are reachable by pick_next_task */
++ unthrottle_offline_cfs_rqs(rq);
+ }
+
+ #else /* CONFIG_SMP */
+diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
+index 6ad4fb394b08..f57fda7cbef0 100644
+--- a/kernel/sched_rt.c
++++ b/kernel/sched_rt.c
+@@ -509,6 +509,7 @@ balanced:
+ * runtime - in which case borrowing doesn't make sense.
+ */
+ rt_rq->rt_runtime = RUNTIME_INF;
++ rt_rq->rt_throttled = 0;
+ raw_spin_unlock(&rt_rq->rt_runtime_lock);
+ raw_spin_unlock(&rt_b->rt_runtime_lock);
+ }
+@@ -587,6 +588,19 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
+ return 1;
+
+ span = sched_rt_period_mask();
++#ifdef CONFIG_RT_GROUP_SCHED
++ /*
++ * FIXME: isolated CPUs should really leave the root task group,
++ * whether they are isolcpus or were isolated via cpusets, lest
++ * the timer run on a CPU which does not service all runqueues,
++ * potentially leaving other CPUs indefinitely throttled. If
++ * isolation is really required, the user will turn the throttle
++ * off to kill the perturbations it causes anyway. Meanwhile,
++ * this maintains functionality for boot and/or troubleshooting.
++ */
++ if (rt_b == &root_task_group.rt_bandwidth)
++ span = cpu_online_mask;
++#endif
+ for_each_cpu(i, span) {
+ int enqueue = 0;
+ struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
+@@ -719,6 +733,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+ {
+ struct rq *rq = rq_of_rt_rq(rt_rq);
+
++#ifdef CONFIG_RT_GROUP_SCHED
++ /*
++ * Change rq's cpupri only if rt_rq is the top queue.
++ */
++ if (&rq->rt != rt_rq)
++ return;
++#endif
+ if (rq->online && prio < prev_prio)
+ cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
+ }
+@@ -728,6 +749,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+ {
+ struct rq *rq = rq_of_rt_rq(rt_rq);
+
++#ifdef CONFIG_RT_GROUP_SCHED
++ /*
++ * Change rq's cpupri only if rt_rq is the top queue.
++ */
++ if (&rq->rt != rt_rq)
++ return;
++#endif
+ if (rq->online && rt_rq->highest_prio.curr != prev_prio)
+ cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
+ }
+@@ -1749,7 +1777,11 @@ static void watchdog(struct rq *rq, struct task_struct *p)
+ if (soft != RLIM_INFINITY) {
+ unsigned long next;
+
+- p->rt.timeout++;
++ if (p->rt.watchdog_stamp != jiffies) {
++ p->rt.timeout++;
++ p->rt.watchdog_stamp = jiffies;
++ }
++
+ next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
+ if (p->rt.timeout > next)
+ p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
+@@ -1758,6 +1790,8 @@ static void watchdog(struct rq *rq, struct task_struct *p)
+
+ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
+ {
++ struct sched_rt_entity *rt_se = &p->rt;
++
+ update_curr_rt(rq);
+
+ watchdog(rq, p);
+@@ -1775,12 +1809,15 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
+ p->rt.time_slice = DEF_TIMESLICE;
+
+ /*
+- * Requeue to the end of queue if we are not the only element
+- * on the queue:
++ * Requeue to the end of queue if we (and all of our ancestors) are the
++ * only element on the queue
+ */
+- if (p->rt.run_list.prev != p->rt.run_list.next) {
+- requeue_task_rt(rq, p, 0);
+- set_tsk_need_resched(p);
++ for_each_sched_rt_entity(rt_se) {
++ if (rt_se->run_list.prev != rt_se->run_list.next) {
++ requeue_task_rt(rq, p, 0);
++ set_tsk_need_resched(p);
++ return;
++ }
+ }
+ }
+
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index d40d7f6f3c51..cf8b439727b0 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -618,7 +618,7 @@ static int ftrace_profile_init(void)
+ int cpu;
+ int ret = 0;
+
+- for_each_online_cpu(cpu) {
++ for_each_possible_cpu(cpu) {
+ ret = ftrace_profile_init_cpu(cpu);
+ if (ret)
+ break;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index ddf2128f93fa..3a5aae2d6ee7 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -679,6 +679,23 @@ int PageHuge(struct page *page)
+ }
+ EXPORT_SYMBOL_GPL(PageHuge);
+
++/*
++ * PageHeadHuge() only returns true for hugetlbfs head page, but not for
++ * normal or transparent huge pages.
++ */
++int PageHeadHuge(struct page *page_head)
++{
++ compound_page_dtor *dtor;
++
++ if (!PageHead(page_head))
++ return 0;
++
++ dtor = get_compound_page_dtor(page_head);
++
++ return dtor == free_huge_page;
++}
++EXPORT_SYMBOL_GPL(PageHeadHuge);
++
+ pgoff_t __basepage_index(struct page *page)
+ {
+ struct page *page_head = compound_head(page);
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 1b038789a9d6..96c4bcf188b1 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1441,10 +1441,18 @@ static int soft_offline_huge_page(struct page *page, int flags)
+ return ret;
+ }
+ done:
+- if (!PageHWPoison(hpage))
+- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
+- set_page_hwpoison_huge_page(hpage);
+- dequeue_hwpoisoned_huge_page(hpage);
++ /* overcommit hugetlb page will be freed to buddy */
++ if (PageHuge(hpage)) {
++ if (!PageHWPoison(hpage))
++ atomic_long_add(1 << compound_trans_order(hpage),
++ &mce_bad_pages);
++ set_page_hwpoison_huge_page(hpage);
++ dequeue_hwpoisoned_huge_page(hpage);
++ } else {
++ SetPageHWPoison(page);
++ atomic_long_inc(&mce_bad_pages);
++ }
++
+ /* keep elevated page count for bad page */
+ return ret;
+ }
+diff --git a/mm/mmap.c b/mm/mmap.c
+index dff37a686230..6182c8a4ea38 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1368,7 +1368,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ struct vm_area_struct *vma;
+ unsigned long start_addr;
+
+- if (len > TASK_SIZE)
++ if (len > TASK_SIZE - mmap_min_addr)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED)
+@@ -1377,7 +1377,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
++ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+@@ -1442,9 +1442,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
++ unsigned long low_limit = max(PAGE_SIZE, mmap_min_addr);
+
+ /* requested length too big for entire address space */
+- if (len > TASK_SIZE)
++ if (len > TASK_SIZE - mmap_min_addr)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED)
+@@ -1454,7 +1455,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
++ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+@@ -1469,14 +1470,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ addr = mm->free_area_cache;
+
+ /* make sure it can fit in the remaining address space */
+- if (addr > len) {
++ if (addr >= low_limit + len) {
+ vma = find_vma(mm, addr-len);
+ if (!vma || addr <= vma->vm_start)
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+
+- if (mm->mmap_base < len)
++ if (mm->mmap_base < low_limit + len)
+ goto bottomup;
+
+ addr = mm->mmap_base-len;
+@@ -1498,7 +1499,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+
+ /* try just below the current vma->vm_start */
+ addr = vma->vm_start-len;
+- } while (len < vma->vm_start);
++ } while (vma->vm_start >= low_limit + len);
+
+ bottomup:
+ /*
+diff --git a/mm/swap.c b/mm/swap.c
+index 55b266dcfbeb..a4b90161be66 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -31,6 +31,7 @@
+ #include <linux/backing-dev.h>
+ #include <linux/memcontrol.h>
+ #include <linux/gfp.h>
++#include <linux/hugetlb.h>
+
+ #include "internal.h"
+
+@@ -69,7 +70,8 @@ static void __put_compound_page(struct page *page)
+ {
+ compound_page_dtor *dtor;
+
+- __page_cache_release(page);
++ if (!PageHuge(page))
++ __page_cache_release(page);
+ dtor = get_compound_page_dtor(page);
+ (*dtor)(page);
+ }
+@@ -83,6 +85,35 @@ static void put_compound_page(struct page *page)
+ if (likely(page != page_head &&
+ get_page_unless_zero(page_head))) {
+ unsigned long flags;
++
++ if (PageHeadHuge(page_head)) {
++ if (likely(PageTail(page))) {
++ /*
++ * __split_huge_page_refcount
++ * cannot race here.
++ */
++ VM_BUG_ON(!PageHead(page_head));
++ atomic_dec(&page->_mapcount);
++ if (put_page_testzero(page_head))
++ VM_BUG_ON(1);
++ if (put_page_testzero(page_head))
++ __put_compound_page(page_head);
++ return;
++ } else {
++ /*
++ * __split_huge_page_refcount
++ * run before us, "page" was a
++ * THP tail. The split
++ * page_head has been freed
++ * and reallocated as slab or
++ * hugetlbfs page of smaller
++ * order (only possible if
++ * reallocated as slab on
++ * x86).
++ */
++ goto skip_lock;
++ }
++ }
+ /*
+ * page_head wasn't a dangling pointer but it
+ * may not be a head page anymore by the time
+@@ -94,9 +125,29 @@ static void put_compound_page(struct page *page)
+ /* __split_huge_page_refcount run before us */
+ compound_unlock_irqrestore(page_head, flags);
+ VM_BUG_ON(PageHead(page_head));
+- if (put_page_testzero(page_head))
+- __put_single_page(page_head);
+- out_put_single:
++skip_lock:
++ if (put_page_testzero(page_head)) {
++ /*
++ * The head page may have been
++ * freed and reallocated as a
++ * compound page of smaller
++ * order and then freed again.
++ * All we know is that it
++ * cannot have become: a THP
++ * page, a compound page of
++ * higher order, a tail page.
++ * That is because we still
++ * hold the refcount of the
++ * split THP tail and
++ * page_head was the THP head
++ * before the split.
++ */
++ if (PageHead(page_head))
++ __put_compound_page(page_head);
++ else
++ __put_single_page(page_head);
++ }
++out_put_single:
+ if (put_page_testzero(page))
+ __put_single_page(page);
+ return;
+@@ -163,6 +214,31 @@ bool __get_page_tail(struct page *page)
+ struct page *page_head = compound_trans_head(page);
+
+ if (likely(page != page_head && get_page_unless_zero(page_head))) {
++ /* Ref to put_compound_page() comment. */
++ if (PageHeadHuge(page_head)) {
++ if (likely(PageTail(page))) {
++ /*
++ * This is a hugetlbfs
++ * page. __split_huge_page_refcount
++ * cannot race here.
++ */
++ VM_BUG_ON(!PageHead(page_head));
++ __get_page_tail_foll(page, false);
++ return true;
++ } else {
++ /*
++ * __split_huge_page_refcount run
++ * before us, "page" was a THP
++ * tail. The split page_head has been
++ * freed and reallocated as slab or
++ * hugetlbfs page of smaller order
++ * (only possible if reallocated as
++ * slab on x86).
++ */
++ put_page(page_head);
++ return false;
++ }
++ }
+ /*
+ * page_head wasn't a dangling pointer but it
+ * may not be a head page anymore by the time
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index b40d3da974b5..48a62d89f8ae 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -522,6 +522,22 @@ static const struct header_ops vlan_header_ops = {
+ .parse = eth_header_parse,
+ };
+
++static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
++ unsigned short type,
++ const void *daddr, const void *saddr,
++ unsigned int len)
++{
++ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
++
++ return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
++}
++
++static const struct header_ops vlan_passthru_header_ops = {
++ .create = vlan_passthru_hard_header,
++ .rebuild = dev_rebuild_header,
++ .parse = eth_header_parse,
++};
++
+ static const struct net_device_ops vlan_netdev_ops;
+
+ static int vlan_dev_init(struct net_device *dev)
+@@ -561,7 +577,7 @@ static int vlan_dev_init(struct net_device *dev)
+
+ dev->needed_headroom = real_dev->needed_headroom;
+ if (real_dev->features & NETIF_F_HW_VLAN_TX) {
+- dev->header_ops = real_dev->header_ops;
++ dev->header_ops = &vlan_passthru_header_ops;
+ dev->hard_header_len = real_dev->hard_header_len;
+ } else {
+ dev->header_ops = &vlan_header_ops;
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index a06deca97f0e..2157984a9eb9 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1743,7 +1743,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
+ u32 old;
+ struct net_bridge_mdb_htable *mdb;
+
+- spin_lock(&br->multicast_lock);
++ spin_lock_bh(&br->multicast_lock);
+ if (!netif_running(br->dev))
+ goto unlock;
+
+@@ -1775,7 +1775,7 @@ rollback:
+ }
+
+ unlock:
+- spin_unlock(&br->multicast_lock);
++ spin_unlock_bh(&br->multicast_lock);
+
+ return err;
+ }
+diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
+index b856f87e63d2..1d9a52929bad 100644
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -61,7 +61,6 @@ static struct genl_family net_drop_monitor_family = {
+ .hdrsize = 0,
+ .name = "NET_DM",
+ .version = 2,
+- .maxattr = NET_DM_CMD_MAX,
+ };
+
+ static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index ccee270a9b65..6be5e8e0f865 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -119,6 +119,10 @@ static int inet_csk_diag_fill(struct sock *sk,
+
+ r->id.idiag_sport = inet->inet_sport;
+ r->id.idiag_dport = inet->inet_dport;
++
++ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
++ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
++
+ r->id.idiag_src[0] = inet->inet_rcv_saddr;
+ r->id.idiag_dst[0] = inet->inet_daddr;
+
+@@ -209,13 +213,20 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
+
+ r->idiag_family = tw->tw_family;
+ r->idiag_retrans = 0;
++
+ r->id.idiag_if = tw->tw_bound_dev_if;
+ r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
+ r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
++
+ r->id.idiag_sport = tw->tw_sport;
+ r->id.idiag_dport = tw->tw_dport;
++
++ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
++ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
++
+ r->id.idiag_src[0] = tw->tw_rcv_saddr;
+ r->id.idiag_dst[0] = tw->tw_daddr;
++
+ r->idiag_state = tw->tw_substate;
+ r->idiag_timer = 3;
+ r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
+@@ -598,8 +609,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
+
+ r->id.idiag_sport = inet->inet_sport;
+ r->id.idiag_dport = ireq->rmt_port;
++
++ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
++ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
++
+ r->id.idiag_src[0] = ireq->loc_addr;
+ r->id.idiag_dst[0] = ireq->rmt_addr;
++
+ r->idiag_expires = jiffies_to_msecs(tmo);
+ r->idiag_rqueue = 0;
+ r->idiag_wqueue = 0;
+@@ -824,7 +840,7 @@ next_normal:
+ ++num;
+ }
+
+- if (r->idiag_states & TCPF_TIME_WAIT) {
++ if (r->idiag_states & (TCPF_TIME_WAIT | TCPF_FIN_WAIT2)) {
+ struct inet_timewait_sock *tw;
+
+ inet_twsk_for_each(tw, node,
+@@ -832,6 +848,8 @@ next_normal:
+
+ if (num < s_num)
+ goto next_dying;
++ if (!(r->idiag_states & (1 << tw->tw_substate)))
++ goto next_dying;
+ if (r->id.idiag_sport != tw->tw_sport &&
+ r->id.idiag_sport)
+ goto next_dying;
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index b5e64e4c0646..140d37750b0f 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -155,9 +155,12 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
+ static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
+ struct mr_table **mrt)
+ {
+- struct ipmr_result res;
+- struct fib_lookup_arg arg = { .result = &res, };
+ int err;
++ struct ipmr_result res;
++ struct fib_lookup_arg arg = {
++ .result = &res,
++ .flags = FIB_LOOKUP_NOREF,
++ };
+
+ err = fib_rules_lookup(net->ipv4.mr_rules_ops,
+ flowi4_to_flowi(flp4), 0, &arg);
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index f5af259386e0..f96c96f6e4fd 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -139,9 +139,12 @@ static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
+ static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
+ struct mr6_table **mrt)
+ {
+- struct ip6mr_result res;
+- struct fib_lookup_arg arg = { .result = &res, };
+ int err;
++ struct ip6mr_result res;
++ struct fib_lookup_arg arg = {
++ .result = &res,
++ .flags = FIB_LOOKUP_NOREF,
++ };
+
+ err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
+ flowi6_to_flowi(flp6), 0, &arg);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 1768238c855d..9a4f4377ce92 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2058,15 +2058,11 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
+ {
+ struct net *net = dev_net(idev->dev);
+ struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
+- net->loopback_dev, 0);
++ net->loopback_dev, DST_NOCOUNT);
+ struct neighbour *neigh;
+
+- if (rt == NULL) {
+- if (net_ratelimit())
+- pr_warning("IPv6: Maximum number of routes reached,"
+- " consider increasing route/max_size.\n");
++ if (rt == NULL)
+ return ERR_PTR(-ENOMEM);
+- }
+
+ in6_dev_hold(idev);
+
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 99a60d545b24..f432d7b6d93a 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -715,7 +715,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct llc_sock *llc = llc_sk(sk);
+ size_t copied = 0;
+ u32 peek_seq = 0;
+- u32 *seq;
++ u32 *seq, skb_len;
+ unsigned long used;
+ int target; /* Read at least this many bytes */
+ long timeo;
+@@ -813,6 +813,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
+ }
+ continue;
+ found_ok_skb:
++ skb_len = skb->len;
+ /* Ok so how much can we use? */
+ used = skb->len - offset;
+ if (len < used)
+@@ -843,7 +844,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
+ }
+
+ /* Partial read */
+- if (used + offset < skb->len)
++ if (used + offset < skb_len)
+ continue;
+ } while (len > 0);
+
+diff --git a/net/rds/ib.c b/net/rds/ib.c
+index b4c8b0022fee..ba2dffeff608 100644
+--- a/net/rds/ib.c
++++ b/net/rds/ib.c
+@@ -338,7 +338,8 @@ static int rds_ib_laddr_check(__be32 addr)
+ ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
+ /* due to this, we will claim to support iWARP devices unless we
+ check node_type. */
+- if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA)
++ if (ret || !cm_id->device ||
++ cm_id->device->node_type != RDMA_NODE_IB_CA)
+ ret = -EADDRNOTAVAIL;
+
+ rdsdebug("addr %pI4 ret %d node type %d\n",
+diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
+index e59094981175..37be6e226d1b 100644
+--- a/net/rds/ib_send.c
++++ b/net/rds/ib_send.c
+@@ -552,9 +552,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
+ && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
+ rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
+ scat = &rm->data.op_sg[sg];
+- ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+- ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
+- return ret;
++ ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
++ return sizeof(struct rds_header) + ret;
+ }
+
+ /* FIXME we may overallocate here */
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index bf76dec76e00..686fb1ab2a08 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -1258,6 +1258,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
+
+ if (msg->msg_name) {
+ struct sockaddr_rose *srose;
++ struct full_sockaddr_rose *full_srose = msg->msg_name;
+
+ memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
+ srose = msg->msg_name;
+@@ -1265,18 +1266,9 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
+ srose->srose_addr = rose->dest_addr;
+ srose->srose_call = rose->dest_call;
+ srose->srose_ndigis = rose->dest_ndigis;
+- if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
+- struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
+- for (n = 0 ; n < rose->dest_ndigis ; n++)
+- full_srose->srose_digis[n] = rose->dest_digis[n];
+- msg->msg_namelen = sizeof(struct full_sockaddr_rose);
+- } else {
+- if (rose->dest_ndigis >= 1) {
+- srose->srose_ndigis = 1;
+- srose->srose_digi = rose->dest_digis[0];
+- }
+- msg->msg_namelen = sizeof(struct sockaddr_rose);
+- }
++ for (n = 0 ; n < rose->dest_ndigis ; n++)
++ full_srose->srose_digis[n] = rose->dest_digis[n];
++ msg->msg_namelen = sizeof(struct full_sockaddr_rose);
+ }
+
+ skb_free_datagram(sk, skb);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 9338ccc1a00b..eddfdecc122b 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -696,7 +696,9 @@ static int unix_autobind(struct socket *sock)
+ int err;
+ unsigned int retries = 0;
+
+- mutex_lock(&u->readlock);
++ err = mutex_lock_interruptible(&u->readlock);
++ if (err)
++ return err;
+
+ err = 0;
+ if (u->addr)
+@@ -829,7 +831,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ goto out;
+ addr_len = err;
+
+- mutex_lock(&u->readlock);
++ err = mutex_lock_interruptible(&u->readlock);
++ if (err)
++ goto out;
+
+ err = -EINVAL;
+ if (u->addr)
+diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
+index 617a310025b1..60549a4a2f98 100644
+--- a/net/wireless/radiotap.c
++++ b/net/wireless/radiotap.c
+@@ -122,6 +122,10 @@ int ieee80211_radiotap_iterator_init(
+ /* find payload start allowing for extended bitmap(s) */
+
+ if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
++ if ((unsigned long)iterator->_arg -
++ (unsigned long)iterator->_rtheader + sizeof(uint32_t) >
++ (unsigned long)iterator->_max_length)
++ return -EINVAL;
+ while (get_unaligned_le32(iterator->_arg) &
+ (1 << IEEE80211_RADIOTAP_EXT)) {
+ iterator->_arg += sizeof(uint32_t);
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 5898f342ee47..bcf1d73a3e03 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -217,6 +217,14 @@ static int inode_alloc_security(struct inode *inode)
+ return 0;
+ }
+
++static void inode_free_rcu(struct rcu_head *head)
++{
++ struct inode_security_struct *isec;
++
++ isec = container_of(head, struct inode_security_struct, rcu);
++ kmem_cache_free(sel_inode_cache, isec);
++}
++
+ static void inode_free_security(struct inode *inode)
+ {
+ struct inode_security_struct *isec = inode->i_security;
+@@ -227,8 +235,16 @@ static void inode_free_security(struct inode *inode)
+ list_del_init(&isec->list);
+ spin_unlock(&sbsec->isec_lock);
+
+- inode->i_security = NULL;
+- kmem_cache_free(sel_inode_cache, isec);
++ /*
++ * The inode may still be referenced in a path walk and
++ * a call to selinux_inode_permission() can be made
++ * after inode_free_security() is called. Ideally, the VFS
++ * wouldn't do this, but fixing that is a much harder
++ * job. For now, simply free the i_security via RCU, and
++ * leave the current inode->i_security pointer intact.
++ * The inode will be freed after the RCU grace period too.
++ */
++ call_rcu(&isec->rcu, inode_free_rcu);
+ }
+
+ static int file_alloc_security(struct file *file)
+@@ -4181,8 +4197,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ }
+ err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
+ PEER__RECV, &ad);
+- if (err)
++ if (err) {
+ selinux_netlbl_err(skb, err, 0);
++ return err;
++ }
+ }
+
+ if (secmark_active) {
+@@ -5372,11 +5390,11 @@ static int selinux_setprocattr(struct task_struct *p,
+ /* Check for ptracing, and update the task SID if ok.
+ Otherwise, leave SID unchanged and fail. */
+ ptsid = 0;
+- task_lock(p);
++ rcu_read_lock();
+ tracer = ptrace_parent(p);
+ if (tracer)
+ ptsid = task_sid(tracer);
+- task_unlock(p);
++ rcu_read_unlock();
+
+ if (tracer) {
+ error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
+diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
+index 26c7eee1c309..7b1830bde1c8 100644
+--- a/security/selinux/include/objsec.h
++++ b/security/selinux/include/objsec.h
+@@ -38,7 +38,10 @@ struct task_security_struct {
+
+ struct inode_security_struct {
+ struct inode *inode; /* back pointer to inode object */
+- struct list_head list; /* list of inode_security_struct */
++ union {
++ struct list_head list; /* list of inode_security_struct */
++ struct rcu_head rcu; /* for freeing the inode_security_struct */
++ };
+ u32 task_sid; /* SID of creating task */
+ u32 sid; /* SID of this object */
+ u16 sclass; /* security class of this object */
+diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
+index 3420bd3da5d7..cf0d46e5909e 100644
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -1846,6 +1846,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
+ case SNDRV_PCM_STATE_DISCONNECTED:
+ err = -EBADFD;
+ goto _endloop;
++ case SNDRV_PCM_STATE_PAUSED:
++ continue;
+ }
+ if (!tout) {
+ snd_printd("%s write error (DMA or IRQ trouble?)\n",
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 7ebe4b772fe4..fea68959f33b 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2618,6 +2618,10 @@ static void __devinit check_probe_mask(struct azx *chip, int dev)
+ * white/black-list for enable_msi
+ */
+ static struct snd_pci_quirk msi_black_list[] __devinitdata = {
++ SND_PCI_QUIRK(0x103c, 0x2191, "HP", 0), /* AMD Hudson */
++ SND_PCI_QUIRK(0x103c, 0x2192, "HP", 0), /* AMD Hudson */
++ SND_PCI_QUIRK(0x103c, 0x21f7, "HP", 0), /* AMD Hudson */
++ SND_PCI_QUIRK(0x103c, 0x21fa, "HP", 0), /* AMD Hudson */
+ SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
+ SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
+ SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
+diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
+index 285ef87e6704..fafb76f48403 100644
+--- a/sound/soc/codecs/wm8904.c
++++ b/sound/soc/codecs/wm8904.c
+@@ -1714,7 +1714,7 @@ static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_B:
+- aif1 |= WM8904_AIF_LRCLK_INV;
++ aif1 |= 0x3 | WM8904_AIF_LRCLK_INV;
+ case SND_SOC_DAIFMT_DSP_A:
+ aif1 |= 0x3;
+ break;
+diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
+index dc4de3762111..bcf1d2f0b791 100644
+--- a/tools/power/cpupower/utils/cpupower-set.c
++++ b/tools/power/cpupower/utils/cpupower-set.c
+@@ -18,9 +18,9 @@
+ #include "helpers/bitmask.h"
+
+ static struct option set_opts[] = {
+- { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'},
+- { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'},
+- { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'},
++ { .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'},
++ { .name = "sched-mc", .has_arg = required_argument, .flag = NULL, .val = 'm'},
++ { .name = "sched-smt", .has_arg = required_argument, .flag = NULL, .val = 's'},
+ { },
+ };
+
diff --git a/1055_linux-3.2.56.patch b/1055_linux-3.2.56.patch
new file mode 100644
index 00000000..fc4c4e94
--- /dev/null
+++ b/1055_linux-3.2.56.patch
@@ -0,0 +1,6904 @@
+diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
+index 0ec91f03422e..404a09767546 100644
+--- a/Documentation/filesystems/proc.txt
++++ b/Documentation/filesystems/proc.txt
+@@ -1293,8 +1293,8 @@ may allocate from based on an estimation of its current memory and swap use.
+ For example, if a task is using all allowed memory, its badness score will be
+ 1000. If it is using half of its allowed memory, its score will be 500.
+
+-There is an additional factor included in the badness score: root
+-processes are given 3% extra memory over other tasks.
++There is an additional factor included in the badness score: the current memory
++and swap usage is discounted by 3% for root processes.
+
+ The amount of "allowed" memory depends on the context in which the oom killer
+ was called. If it is due to the memory assigned to the allocating task's cpuset
+diff --git a/Documentation/i2c/busses/i2c-piix4 b/Documentation/i2c/busses/i2c-piix4
+index 65da15796ed3..0982e725df09 100644
+--- a/Documentation/i2c/busses/i2c-piix4
++++ b/Documentation/i2c/busses/i2c-piix4
+@@ -8,7 +8,7 @@ Supported adapters:
+ Datasheet: Only available via NDA from ServerWorks
+ * ATI IXP200, IXP300, IXP400, SB600, SB700 and SB800 southbridges
+ Datasheet: Not publicly available
+- * AMD Hudson-2, CZ
++ * AMD Hudson-2, ML, CZ
+ Datasheet: Not publicly available
+ * Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge
+ Datasheet: Publicly available at the SMSC website http://www.smsc.com
+diff --git a/Makefile b/Makefile
+index 538463ed314e..ec90bfb29420 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 55
++SUBLEVEL = 56
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
+index 40736da9bea8..1d2ef5a3fc84 100644
+--- a/arch/alpha/lib/csum_partial_copy.c
++++ b/arch/alpha/lib/csum_partial_copy.c
+@@ -373,6 +373,11 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+ __wsum
+ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+ {
+- return csum_partial_copy_from_user((__force const void __user *)src,
+- dst, len, sum, NULL);
++ __wsum checksum;
++ mm_segment_t oldfs = get_fs();
++ set_fs(KERNEL_DS);
++ checksum = csum_partial_copy_from_user((__force const void __user *)src,
++ dst, len, sum, NULL);
++ set_fs(oldfs);
++ return checksum;
+ }
+diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
+index 1397408541bb..b1e0e0752c92 100644
+--- a/arch/arm/include/asm/cacheflush.h
++++ b/arch/arm/include/asm/cacheflush.h
+@@ -202,6 +202,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+ static inline void __flush_icache_all(void)
+ {
+ __flush_icache_preferred();
++ dsb();
+ }
+
+ #define flush_cache_all() __cpuc_flush_kern_all()
+diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
+index 65fa3c88095c..095a9a3f40b4 100644
+--- a/arch/arm/include/asm/spinlock.h
++++ b/arch/arm/include/asm/spinlock.h
+@@ -44,18 +44,9 @@
+
+ static inline void dsb_sev(void)
+ {
+-#if __LINUX_ARM_ARCH__ >= 7
+- __asm__ __volatile__ (
+- "dsb\n"
+- SEV
+- );
+-#else
+- __asm__ __volatile__ (
+- "mcr p15, 0, %0, c7, c10, 4\n"
+- SEV
+- : : "r" (0)
+- );
+-#endif
++
++ dsb();
++ __asm__(SEV);
+ }
+
+ /*
+diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
+index 8168d99444b0..85a5348a7679 100644
+--- a/arch/arm/mm/proc-v6.S
++++ b/arch/arm/mm/proc-v6.S
+@@ -199,7 +199,6 @@ __v6_setup:
+ mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache
+ mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache
+- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
+ #ifdef CONFIG_MMU
+ mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
+ mcr p15, 0, r0, c2, c0, 2 @ TTB control register
+@@ -209,6 +208,8 @@ __v6_setup:
+ ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
+ mcr p15, 0, r8, c2, c0, 1 @ load TTB1
+ #endif /* CONFIG_MMU */
++ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and
++ @ complete invalidations
+ adr r5, v6_crval
+ ldmia r5, {r5, r6}
+ #ifdef CONFIG_CPU_ENDIAN_BE8
+diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
+index 785365eace01..19d21ff27b0e 100644
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -368,7 +368,6 @@ __v7_setup:
+
+ 3: mov r10, #0
+ mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
+- dsb
+ #ifdef CONFIG_MMU
+ mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
+ mcr p15, 0, r10, c2, c0, 2 @ TTB control register
+@@ -382,6 +381,7 @@ __v7_setup:
+ mcr p15, 0, r5, c10, c2, 0 @ write PRRR
+ mcr p15, 0, r6, c10, c2, 1 @ write NMRR
+ #endif
++ dsb @ Complete invalidations
+ #ifndef CONFIG_ARM_THUMBEE
+ mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
+ and r0, r0, #(0xf << 12) @ ThumbEE enabled field
+diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile
+index 22fb66590dcd..dba48a5d5bb9 100644
+--- a/arch/avr32/Makefile
++++ b/arch/avr32/Makefile
+@@ -11,7 +11,7 @@ all: uImage vmlinux.elf
+
+ KBUILD_DEFCONFIG := atstk1002_defconfig
+
+-KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic
++KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic -D__linux__
+ KBUILD_AFLAGS += -mrelax -mno-pic
+ KBUILD_CFLAGS_MODULE += -mno-relax
+ LDFLAGS_vmlinux += --relax
+diff --git a/arch/avr32/boards/mimc200/fram.c b/arch/avr32/boards/mimc200/fram.c
+index 9764a1a1073e..c1466a872b9c 100644
+--- a/arch/avr32/boards/mimc200/fram.c
++++ b/arch/avr32/boards/mimc200/fram.c
+@@ -11,6 +11,7 @@
+ #define FRAM_VERSION "1.0"
+
+ #include <linux/miscdevice.h>
++#include <linux/module.h>
+ #include <linux/proc_fs.h>
+ #include <linux/mm.h>
+ #include <linux/io.h>
+diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
+index a79fe9aa7721..7d5ac0b3ed8b 100644
+--- a/arch/mips/mm/c-r4k.c
++++ b/arch/mips/mm/c-r4k.c
+@@ -633,9 +633,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
+ if (size >= scache_size)
+ r4k_blast_scache();
+ else {
+- unsigned long lsize = cpu_scache_line_size();
+- unsigned long almask = ~(lsize - 1);
+-
+ /*
+ * There is no clearly documented alignment requirement
+ * for the cache instruction on MIPS processors and
+@@ -644,9 +641,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
+ * hit ops with insufficient alignment. Solved by
+ * aligning the address to cache line size.
+ */
+- cache_op(Hit_Writeback_Inv_SD, addr & almask);
+- cache_op(Hit_Writeback_Inv_SD,
+- (addr + size - 1) & almask);
+ blast_inv_scache_range(addr, addr + size);
+ }
+ __sync();
+@@ -656,12 +650,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
+ if (cpu_has_safe_index_cacheops && size >= dcache_size) {
+ r4k_blast_dcache();
+ } else {
+- unsigned long lsize = cpu_dcache_line_size();
+- unsigned long almask = ~(lsize - 1);
+-
+ R4600_HIT_CACHEOP_WAR_IMPL;
+- cache_op(Hit_Writeback_Inv_D, addr & almask);
+- cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
+ blast_inv_dcache_range(addr, addr + size);
+ }
+
+diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
+index a3c684b4c862..02ebe1a99621 100644
+--- a/arch/powerpc/kernel/cacheinfo.c
++++ b/arch/powerpc/kernel/cacheinfo.c
+@@ -788,6 +788,9 @@ static void remove_cache_dir(struct cache_dir *cache_dir)
+ {
+ remove_index_dirs(cache_dir);
+
++ /* Remove cache dir from sysfs */
++ kobject_del(cache_dir->kobj);
++
+ kobject_put(cache_dir->kobj);
+
+ kfree(cache_dir);
+diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
+index 424afb6b8fba..39787c96968c 100644
+--- a/arch/powerpc/kernel/crash_dump.c
++++ b/arch/powerpc/kernel/crash_dump.c
+@@ -108,17 +108,19 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset, int userbuf)
+ {
+ void *vaddr;
++ phys_addr_t paddr;
+
+ if (!csize)
+ return 0;
+
+ csize = min_t(size_t, csize, PAGE_SIZE);
++ paddr = pfn << PAGE_SHIFT;
+
+- if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
+- vaddr = __va(pfn << PAGE_SHIFT);
++ if (memblock_is_region_memory(paddr, csize)) {
++ vaddr = __va(paddr);
+ csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
+ } else {
+- vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
++ vaddr = __ioremap(paddr, PAGE_SIZE, 0);
+ csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
+ iounmap(vaddr);
+ }
+diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
+index b47a0e1ab001..c712ecec13ba 100644
+--- a/arch/powerpc/kernel/reloc_64.S
++++ b/arch/powerpc/kernel/reloc_64.S
+@@ -81,6 +81,7 @@ _GLOBAL(relocate)
+
+ 6: blr
+
++.balign 8
+ p_dyn: .llong __dynamic_start - 0b
+ p_rela: .llong __rela_dyn_start - 0b
+ p_st: .llong _stext - 0b
+diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
+index 13c432ea2fa8..844fc832f3ec 100644
+--- a/arch/powerpc/kvm/e500_tlb.c
++++ b/arch/powerpc/kvm/e500_tlb.c
+@@ -481,7 +481,7 @@ static inline void kvmppc_e500_priv_release(struct tlbe_priv *priv)
+ }
+
+ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
+- unsigned int eaddr, int as)
++ gva_t eaddr, int as)
+ {
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ unsigned int victim, pidsel, tsized;
+diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
+index 3ec8b394146d..51fb1efd63c7 100644
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -25,6 +25,7 @@
+ #include <linux/err.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
++#include <linux/spinlock.h>
+ #include "crypt_s390.h"
+
+ #define AES_KEYLEN_128 1
+@@ -32,6 +33,7 @@
+ #define AES_KEYLEN_256 4
+
+ static u8 *ctrblk;
++static DEFINE_SPINLOCK(ctrblk_lock);
+ static char keylen_flag;
+
+ struct s390_aes_ctx {
+@@ -324,7 +326,8 @@ static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
+ u8 *in = walk->src.virt.addr;
+
+ ret = crypt_s390_km(func, param, out, in, n);
+- BUG_ON((ret < 0) || (ret != n));
++ if (ret < 0 || ret != n)
++ return -EIO;
+
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+@@ -463,7 +466,8 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
+ u8 *in = walk->src.virt.addr;
+
+ ret = crypt_s390_kmc(func, &param, out, in, n);
+- BUG_ON((ret < 0) || (ret != n));
++ if (ret < 0 || ret != n)
++ return -EIO;
+
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+@@ -636,7 +640,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
+ memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+ memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
+ ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
+- BUG_ON(ret < 0);
++ if (ret < 0)
++ return -EIO;
+
+ memcpy(xts_param.key, xts_ctx->key, 32);
+ memcpy(xts_param.init, pcc_param.xts, 16);
+@@ -647,7 +652,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
+ in = walk->src.virt.addr;
+
+ ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
+- BUG_ON(ret < 0 || ret != n);
++ if (ret < 0 || ret != n)
++ return -EIO;
+
+ nbytes &= AES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+@@ -756,42 +762,67 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ return aes_set_key(tfm, in_key, key_len);
+ }
+
++static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
++{
++ unsigned int i, n;
++
++ /* only use complete blocks, max. PAGE_SIZE */
++ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
++ for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
++ memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
++ AES_BLOCK_SIZE);
++ crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
++ }
++ return n;
++}
++
+ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
+ struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
+ {
+ int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
+- unsigned int i, n, nbytes;
+- u8 buf[AES_BLOCK_SIZE];
+- u8 *out, *in;
++ unsigned int n, nbytes;
++ u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
++ u8 *out, *in, *ctrptr = ctrbuf;
+
+ if (!walk->nbytes)
+ return ret;
+
+- memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
++ if (spin_trylock(&ctrblk_lock))
++ ctrptr = ctrblk;
++
++ memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
+ while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+ while (nbytes >= AES_BLOCK_SIZE) {
+- /* only use complete blocks, max. PAGE_SIZE */
+- n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
+- nbytes & ~(AES_BLOCK_SIZE - 1);
+- for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+- memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
+- AES_BLOCK_SIZE);
+- crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
++ if (ctrptr == ctrblk)
++ n = __ctrblk_init(ctrptr, nbytes);
++ else
++ n = AES_BLOCK_SIZE;
++ ret = crypt_s390_kmctr(func, sctx->key, out, in,
++ n, ctrptr);
++ if (ret < 0 || ret != n) {
++ if (ctrptr == ctrblk)
++ spin_unlock(&ctrblk_lock);
++ return -EIO;
+ }
+- ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
+- BUG_ON(ret < 0 || ret != n);
+ if (n > AES_BLOCK_SIZE)
+- memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
++ memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE);
+- crypto_inc(ctrblk, AES_BLOCK_SIZE);
++ crypto_inc(ctrptr, AES_BLOCK_SIZE);
+ out += n;
+ in += n;
+ nbytes -= n;
+ }
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ }
++ if (ctrptr == ctrblk) {
++ if (nbytes)
++ memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
++ else
++ memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
++ spin_unlock(&ctrblk_lock);
++ }
+ /*
+ * final block may be < AES_BLOCK_SIZE, copy only nbytes
+ */
+@@ -799,13 +830,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+ ret = crypt_s390_kmctr(func, sctx->key, buf, in,
+- AES_BLOCK_SIZE, ctrblk);
+- BUG_ON(ret < 0 || ret != AES_BLOCK_SIZE);
++ AES_BLOCK_SIZE, ctrbuf);
++ if (ret < 0 || ret != AES_BLOCK_SIZE)
++ return -EIO;
+ memcpy(out, buf, nbytes);
+- crypto_inc(ctrblk, AES_BLOCK_SIZE);
++ crypto_inc(ctrbuf, AES_BLOCK_SIZE);
+ ret = blkcipher_walk_done(desc, walk, 0);
++ memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
+ }
+- memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
++
+ return ret;
+ }
+
+diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
+index a52bfd124d86..991fb7d18574 100644
+--- a/arch/s390/crypto/des_s390.c
++++ b/arch/s390/crypto/des_s390.c
+@@ -25,6 +25,7 @@
+ #define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
+
+ static u8 *ctrblk;
++static DEFINE_SPINLOCK(ctrblk_lock);
+
+ struct s390_des_ctx {
+ u8 iv[DES_BLOCK_SIZE];
+@@ -95,7 +96,8 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
+ u8 *in = walk->src.virt.addr;
+
+ ret = crypt_s390_km(func, key, out, in, n);
+- BUG_ON((ret < 0) || (ret != n));
++ if (ret < 0 || ret != n)
++ return -EIO;
+
+ nbytes &= DES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+@@ -105,28 +107,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
+ }
+
+ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
+- u8 *iv, struct blkcipher_walk *walk)
++ struct blkcipher_walk *walk)
+ {
++ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ int ret = blkcipher_walk_virt(desc, walk);
+ unsigned int nbytes = walk->nbytes;
++ struct {
++ u8 iv[DES_BLOCK_SIZE];
++ u8 key[DES3_KEY_SIZE];
++ } param;
+
+ if (!nbytes)
+ goto out;
+
+- memcpy(iv, walk->iv, DES_BLOCK_SIZE);
++ memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
++ memcpy(param.key, ctx->key, DES3_KEY_SIZE);
+ do {
+ /* only use complete blocks */
+ unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
+ u8 *out = walk->dst.virt.addr;
+ u8 *in = walk->src.virt.addr;
+
+- ret = crypt_s390_kmc(func, iv, out, in, n);
+- BUG_ON((ret < 0) || (ret != n));
++ ret = crypt_s390_kmc(func, &param, out, in, n);
++ if (ret < 0 || ret != n)
++ return -EIO;
+
+ nbytes &= DES_BLOCK_SIZE - 1;
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ } while ((nbytes = walk->nbytes));
+- memcpy(walk->iv, iv, DES_BLOCK_SIZE);
++ memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
+
+ out:
+ return ret;
+@@ -179,22 +188,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+ {
+- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+- return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
++ return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
+ }
+
+ static int cbc_des_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+ {
+- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+- return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
++ return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
+ }
+
+ static struct crypto_alg cbc_des_alg = {
+@@ -331,22 +338,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+ {
+- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+- return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
++ return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
+ }
+
+ static int cbc_des3_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+ {
+- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+- return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
++ return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
+ }
+
+ static struct crypto_alg cbc_des3_alg = {
+@@ -372,52 +377,80 @@ static struct crypto_alg cbc_des3_alg = {
+ }
+ };
+
++static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
++{
++ unsigned int i, n;
++
++ /* align to block size, max. PAGE_SIZE */
++ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
++ for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
++ memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
++ crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
++ }
++ return n;
++}
++
+ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
+- struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
++ struct s390_des_ctx *ctx,
++ struct blkcipher_walk *walk)
+ {
+ int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
+- unsigned int i, n, nbytes;
+- u8 buf[DES_BLOCK_SIZE];
+- u8 *out, *in;
++ unsigned int n, nbytes;
++ u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
++ u8 *out, *in, *ctrptr = ctrbuf;
++
++ if (!walk->nbytes)
++ return ret;
++
++ if (spin_trylock(&ctrblk_lock))
++ ctrptr = ctrblk;
+
+- memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
++ memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
+ while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+ while (nbytes >= DES_BLOCK_SIZE) {
+- /* align to block size, max. PAGE_SIZE */
+- n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
+- nbytes & ~(DES_BLOCK_SIZE - 1);
+- for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+- memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
+- DES_BLOCK_SIZE);
+- crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
++ if (ctrptr == ctrblk)
++ n = __ctrblk_init(ctrptr, nbytes);
++ else
++ n = DES_BLOCK_SIZE;
++ ret = crypt_s390_kmctr(func, ctx->key, out, in,
++ n, ctrptr);
++ if (ret < 0 || ret != n) {
++ if (ctrptr == ctrblk)
++ spin_unlock(&ctrblk_lock);
++ return -EIO;
+ }
+- ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
+- BUG_ON((ret < 0) || (ret != n));
+ if (n > DES_BLOCK_SIZE)
+- memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
++ memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
+ DES_BLOCK_SIZE);
+- crypto_inc(ctrblk, DES_BLOCK_SIZE);
++ crypto_inc(ctrptr, DES_BLOCK_SIZE);
+ out += n;
+ in += n;
+ nbytes -= n;
+ }
+ ret = blkcipher_walk_done(desc, walk, nbytes);
+ }
+-
++ if (ctrptr == ctrblk) {
++ if (nbytes)
++ memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
++ else
++ memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
++ spin_unlock(&ctrblk_lock);
++ }
+ /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
+ if (nbytes) {
+ out = walk->dst.virt.addr;
+ in = walk->src.virt.addr;
+ ret = crypt_s390_kmctr(func, ctx->key, buf, in,
+- DES_BLOCK_SIZE, ctrblk);
+- BUG_ON(ret < 0 || ret != DES_BLOCK_SIZE);
++ DES_BLOCK_SIZE, ctrbuf);
++ if (ret < 0 || ret != DES_BLOCK_SIZE)
++ return -EIO;
+ memcpy(out, buf, nbytes);
+- crypto_inc(ctrblk, DES_BLOCK_SIZE);
++ crypto_inc(ctrbuf, DES_BLOCK_SIZE);
+ ret = blkcipher_walk_done(desc, walk, 0);
++ memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
+ }
+- memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
+ return ret;
+ }
+
+diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
+index b1bd170f24b1..f6373f0e7749 100644
+--- a/arch/s390/crypto/ghash_s390.c
++++ b/arch/s390/crypto/ghash_s390.c
+@@ -72,14 +72,16 @@ static int ghash_update(struct shash_desc *desc,
+ if (!dctx->bytes) {
+ ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
+ GHASH_BLOCK_SIZE);
+- BUG_ON(ret != GHASH_BLOCK_SIZE);
++ if (ret != GHASH_BLOCK_SIZE)
++ return -EIO;
+ }
+ }
+
+ n = srclen & ~(GHASH_BLOCK_SIZE - 1);
+ if (n) {
+ ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
+- BUG_ON(ret != n);
++ if (ret != n)
++ return -EIO;
+ src += n;
+ srclen -= n;
+ }
+@@ -92,7 +94,7 @@ static int ghash_update(struct shash_desc *desc,
+ return 0;
+ }
+
+-static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
++static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+ {
+ u8 *buf = dctx->buffer;
+ int ret;
+@@ -103,21 +105,24 @@ static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+ memset(pos, 0, dctx->bytes);
+
+ ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
+- BUG_ON(ret != GHASH_BLOCK_SIZE);
++ if (ret != GHASH_BLOCK_SIZE)
++ return -EIO;
+ }
+
+ dctx->bytes = 0;
++ return 0;
+ }
+
+ static int ghash_final(struct shash_desc *desc, u8 *dst)
+ {
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+ struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
++ int ret;
+
+- ghash_flush(ctx, dctx);
+- memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
+-
+- return 0;
++ ret = ghash_flush(ctx, dctx);
++ if (!ret)
++ memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
++ return ret;
+ }
+
+ static struct shash_alg ghash_alg = {
+diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
+index bd37d09b9d3c..8620b0ec9c42 100644
+--- a/arch/s390/crypto/sha_common.c
++++ b/arch/s390/crypto/sha_common.c
+@@ -36,7 +36,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
+ if (index) {
+ memcpy(ctx->buf + index, data, bsize - index);
+ ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
+- BUG_ON(ret != bsize);
++ if (ret != bsize)
++ return -EIO;
+ data += bsize - index;
+ len -= bsize - index;
+ index = 0;
+@@ -46,7 +47,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
+ if (len >= bsize) {
+ ret = crypt_s390_kimd(ctx->func, ctx->state, data,
+ len & ~(bsize - 1));
+- BUG_ON(ret != (len & ~(bsize - 1)));
++ if (ret != (len & ~(bsize - 1)))
++ return -EIO;
+ data += ret;
+ len -= ret;
+ }
+@@ -88,7 +90,8 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
+ memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
+
+ ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
+- BUG_ON(ret != end);
++ if (ret != end)
++ return -EIO;
+
+ /* copy digest to out */
+ memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
+diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
+index a90d45e9dfb0..27c50f4d90cb 100644
+--- a/arch/s390/mm/page-states.c
++++ b/arch/s390/mm/page-states.c
+@@ -12,6 +12,8 @@
+ #include <linux/mm.h>
+ #include <linux/gfp.h>
+ #include <linux/init.h>
++#include <asm/setup.h>
++#include <asm/ipl.h>
+
+ #define ESSA_SET_STABLE 1
+ #define ESSA_SET_UNUSED 2
+@@ -41,6 +43,14 @@ void __init cmma_init(void)
+
+ if (!cmma_flag)
+ return;
++ /*
++ * Disable CMM for dump, otherwise the tprot based memory
++ * detection can fail because of unstable pages.
++ */
++ if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) {
++ cmma_flag = 0;
++ return;
++ }
+ asm volatile(
+ " .insn rrf,0xb9ab0000,%1,%1,0,0\n"
+ "0: la %0,0\n"
+diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
+index ba870bb6dd8e..f97134a2b691 100644
+--- a/arch/x86/include/asm/irq.h
++++ b/arch/x86/include/asm/irq.h
+@@ -25,6 +25,7 @@ extern void irq_ctx_init(int cpu);
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ #include <linux/cpumask.h>
++extern int check_irq_vectors_for_cpu_disable(void);
+ extern void fixup_irqs(void);
+ extern void irq_force_complete_move(int);
+ #endif
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index 2bda212a0010..1c041e07c3c9 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -971,6 +971,9 @@ static void x86_pmu_del(struct perf_event *event, int flags)
+ for (i = 0; i < cpuc->n_events; i++) {
+ if (event == cpuc->event_list[i]) {
+
++ if (i >= cpuc->n_events - cpuc->n_added)
++ --cpuc->n_added;
++
+ if (x86_pmu.put_event_constraints)
+ x86_pmu.put_event_constraints(cpuc, event);
+
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index fb2eb324b29f..687637b7f76a 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -222,6 +222,76 @@ void smp_x86_platform_ipi(struct pt_regs *regs)
+ EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
+
+ #ifdef CONFIG_HOTPLUG_CPU
++/*
++ * This cpu is going to be removed and its vectors migrated to the remaining
++ * online cpus. Check to see if there are enough vectors in the remaining cpus.
++ * This function is protected by stop_machine().
++ */
++int check_irq_vectors_for_cpu_disable(void)
++{
++ int irq, cpu;
++ unsigned int this_cpu, vector, this_count, count;
++ struct irq_desc *desc;
++ struct irq_data *data;
++ struct cpumask affinity_new, online_new;
++
++ this_cpu = smp_processor_id();
++ cpumask_copy(&online_new, cpu_online_mask);
++ cpu_clear(this_cpu, online_new);
++
++ this_count = 0;
++ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
++ irq = __this_cpu_read(vector_irq[vector]);
++ if (irq >= 0) {
++ desc = irq_to_desc(irq);
++ data = irq_desc_get_irq_data(desc);
++ cpumask_copy(&affinity_new, data->affinity);
++ cpu_clear(this_cpu, affinity_new);
++
++ /* Do not count inactive or per-cpu irqs. */
++ if (!irq_has_action(irq) || irqd_is_per_cpu(data))
++ continue;
++
++ /*
++ * A single irq may be mapped to multiple
++ * cpu's vector_irq[] (for example IOAPIC cluster
++ * mode). In this case we have two
++ * possibilities:
++ *
++ * 1) the resulting affinity mask is empty; that is
++ * this the down'd cpu is the last cpu in the irq's
++ * affinity mask, or
++ *
++ * 2) the resulting affinity mask is no longer
++ * a subset of the online cpus but the affinity
++ * mask is not zero; that is the down'd cpu is the
++ * last online cpu in a user set affinity mask.
++ */
++ if (cpumask_empty(&affinity_new) ||
++ !cpumask_subset(&affinity_new, &online_new))
++ this_count++;
++ }
++ }
++
++ count = 0;
++ for_each_online_cpu(cpu) {
++ if (cpu == this_cpu)
++ continue;
++ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
++ vector++) {
++ if (per_cpu(vector_irq, cpu)[vector] < 0)
++ count++;
++ }
++ }
++
++ if (count < this_count) {
++ pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
++ this_cpu, this_count, count);
++ return -ERANGE;
++ }
++ return 0;
++}
++
+ /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
+ void fixup_irqs(void)
+ {
+diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
+index 03920a15a632..28a3e62fcc50 100644
+--- a/arch/x86/kernel/quirks.c
++++ b/arch/x86/kernel/quirks.c
+@@ -525,7 +525,7 @@ static void __init quirk_amd_nb_node(struct pci_dev *dev)
+ return;
+
+ pci_read_config_dword(nb_ht, 0x60, &val);
+- node = val & 7;
++ node = pcibus_to_node(dev->bus) | (val & 7);
+ /*
+ * Some hardware may return an invalid node ID,
+ * so check it first:
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 9f548cb4a958..b88eadb2aeb0 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1278,6 +1278,7 @@ void cpu_disable_common(void)
+ int native_cpu_disable(void)
+ {
+ int cpu = smp_processor_id();
++ int ret;
+
+ /*
+ * Perhaps use cpufreq to drop frequency, but that could go
+@@ -1290,6 +1291,10 @@ int native_cpu_disable(void)
+ if (cpu == 0)
+ return -EBUSY;
+
++ ret = check_irq_vectors_for_cpu_disable();
++ if (ret)
++ return ret;
++
+ clear_local_APIC();
+
+ cpu_disable_common();
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index 405f2620392f..139415e2c5bf 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -38,6 +38,7 @@
+
+ #include "irq.h"
+ #include "i8254.h"
++#include "x86.h"
+
+ #ifndef CONFIG_X86_64
+ #define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
+@@ -364,6 +365,23 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
+ atomic_set(&pt->pending, 0);
+ ps->irq_ack = 1;
+
++ /*
++ * Do not allow the guest to program periodic timers with small
++ * interval, since the hrtimers are not throttled by the host
++ * scheduler.
++ */
++ if (ps->is_periodic) {
++ s64 min_period = min_timer_period_us * 1000LL;
++
++ if (pt->period < min_period) {
++ pr_info_ratelimited(
++ "kvm: requested %lld ns "
++ "i8254 timer period limited to %lld ns\n",
++ pt->period, min_period);
++ pt->period = min_period;
++ }
++ }
++
+ hrtimer_start(&pt->timer, ktime_add_ns(ktime_get(), interval),
+ HRTIMER_MODE_ABS);
+ }
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 757c71635346..176205ab0ca8 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -68,9 +68,6 @@
+ #define VEC_POS(v) ((v) & (32 - 1))
+ #define REG_POS(v) (((v) >> 5) << 4)
+
+-static unsigned int min_timer_period_us = 500;
+-module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
+-
+ static inline u32 apic_get_reg(struct kvm_lapic *apic, int reg_off)
+ {
+ return *((u32 *) (apic->regs + reg_off));
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 94a4672a4381..2102a1738580 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2904,10 +2904,8 @@ static int cr8_write_interception(struct vcpu_svm *svm)
+ u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
+ /* instruction emulation calls kvm_set_cr8() */
+ r = cr_interception(svm);
+- if (irqchip_in_kernel(svm->vcpu.kvm)) {
+- clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
++ if (irqchip_in_kernel(svm->vcpu.kvm))
+ return r;
+- }
+ if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
+ return r;
+ kvm_run->exit_reason = KVM_EXIT_SET_TPR;
+@@ -3462,6 +3460,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+ if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
+ return;
+
++ clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
++
+ if (irr == -1)
+ return;
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 7774cca80118..b9fefaf276c4 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -92,6 +92,9 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops);
+ int ignore_msrs = 0;
+ module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
+
++unsigned int min_timer_period_us = 500;
++module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
++
+ bool kvm_has_tsc_control;
+ EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
+ u32 kvm_max_guest_tsc_khz;
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index d36fe237c665..0e22f6411bc0 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -125,4 +125,6 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+ gva_t addr, void *val, unsigned int bytes,
+ struct x86_exception *exception);
+
++extern unsigned int min_timer_period_us;
++
+ #endif
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index bee75a6052a4..e56da773f504 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -424,7 +424,7 @@ void __init efi_reserve_boot_services(void)
+ * - Not within any part of the kernel
+ * - Not the bios reserved area
+ */
+- if ((start+size >= virt_to_phys(_text)
++ if ((start + size > virt_to_phys(_text)
+ && start <= virt_to_phys(_end)) ||
+ !e820_all_mapped(start, start+size, E820_RAM) ||
+ memblock_x86_check_reserved_size(&start, &size,
+diff --git a/block/blk-lib.c b/block/blk-lib.c
+index 2b461b496a78..36751e211bb8 100644
+--- a/block/blk-lib.c
++++ b/block/blk-lib.c
+@@ -101,6 +101,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+
+ atomic_inc(&bb.done);
+ submit_bio(type, bio);
++
++ /*
++ * We can loop for a long time in here, if someone does
++ * full device discards (like mkfs). Be nice and allow
++ * us to schedule out to avoid softlocking if preempt
++ * is disabled.
++ */
++ cond_resched();
+ }
+
+ /* Wait for bios in-flight */
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 5016de5e91f6..f3244291bd9c 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -33,6 +33,7 @@
+ #include <linux/proc_fs.h>
+ #include <linux/acpi.h>
+ #include <linux/slab.h>
++#include <linux/regulator/machine.h>
+ #ifdef CONFIG_X86
+ #include <asm/mpspec.h>
+ #endif
+@@ -917,6 +918,14 @@ void __init acpi_early_init(void)
+ goto error0;
+ }
+
++ /*
++ * If the system is using ACPI then we can be reasonably
++ * confident that any regulators are managed by the firmware
++ * so tell the regulator core it has everything it needs to
++ * know.
++ */
++ regulator_has_full_constraints();
++
+ return;
+
+ error0:
+diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
+index 605a2954ef17..abe45cbd60a8 100644
+--- a/drivers/acpi/processor_throttling.c
++++ b/drivers/acpi/processor_throttling.c
+@@ -59,6 +59,12 @@ struct throttling_tstate {
+ int target_state; /* target T-state */
+ };
+
++struct acpi_processor_throttling_arg {
++ struct acpi_processor *pr;
++ int target_state;
++ bool force;
++};
++
+ #define THROTTLING_PRECHANGE (1)
+ #define THROTTLING_POSTCHANGE (2)
+
+@@ -1063,16 +1069,24 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
+ return 0;
+ }
+
++static long acpi_processor_throttling_fn(void *data)
++{
++ struct acpi_processor_throttling_arg *arg = data;
++ struct acpi_processor *pr = arg->pr;
++
++ return pr->throttling.acpi_processor_set_throttling(pr,
++ arg->target_state, arg->force);
++}
++
+ int acpi_processor_set_throttling(struct acpi_processor *pr,
+ int state, bool force)
+ {
+- cpumask_var_t saved_mask;
+ int ret = 0;
+ unsigned int i;
+ struct acpi_processor *match_pr;
+ struct acpi_processor_throttling *p_throttling;
++ struct acpi_processor_throttling_arg arg;
+ struct throttling_tstate t_state;
+- cpumask_var_t online_throttling_cpus;
+
+ if (!pr)
+ return -EINVAL;
+@@ -1083,14 +1097,6 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
+ if ((state < 0) || (state > (pr->throttling.state_count - 1)))
+ return -EINVAL;
+
+- if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+- return -ENOMEM;
+-
+- if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
+- free_cpumask_var(saved_mask);
+- return -ENOMEM;
+- }
+-
+ if (cpu_is_offline(pr->id)) {
+ /*
+ * the cpu pointed by pr->id is offline. Unnecessary to change
+@@ -1099,17 +1105,15 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
+ return -ENODEV;
+ }
+
+- cpumask_copy(saved_mask, &current->cpus_allowed);
+ t_state.target_state = state;
+ p_throttling = &(pr->throttling);
+- cpumask_and(online_throttling_cpus, cpu_online_mask,
+- p_throttling->shared_cpu_map);
++
+ /*
+ * The throttling notifier will be called for every
+ * affected cpu in order to get one proper T-state.
+ * The notifier event is THROTTLING_PRECHANGE.
+ */
+- for_each_cpu(i, online_throttling_cpus) {
++ for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
+ t_state.cpu = i;
+ acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
+ &t_state);
+@@ -1121,21 +1125,18 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
+ * it can be called only for the cpu pointed by pr.
+ */
+ if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
+- /* FIXME: use work_on_cpu() */
+- if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
+- /* Can't migrate to the pr->id CPU. Exit */
+- ret = -ENODEV;
+- goto exit;
+- }
+- ret = p_throttling->acpi_processor_set_throttling(pr,
+- t_state.target_state, force);
++ arg.pr = pr;
++ arg.target_state = state;
++ arg.force = force;
++ ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
+ } else {
+ /*
+ * When the T-state coordination is SW_ALL or HW_ALL,
+ * it is necessary to set T-state for every affected
+ * cpus.
+ */
+- for_each_cpu(i, online_throttling_cpus) {
++ for_each_cpu_and(i, cpu_online_mask,
++ p_throttling->shared_cpu_map) {
+ match_pr = per_cpu(processors, i);
+ /*
+ * If the pointer is invalid, we will report the
+@@ -1156,13 +1157,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
+ "on CPU %d\n", i));
+ continue;
+ }
+- t_state.cpu = i;
+- /* FIXME: use work_on_cpu() */
+- if (set_cpus_allowed_ptr(current, cpumask_of(i)))
+- continue;
+- ret = match_pr->throttling.
+- acpi_processor_set_throttling(
+- match_pr, t_state.target_state, force);
++
++ arg.pr = match_pr;
++ arg.target_state = state;
++ arg.force = force;
++ ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
++ &arg);
+ }
+ }
+ /*
+@@ -1171,17 +1171,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
+ * affected cpu to update the T-states.
+ * The notifier event is THROTTLING_POSTCHANGE
+ */
+- for_each_cpu(i, online_throttling_cpus) {
++ for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
+ t_state.cpu = i;
+ acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
+ &t_state);
+ }
+- /* restore the previous state */
+- /* FIXME: use work_on_cpu() */
+- set_cpus_allowed_ptr(current, saved_mask);
+-exit:
+- free_cpumask_var(online_throttling_cpus);
+- free_cpumask_var(saved_mask);
++
+ return ret;
+ }
+
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index e19898de38b8..2ebfdd20e692 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -646,6 +646,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
+ union acpi_object *o;
+ struct acpi_video_device_brightness *br = NULL;
+ int result = -EINVAL;
++ u32 value;
+
+ if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
+@@ -676,7 +677,12 @@ acpi_video_init_brightness(struct acpi_video_device *device)
+ printk(KERN_ERR PREFIX "Invalid data\n");
+ continue;
+ }
+- br->levels[count] = (u32) o->integer.value;
++ value = (u32) o->integer.value;
++ /* Skip duplicate entries */
++ if (count > 2 && br->levels[count - 1] == value)
++ continue;
++
++ br->levels[count] = value;
+
+ if (br->levels[count] > max_level)
+ max_level = br->levels[count];
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index f4000eea3d09..0feffc390d04 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -61,6 +61,7 @@ enum board_ids {
+ /* board IDs by feature in alphabetical order */
+ board_ahci,
+ board_ahci_ign_iferr,
++ board_ahci_noncq,
+ board_ahci_nosntf,
+ board_ahci_yes_fbs,
+
+@@ -123,6 +124,13 @@ static const struct ata_port_info ahci_port_info[] = {
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
++ [board_ahci_noncq] = {
++ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_ops,
++ },
+ [board_ahci_nosntf] =
+ {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
+@@ -458,6 +466,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
+ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+
++ /*
++ * Samsung SSDs found on some macbooks. NCQ times out.
++ * https://bugzilla.kernel.org/show_bug.cgi?id=60731
++ */
++ { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
++
+ /* Enmotus */
+ { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
+
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 72bbb5ed8096..b3f0f5ad8c41 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2188,6 +2188,16 @@ int ata_dev_configure(struct ata_device *dev)
+ if (rc)
+ return rc;
+
++ /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
++ if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
++ (id[76] & 0xe) == 0x2)
++ dev->horkage |= ATA_HORKAGE_NOLPM;
++
++ if (dev->horkage & ATA_HORKAGE_NOLPM) {
++ ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
++ dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
++ }
++
+ /* let ACPI work its magic */
+ rc = ata_acpi_on_devcfg(dev);
+ if (rc)
+@@ -4099,6 +4109,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+
+ /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
+ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
++ { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+
+ /* Blacklist entries taken from Silicon Image 3124/3132
+ Windows driver .inf file - also several Linux problem reports */
+@@ -4143,6 +4154,23 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+
++ /*
++ * Some WD SATA-I drives spin up and down erratically when the link
++ * is put into the slumber mode. We don't have full list of the
++ * affected devices. Disable LPM if the device matches one of the
++ * known prefixes and is SATA-1. As a side effect LPM partial is
++ * lost too.
++ *
++ * https://bugzilla.kernel.org/show_bug.cgi?id=57211
++ */
++ { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
++ { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
++ { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
++ { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
++ { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
++ { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
++ { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
++
+ /* End Marker */
+ { }
+ };
+diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
+index f5c35beadc65..0ba32fe00d13 100644
+--- a/drivers/ata/libata-pmp.c
++++ b/drivers/ata/libata-pmp.c
+@@ -447,8 +447,11 @@ static void sata_pmp_quirks(struct ata_port *ap)
+ * otherwise. Don't try hard to recover it.
+ */
+ ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
+- } else if (vendor == 0x197b && devid == 0x2352) {
+- /* chip found in Thermaltake BlackX Duet, jmicron JMB350? */
++ } else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) {
++ /*
++ * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350?
++ * 0x0325: jmicron JMB394.
++ */
+ ata_for_each_link(link, ap, EDGE) {
+ /* SRST breaks detection and disks get misclassified
+ * LPM disabled to avoid potential problems
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index dd332e5b65d0..8460e621ae02 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -111,12 +111,14 @@ static const char *ata_lpm_policy_names[] = {
+ [ATA_LPM_MIN_POWER] = "min_power",
+ };
+
+-static ssize_t ata_scsi_lpm_store(struct device *dev,
++static ssize_t ata_scsi_lpm_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
+- struct Scsi_Host *shost = class_to_shost(dev);
++ struct Scsi_Host *shost = class_to_shost(device);
+ struct ata_port *ap = ata_shost_to_port(shost);
++ struct ata_link *link;
++ struct ata_device *dev;
+ enum ata_lpm_policy policy;
+ unsigned long flags;
+
+@@ -132,10 +134,20 @@ static ssize_t ata_scsi_lpm_store(struct device *dev,
+ return -EINVAL;
+
+ spin_lock_irqsave(ap->lock, flags);
++
++ ata_for_each_link(link, ap, EDGE) {
++ ata_for_each_dev(dev, &ap->link, ENABLED) {
++ if (dev->horkage & ATA_HORKAGE_NOLPM) {
++ count = -EOPNOTSUPP;
++ goto out_unlock;
++ }
++ }
++ }
++
+ ap->target_lpm_policy = policy;
+ ata_port_schedule_eh(ap);
++out_unlock:
+ spin_unlock_irqrestore(ap->lock, flags);
+-
+ return count;
+ }
+
+diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
+index 9dfb40b8c2c9..0c4ed89b6aa5 100644
+--- a/drivers/ata/sata_sil.c
++++ b/drivers/ata/sata_sil.c
+@@ -157,6 +157,7 @@ static const struct sil_drivelist {
+ { "ST380011ASL", SIL_QUIRK_MOD15WRITE },
+ { "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
+ { "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
++ { "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE },
+ { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
+ { }
+ };
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 7b2ec5908413..96b8cb7573a4 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -1281,13 +1281,16 @@ static void blkback_changed(struct xenbus_device *dev,
+ case XenbusStateReconfiguring:
+ case XenbusStateReconfigured:
+ case XenbusStateUnknown:
+- case XenbusStateClosed:
+ break;
+
+ case XenbusStateConnected:
+ blkfront_connect(info);
+ break;
+
++ case XenbusStateClosed:
++ if (dev->state == XenbusStateClosed)
++ break;
++ /* Missed the backend's Closing state -- fallthrough */
+ case XenbusStateClosing:
+ blkfront_closing(info);
+ break;
+diff --git a/drivers/char/raw.c b/drivers/char/raw.c
+index b6de2c047145..5ded1a531e90 100644
+--- a/drivers/char/raw.c
++++ b/drivers/char/raw.c
+@@ -190,7 +190,7 @@ static int bind_get(int number, dev_t *dev)
+ struct raw_device_data *rawdev;
+ struct block_device *bdev;
+
+- if (number <= 0 || number >= MAX_RAW_MINORS)
++ if (number <= 0 || number >= max_raw_minors)
+ return -EINVAL;
+
+ rawdev = &raw_devices[number];
+diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
+index 659518015972..3f89386361be 100644
+--- a/drivers/dma/ioat/dma.c
++++ b/drivers/dma/ioat/dma.c
+@@ -75,7 +75,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
+ attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
+ for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
+ chan = ioat_chan_by_index(instance, bit);
+- tasklet_schedule(&chan->cleanup_task);
++ if (test_bit(IOAT_RUN, &chan->state))
++ tasklet_schedule(&chan->cleanup_task);
+ }
+
+ writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
+@@ -91,7 +92,8 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
+ {
+ struct ioat_chan_common *chan = data;
+
+- tasklet_schedule(&chan->cleanup_task);
++ if (test_bit(IOAT_RUN, &chan->state))
++ tasklet_schedule(&chan->cleanup_task);
+
+ return IRQ_HANDLED;
+ }
+@@ -113,7 +115,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
+ chan->timer.function = device->timer_fn;
+ chan->timer.data = data;
+ tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
+- tasklet_disable(&chan->cleanup_task);
+ }
+
+ /**
+@@ -356,13 +357,43 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
+ writel(((u64) chan->completion_dma) >> 32,
+ chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
+- tasklet_enable(&chan->cleanup_task);
++ set_bit(IOAT_RUN, &chan->state);
+ ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
+ dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
+ __func__, ioat->desccount);
+ return ioat->desccount;
+ }
+
++void ioat_stop(struct ioat_chan_common *chan)
++{
++ struct ioatdma_device *device = chan->device;
++ struct pci_dev *pdev = device->pdev;
++ int chan_id = chan_num(chan);
++
++ /* 1/ stop irq from firing tasklets
++ * 2/ stop the tasklet from re-arming irqs
++ */
++ clear_bit(IOAT_RUN, &chan->state);
++
++ /* flush inflight interrupts */
++#ifdef CONFIG_PCI_MSI
++ if (pdev->msix_enabled) {
++ struct msix_entry *msix = &device->msix_entries[chan_id];
++ synchronize_irq(msix->vector);
++ } else
++#endif
++ synchronize_irq(pdev->irq);
++
++ /* flush inflight timers */
++ del_timer_sync(&chan->timer);
++
++ /* flush inflight tasklet runs */
++ tasklet_kill(&chan->cleanup_task);
++
++ /* final cleanup now that everything is quiesced and can't re-arm */
++ device->cleanup_fn((unsigned long) &chan->common);
++}
++
+ /**
+ * ioat1_dma_free_chan_resources - release all the descriptors
+ * @chan: the channel to be cleaned
+@@ -381,9 +412,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
+ if (ioat->desccount == 0)
+ return;
+
+- tasklet_disable(&chan->cleanup_task);
+- del_timer_sync(&chan->timer);
+- ioat1_cleanup(ioat);
++ ioat_stop(chan);
+
+ /* Delay 100ms after reset to allow internal DMA logic to quiesce
+ * before removing DMA descriptor resources.
+@@ -528,8 +557,11 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
+ static void ioat1_cleanup_event(unsigned long data)
+ {
+ struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
++ struct ioat_chan_common *chan = &ioat->base;
+
+ ioat1_cleanup(ioat);
++ if (!test_bit(IOAT_RUN, &chan->state))
++ return;
+ writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
+ }
+
+diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
+index 8bebddd189c7..466e0f34c15f 100644
+--- a/drivers/dma/ioat/dma.h
++++ b/drivers/dma/ioat/dma.h
+@@ -344,6 +344,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
+ dma_addr_t *phys_complete);
+ void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
+ void ioat_kobject_del(struct ioatdma_device *device);
++void ioat_stop(struct ioat_chan_common *chan);
+ extern const struct sysfs_ops ioat_sysfs_ops;
+ extern struct ioat_sysfs_entry ioat_version_attr;
+ extern struct ioat_sysfs_entry ioat_cap_attr;
+diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
+index cb8864d45601..e60933efdb0e 100644
+--- a/drivers/dma/ioat/dma_v2.c
++++ b/drivers/dma/ioat/dma_v2.c
+@@ -189,8 +189,11 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
+ void ioat2_cleanup_event(unsigned long data)
+ {
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
++ struct ioat_chan_common *chan = &ioat->base;
+
+ ioat2_cleanup(ioat);
++ if (!test_bit(IOAT_RUN, &chan->state))
++ return;
+ writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
+ }
+
+@@ -542,10 +545,10 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
+ ioat->issued = 0;
+ ioat->tail = 0;
+ ioat->alloc_order = order;
++ set_bit(IOAT_RUN, &chan->state);
+ spin_unlock_bh(&ioat->prep_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
+
+- tasklet_enable(&chan->cleanup_task);
+ ioat2_start_null_desc(ioat);
+
+ /* check that we got off the ground */
+@@ -555,7 +558,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
+ } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
+
+ if (is_ioat_active(status) || is_ioat_idle(status)) {
+- set_bit(IOAT_RUN, &chan->state);
+ return 1 << ioat->alloc_order;
+ } else {
+ u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+@@ -798,11 +800,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
+ if (!ioat->ring)
+ return;
+
+- tasklet_disable(&chan->cleanup_task);
+- del_timer_sync(&chan->timer);
+- device->cleanup_fn((unsigned long) c);
++ ioat_stop(chan);
+ device->reset_hw(chan);
+- clear_bit(IOAT_RUN, &chan->state);
+
+ spin_lock_bh(&chan->cleanup_lock);
+ spin_lock_bh(&ioat->prep_lock);
+diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
+index 714560fdd049..86800310ce80 100644
+--- a/drivers/dma/ioat/dma_v3.c
++++ b/drivers/dma/ioat/dma_v3.c
+@@ -325,8 +325,11 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
+ static void ioat3_cleanup_event(unsigned long data)
+ {
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
++ struct ioat_chan_common *chan = &ioat->base;
+
+ ioat3_cleanup(ioat);
++ if (!test_bit(IOAT_RUN, &chan->state))
++ return;
+ writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
+ }
+
+diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
+index 13259cad0ceb..4b27c5461faa 100644
+--- a/drivers/dma/ste_dma40.c
++++ b/drivers/dma/ste_dma40.c
+@@ -1202,6 +1202,7 @@ static void dma_tasklet(unsigned long data)
+ struct d40_chan *d40c = (struct d40_chan *) data;
+ struct d40_desc *d40d;
+ unsigned long flags;
++ bool callback_active;
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+@@ -1225,6 +1226,7 @@ static void dma_tasklet(unsigned long data)
+ }
+
+ /* Callback to client */
++ callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
+ callback = d40d->txd.callback;
+ callback_param = d40d->txd.callback_param;
+
+@@ -1249,7 +1251,7 @@ static void dma_tasklet(unsigned long data)
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+
+- if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
++ if (callback_active && callback)
+ callback(callback_param);
+
+ return;
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index d69144a09043..7db101bcb770 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -323,7 +323,8 @@ static void edac_mc_workq_function(struct work_struct *work_req)
+ *
+ * called with the mem_ctls_mutex held
+ */
+-static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
++static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
++ bool init)
+ {
+ debugf0("%s()\n", __func__);
+
+@@ -331,7 +332,9 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+ if (mci->op_state != OP_RUNNING_POLL)
+ return;
+
+- INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
++ if (init)
++ INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
++
+ queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
+ }
+
+@@ -391,7 +394,7 @@ void edac_mc_reset_delay_period(int value)
+ list_for_each(item, &mc_devices) {
+ mci = list_entry(item, struct mem_ctl_info, link);
+
+- edac_mc_workq_setup(mci, (unsigned long) value);
++ edac_mc_workq_setup(mci, value, false);
+ }
+
+ mutex_unlock(&mem_ctls_mutex);
+@@ -539,7 +542,7 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
+ /* This instance is NOW RUNNING */
+ mci->op_state = OP_RUNNING_POLL;
+
+- edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
++ edac_mc_workq_setup(mci, edac_mc_get_poll_msec(), true);
+ } else {
+ mci->op_state = OP_RUNNING_INTERRUPT;
+ }
+diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
+index 2244df0b2ffb..1e084260c9c0 100644
+--- a/drivers/edac/i7300_edac.c
++++ b/drivers/edac/i7300_edac.c
+@@ -962,33 +962,35 @@ static int __devinit i7300_get_devices(struct mem_ctl_info *mci)
+
+ /* Attempt to 'get' the MCH register we want */
+ pdev = NULL;
+- while (!pvt->pci_dev_16_1_fsb_addr_map ||
+- !pvt->pci_dev_16_2_fsb_err_regs) {
+- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+- PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev);
+- if (!pdev) {
+- /* End of list, leave */
+- i7300_printk(KERN_ERR,
+- "'system address,Process Bus' "
+- "device not found:"
+- "vendor 0x%x device 0x%x ERR funcs "
+- "(broken BIOS?)\n",
+- PCI_VENDOR_ID_INTEL,
+- PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
+- goto error;
+- }
+-
++ while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
++ PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
++ pdev))) {
+ /* Store device 16 funcs 1 and 2 */
+ switch (PCI_FUNC(pdev->devfn)) {
+ case 1:
+- pvt->pci_dev_16_1_fsb_addr_map = pdev;
++ if (!pvt->pci_dev_16_1_fsb_addr_map)
++ pvt->pci_dev_16_1_fsb_addr_map =
++ pci_dev_get(pdev);
+ break;
+ case 2:
+- pvt->pci_dev_16_2_fsb_err_regs = pdev;
++ if (!pvt->pci_dev_16_2_fsb_err_regs)
++ pvt->pci_dev_16_2_fsb_err_regs =
++ pci_dev_get(pdev);
+ break;
+ }
+ }
+
++ if (!pvt->pci_dev_16_1_fsb_addr_map ||
++ !pvt->pci_dev_16_2_fsb_err_regs) {
++ /* At least one device was not found */
++ i7300_printk(KERN_ERR,
++ "'system address,Process Bus' device not found:"
++ "vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
++ PCI_VENDOR_ID_INTEL,
++ PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
++ goto error;
++ }
++
+ debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->pci_dev_16_0_fsb_ctlr),
+ pvt->pci_dev_16_0_fsb_ctlr->vendor,
+diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
+index b3ccefac46a3..4c18b3c78f03 100644
+--- a/drivers/edac/i7core_edac.c
++++ b/drivers/edac/i7core_edac.c
+@@ -1365,14 +1365,19 @@ static int i7core_get_onedevice(struct pci_dev **prev,
+ * is at addr 8086:2c40, instead of 8086:2c41. So, we need
+ * to probe for the alternate address in case of failure
+ */
+- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
++ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) {
++ pci_dev_get(*prev); /* pci_get_device will put it */
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
++ }
+
+- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
++ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE &&
++ !pdev) {
++ pci_dev_get(*prev); /* pci_get_device will put it */
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
+ *prev);
++ }
+
+ if (!pdev) {
+ if (*prev) {
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 61b708b60faa..2ea8a96e0f14 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -7252,6 +7252,20 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
+ goto err_unpin;
+ }
+
++ /*
++ * BSpec MI_DISPLAY_FLIP for IVB:
++ * "The full packet must be contained within the same cache line."
++ *
++ * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
++ * cacheline, if we ever start emitting more commands before
++ * the MI_DISPLAY_FLIP we may need to first emit everything else,
++ * then do the cacheline alignment, and finally emit the
++ * MI_DISPLAY_FLIP.
++ */
++ ret = intel_ring_cacheline_align(ring);
++ if (ret)
++ goto err_unpin;
++
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto err_unpin;
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 72163e87f0da..ca7507616db9 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -483,6 +483,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
+ uint8_t msg[20];
+ int msg_bytes;
+ uint8_t ack;
++ int retry;
+
+ intel_dp_check_edp(intel_dp);
+ if (send_bytes > 16)
+@@ -493,18 +494,20 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
+ msg[3] = send_bytes - 1;
+ memcpy(&msg[4], send, send_bytes);
+ msg_bytes = send_bytes + 4;
+- for (;;) {
++ for (retry = 0; retry < 7; retry++) {
+ ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
+ if (ret < 0)
+ return ret;
+ if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
+- break;
++ return send_bytes;
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+- udelay(100);
++ usleep_range(400, 500);
+ else
+ return -EIO;
+ }
+- return send_bytes;
++
++ DRM_ERROR("too many retries, giving up\n");
++ return -EIO;
+ }
+
+ /* Write a single byte to the aux channel in native mode */
+@@ -526,6 +529,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
+ int reply_bytes;
+ uint8_t ack;
+ int ret;
++ int retry;
+
+ intel_dp_check_edp(intel_dp);
+ msg[0] = AUX_NATIVE_READ << 4;
+@@ -536,7 +540,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
+ msg_bytes = 4;
+ reply_bytes = recv_bytes + 1;
+
+- for (;;) {
++ for (retry = 0; retry < 7; retry++) {
+ ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
+ reply, reply_bytes);
+ if (ret == 0)
+@@ -549,10 +553,13 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
+ return ret - 1;
+ }
+ else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
+- udelay(100);
++ usleep_range(400, 500);
+ else
+ return -EIO;
+ }
++
++ DRM_ERROR("too many retries, giving up\n");
++ return -EIO;
+ }
+
+ static int
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 3c55cf62b9f4..8ee068ef401c 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -1213,6 +1213,27 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
+ return 0;
+ }
+
++/* Align the ring tail to a cacheline boundary */
++int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
++{
++ int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
++ int ret;
++
++ if (num_dwords == 0)
++ return 0;
++
++ ret = intel_ring_begin(ring, num_dwords);
++ if (ret)
++ return ret;
++
++ while (num_dwords--)
++ intel_ring_emit(ring, MI_NOOP);
++
++ intel_ring_advance(ring);
++
++ return 0;
++}
++
+ void intel_ring_advance(struct intel_ring_buffer *ring)
+ {
+ ring->tail &= ring->size - 1;
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
+index 68281c96c558..d83cc975865f 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
+@@ -174,6 +174,7 @@ static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
+ }
+
+ int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
++int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
+
+ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+ u32 data)
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 038570aadf7f..cd98c0618250 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -956,10 +956,13 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
+ ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev, &ss,
+ ATOM_DP_SS_ID1);
+- } else
++ } else {
+ ss_enabled =
+ radeon_atombios_get_ppll_ss_info(rdev, &ss,
+ ATOM_DP_SS_ID1);
++ }
++ /* disable spread spectrum on DCE3 DP */
++ ss_enabled = false;
+ }
+ break;
+ case ATOM_ENCODER_MODE_LVDS:
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 57e45c64fc95..f7e3cc079446 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -2315,14 +2315,18 @@ int r600_ring_test(struct radeon_device *rdev)
+ void r600_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+ {
++ u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
++ PACKET3_SH_ACTION_ENA;
++
++ if (rdev->family >= CHIP_RV770)
++ cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
++
+ if (rdev->wb.use_event) {
+ u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
+ (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
+ /* flush read cache over gart */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
+- radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+- PACKET3_VC_ACTION_ENA |
+- PACKET3_SH_ACTION_ENA);
++ radeon_ring_write(rdev, cp_coher_cntl);
+ radeon_ring_write(rdev, 0xFFFFFFFF);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 10); /* poll interval */
+@@ -2336,9 +2340,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
+ } else {
+ /* flush read cache over gart */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
+- radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+- PACKET3_VC_ACTION_ENA |
+- PACKET3_SH_ACTION_ENA);
++ radeon_ring_write(rdev, cp_coher_cntl);
+ radeon_ring_write(rdev, 0xFFFFFFFF);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 10); /* poll interval */
+diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
+index d4d23a8d7226..cb29480ad280 100644
+--- a/drivers/gpu/drm/radeon/r600d.h
++++ b/drivers/gpu/drm/radeon/r600d.h
+@@ -838,6 +838,7 @@
+ #define PACKET3_INDIRECT_BUFFER 0x32
+ #define PACKET3_SURFACE_SYNC 0x43
+ # define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
++# define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */
+ # define PACKET3_TC_ACTION_ENA (1 << 23)
+ # define PACKET3_VC_ACTION_ENA (1 << 24)
+ # define PACKET3_CB_ACTION_ENA (1 << 25)
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index cda89c6b74f8..e4e455ee70d0 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -2735,6 +2735,10 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
+ /* tell the bios not to handle mode switching */
+ bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
+
++ /* clear the vbios dpms state */
++ if (ASIC_IS_DCE4(rdev))
++ bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
++
+ if (rdev->family >= CHIP_R600) {
+ WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
+ WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
+diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
+index cf203510b16b..894b5f0fb08c 100644
+--- a/drivers/gpu/drm/radeon/radeon_i2c.c
++++ b/drivers/gpu/drm/radeon/radeon_i2c.c
+@@ -998,6 +998,9 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
+ /* Add the default buses */
+ void radeon_i2c_init(struct radeon_device *rdev)
+ {
++ if (radeon_hw_i2c)
++ DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
++
+ if (rdev->is_atom_bios)
+ radeon_atombios_i2c_init(rdev);
+ else
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index d58eccbd7f24..b0a0ee458563 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -573,8 +573,10 @@ void radeon_pm_resume(struct radeon_device *rdev)
+ rdev->pm.current_clock_mode_index = 0;
+ rdev->pm.current_sclk = rdev->pm.default_sclk;
+ rdev->pm.current_mclk = rdev->pm.default_mclk;
+- rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+- rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
++ if (rdev->pm.power_state) {
++ rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
++ rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
++ }
+ if (rdev->pm.pm_method == PM_METHOD_DYNPM
+ && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
+ rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index 650c9f0b6642..d7f3df97427a 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -45,7 +45,6 @@ struct vmbus_connection vmbus_connection = {
+ int vmbus_connect(void)
+ {
+ int ret = 0;
+- int t;
+ struct vmbus_channel_msginfo *msginfo = NULL;
+ struct vmbus_channel_initiate_contact *msg;
+ unsigned long flags;
+@@ -132,16 +131,7 @@ int vmbus_connect(void)
+ }
+
+ /* Wait for the connection response */
+- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
+- if (t == 0) {
+- spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
+- flags);
+- list_del(&msginfo->msglistentry);
+- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
+- flags);
+- ret = -ETIMEDOUT;
+- goto cleanup;
+- }
++ wait_for_completion(&msginfo->waitevent);
+
+ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+ list_del(&msginfo->msglistentry);
+diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
+index 20d1b2ddffb6..e19fef7acfa5 100644
+--- a/drivers/hwmon/max1668.c
++++ b/drivers/hwmon/max1668.c
+@@ -243,7 +243,7 @@ static ssize_t set_temp_min(struct device *dev,
+ data->temp_min[index] = SENSORS_LIMIT(temp/1000, -128, 127);
+ if (i2c_smbus_write_byte_data(client,
+ MAX1668_REG_LIML_WR(index),
+- data->temp_max[index]))
++ data->temp_min[index]))
+ count = -EIO;
+ mutex_unlock(&data->update_lock);
+
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index dbd4fa5df357..949ea64077ee 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -137,6 +137,7 @@ config I2C_PIIX4
+ ATI SB700
+ ATI SB800
+ AMD Hudson-2
++ AMD ML
+ AMD CZ
+ Serverworks OSB4
+ Serverworks CSB5
+diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
+index 14b588cde2fd..c60f8e1790f9 100644
+--- a/drivers/i2c/busses/i2c-piix4.c
++++ b/drivers/i2c/busses/i2c-piix4.c
+@@ -22,7 +22,7 @@
+ Intel PIIX4, 440MX
+ Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
+ ATI IXP200, IXP300, IXP400, SB600, SB700, SB800
+- AMD Hudson-2, CZ
++ AMD Hudson-2, ML, CZ
+ SMSC Victory66
+
+ Note: we assume there can only be one device, with one SMBus interface.
+@@ -231,7 +231,8 @@ static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
+ const struct pci_device_id *id)
+ {
+ unsigned short smba_idx = 0xcd6;
+- u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en = 0x2c;
++ u8 smba_en_lo, smba_en_hi, smb_en, smb_en_status;
++ u8 i2ccfg, i2ccfg_offset = 0x10;
+
+ /* SB800 and later SMBus does not support forcing address */
+ if (force || force_addr) {
+@@ -241,6 +242,16 @@ static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
+ }
+
+ /* Determine the address of the SMBus areas */
++ if ((PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
++ PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
++ PIIX4_dev->revision >= 0x41) ||
++ (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
++ PIIX4_dev->device == 0x790b &&
++ PIIX4_dev->revision >= 0x49))
++ smb_en = 0x00;
++ else
++ smb_en = 0x2c;
++
+ if (!request_region(smba_idx, 2, "smba_idx")) {
+ dev_err(&PIIX4_dev->dev, "SMBus base address index region "
+ "0x%x already in use!\n", smba_idx);
+@@ -252,13 +263,20 @@ static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
+ smba_en_hi = inb_p(smba_idx + 1);
+ release_region(smba_idx, 2);
+
+- if ((smba_en_lo & 1) == 0) {
++ if (!smb_en) {
++ smb_en_status = smba_en_lo & 0x10;
++ piix4_smba = smba_en_hi << 8;
++ } else {
++ smb_en_status = smba_en_lo & 0x01;
++ piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
++ }
++
++ if (!smb_en_status) {
+ dev_err(&PIIX4_dev->dev,
+ "Host SMBus controller not enabled!\n");
+ return -ENODEV;
+ }
+
+- piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
+ if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
+ return -ENODEV;
+
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index 4fa2b11331ae..44fde43c9bc6 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -584,8 +584,9 @@ static int __init intel_idle_init(void)
+ intel_idle_cpuidle_driver_init();
+ retval = cpuidle_register_driver(&intel_idle_driver);
+ if (retval) {
++ struct cpuidle_driver *drv = cpuidle_get_driver();
+ printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
+- cpuidle_get_driver()->name);
++ drv ? drv->name : "none");
+ return retval;
+ }
+
+diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
+index 5a070e8c983d..d8517fcca325 100644
+--- a/drivers/infiniband/hw/qib/qib_iba7322.c
++++ b/drivers/infiniband/hw/qib/qib_iba7322.c
+@@ -2279,6 +2279,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+
++ /* ensure previous Tx parameters are not still forced */
++ qib_write_kreg_port(ppd, krp_tx_deemph_override,
++ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
++ reset_tx_deemphasis_override));
++
+ if (qib_compat_ddr_negotiate) {
+ ppd->cpspec->ibdeltainprog = 1;
+ ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
+diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
+index 828609fa4d28..c0d93d474ccb 100644
+--- a/drivers/infiniband/hw/qib/qib_ud.c
++++ b/drivers/infiniband/hw/qib/qib_ud.c
+@@ -57,13 +57,20 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
+ struct qib_sge *sge;
+ struct ib_wc wc;
+ u32 length;
++ enum ib_qp_type sqptype, dqptype;
+
+ qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
+ if (!qp) {
+ ibp->n_pkt_drops++;
+ return;
+ }
+- if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
++
++ sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
++ IB_QPT_UD : sqp->ibqp.qp_type;
++ dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
++ IB_QPT_UD : qp->ibqp.qp_type;
++
++ if (dqptype != sqptype ||
+ !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
+ ibp->n_pkt_drops++;
+ goto drop;
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index b4a4aaf81793..1a51b3d9ebb5 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -884,7 +884,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
+
+ /* If range covers entire pagetable, free it */
+ if (!(start_pfn > level_pfn ||
+- last_pfn < level_pfn + level_size(level))) {
++ last_pfn < level_pfn + level_size(level) - 1)) {
+ dma_clear_pte(pte);
+ domain_flush_cache(domain, pte, sizeof(*pte));
+ free_pgtable_page(level_pte);
+diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
+index faa4741df6d3..e10b6bb2ff2f 100644
+--- a/drivers/md/Kconfig
++++ b/drivers/md/Kconfig
+@@ -185,8 +185,12 @@ config MD_FAULTY
+
+ In unsure, say N.
+
++config BLK_DEV_DM_BUILTIN
++ boolean
++
+ config BLK_DEV_DM
+ tristate "Device mapper support"
++ select BLK_DEV_DM_BUILTIN
+ ---help---
+ Device-mapper is a low level volume manager. It works by allowing
+ people to specify mappings for ranges of logical sectors. Various
+diff --git a/drivers/md/Makefile b/drivers/md/Makefile
+index 046860c7a166..f67dec34631c 100644
+--- a/drivers/md/Makefile
++++ b/drivers/md/Makefile
+@@ -28,6 +28,7 @@ obj-$(CONFIG_MD_MULTIPATH) += multipath.o
+ obj-$(CONFIG_MD_FAULTY) += faulty.o
+ obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
+ obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
++obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o
+ obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
+ obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
+ obj-$(CONFIG_DM_DELAY) += dm-delay.o
+diff --git a/drivers/md/dm-builtin.c b/drivers/md/dm-builtin.c
+new file mode 100644
+index 000000000000..8b8278837435
+--- /dev/null
++++ b/drivers/md/dm-builtin.c
+@@ -0,0 +1,49 @@
++#include <linux/export.h>
++#include "dm.h"
++
++/*
++ * The kobject release method must not be placed in the module itself,
++ * otherwise we are subject to module unload races.
++ *
++ * The release method is called when the last reference to the kobject is
++ * dropped. It may be called by any other kernel code that drops the last
++ * reference.
++ *
++ * The release method suffers from module unload race. We may prevent the
++ * module from being unloaded at the start of the release method (using
++ * increased module reference count or synchronizing against the release
++ * method), however there is no way to prevent the module from being
++ * unloaded at the end of the release method.
++ *
++ * If this code were placed in the dm module, the following race may
++ * happen:
++ * 1. Some other process takes a reference to dm kobject
++ * 2. The user issues ioctl function to unload the dm device
++ * 3. dm_sysfs_exit calls kobject_put, however the object is not released
++ * because of the other reference taken at step 1
++ * 4. dm_sysfs_exit waits on the completion
++ * 5. The other process that took the reference in step 1 drops it,
++ * dm_kobject_release is called from this process
++ * 6. dm_kobject_release calls complete()
++ * 7. a reschedule happens before dm_kobject_release returns
++ * 8. dm_sysfs_exit continues, the dm device is unloaded, module reference
++ * count is decremented
++ * 9. The user unloads the dm module
++ * 10. The other process that was rescheduled in step 7 continues to run,
++ * it is now executing code in unloaded module, so it crashes
++ *
++ * Note that if the process that takes the foreign reference to dm kobject
++ * has a low priority and the system is sufficiently loaded with
++ * higher-priority processes that prevent the low-priority process from
++ * being scheduled long enough, this bug may really happen.
++ *
++ * In order to fix this module unload race, we place the release method
++ * into a helper code that is compiled directly into the kernel.
++ */
++
++void dm_kobject_release(struct kobject *kobj)
++{
++ complete(dm_get_completion_from_kobject(kobj));
++}
++
++EXPORT_SYMBOL(dm_kobject_release);
+diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
+index 84d2b91e4efb..c62c5ab6aed5 100644
+--- a/drivers/md/dm-sysfs.c
++++ b/drivers/md/dm-sysfs.c
+@@ -86,6 +86,7 @@ static const struct sysfs_ops dm_sysfs_ops = {
+ static struct kobj_type dm_ktype = {
+ .sysfs_ops = &dm_sysfs_ops,
+ .default_attrs = dm_attrs,
++ .release = dm_kobject_release,
+ };
+
+ /*
+@@ -104,5 +105,7 @@ int dm_sysfs_init(struct mapped_device *md)
+ */
+ void dm_sysfs_exit(struct mapped_device *md)
+ {
+- kobject_put(dm_kobject(md));
++ struct kobject *kobj = dm_kobject(md);
++ kobject_put(kobj);
++ wait_for_completion(dm_get_completion_from_kobject(kobj));
+ }
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 895363049bef..7ead06506588 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -192,8 +192,8 @@ struct mapped_device {
+ /* forced geometry settings */
+ struct hd_geometry geometry;
+
+- /* sysfs handle */
+- struct kobject kobj;
++ /* kobject and completion */
++ struct dm_kobject_holder kobj_holder;
+
+ /* zero-length flush that will be cloned and submitted to targets */
+ struct bio flush_bio;
+@@ -1891,6 +1891,7 @@ static struct mapped_device *alloc_dev(int minor)
+ init_waitqueue_head(&md->wait);
+ INIT_WORK(&md->work, dm_wq_work);
+ init_waitqueue_head(&md->eventq);
++ init_completion(&md->kobj_holder.completion);
+
+ md->disk->major = _major;
+ md->disk->first_minor = minor;
+@@ -2682,7 +2683,7 @@ struct gendisk *dm_disk(struct mapped_device *md)
+
+ struct kobject *dm_kobject(struct mapped_device *md)
+ {
+- return &md->kobj;
++ return &md->kobj_holder.kobj;
+ }
+
+ /*
+@@ -2693,9 +2694,7 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
+ {
+ struct mapped_device *md;
+
+- md = container_of(kobj, struct mapped_device, kobj);
+- if (&md->kobj != kobj)
+- return NULL;
++ md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
+
+ if (test_bit(DMF_FREEING, &md->flags) ||
+ dm_deleting_md(md))
+diff --git a/drivers/md/dm.h b/drivers/md/dm.h
+index b7dacd59d8d7..9db80c92096a 100644
+--- a/drivers/md/dm.h
++++ b/drivers/md/dm.h
+@@ -15,6 +15,8 @@
+ #include <linux/list.h>
+ #include <linux/blkdev.h>
+ #include <linux/hdreg.h>
++#include <linux/completion.h>
++#include <linux/kobject.h>
+
+ /*
+ * Suspend feature flags
+@@ -119,12 +121,27 @@ void dm_interface_exit(void);
+ /*
+ * sysfs interface
+ */
++struct dm_kobject_holder {
++ struct kobject kobj;
++ struct completion completion;
++};
++
++static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
++{
++ return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
++}
++
+ int dm_sysfs_init(struct mapped_device *md);
+ void dm_sysfs_exit(struct mapped_device *md);
+ struct kobject *dm_kobject(struct mapped_device *md);
+ struct mapped_device *dm_get_from_kobject(struct kobject *kobj);
+
+ /*
++ * The kobject helper
++ */
++void dm_kobject_release(struct kobject *kobj);
++
++/*
+ * Targets for linear and striped mappings
+ */
+ int dm_linear_init(void);
+diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
+index df2494c06cdc..8c5b8249d715 100644
+--- a/drivers/md/persistent-data/dm-space-map-common.c
++++ b/drivers/md/persistent-data/dm-space-map-common.c
+@@ -244,6 +244,10 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
+ return -EINVAL;
+ }
+
++ /*
++ * We need to set this before the dm_tm_new_block() call below.
++ */
++ ll->nr_blocks = nr_blocks;
+ for (i = old_blocks; i < blocks; i++) {
+ struct dm_block *b;
+ struct disk_index_entry idx;
+@@ -251,6 +255,7 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
+ r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
+ if (r < 0)
+ return r;
++
+ idx.blocknr = cpu_to_le64(dm_block_location(b));
+
+ r = dm_tm_unlock(ll->tm, b);
+@@ -265,7 +270,6 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
+ return r;
+ }
+
+- ll->nr_blocks = nr_blocks;
+ return 0;
+ }
+
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index fb678330aaa7..7c963c405d01 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -1693,6 +1693,7 @@ static void raid5_end_write_request(struct bio *bi, int error)
+ }
+
+ if (!uptodate) {
++ set_bit(STRIPE_DEGRADED, &sh->state);
+ set_bit(WriteErrorSeen, &conf->disks[i].rdev->flags);
+ set_bit(R5_WriteError, &sh->dev[i].flags);
+ } else if (is_badblock(conf->disks[i].rdev, sh->sector, STRIPE_SECTORS,
+@@ -4449,23 +4450,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
+ return sectors * (raid_disks - conf->max_degraded);
+ }
+
++static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
++{
++ safe_put_page(percpu->spare_page);
++ kfree(percpu->scribble);
++ percpu->spare_page = NULL;
++ percpu->scribble = NULL;
++}
++
++static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
++{
++ if (conf->level == 6 && !percpu->spare_page)
++ percpu->spare_page = alloc_page(GFP_KERNEL);
++ if (!percpu->scribble)
++ percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
++
++ if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
++ free_scratch_buffer(conf, percpu);
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
+ static void raid5_free_percpu(struct r5conf *conf)
+ {
+- struct raid5_percpu *percpu;
+ unsigned long cpu;
+
+ if (!conf->percpu)
+ return;
+
+- get_online_cpus();
+- for_each_possible_cpu(cpu) {
+- percpu = per_cpu_ptr(conf->percpu, cpu);
+- safe_put_page(percpu->spare_page);
+- kfree(percpu->scribble);
+- }
+ #ifdef CONFIG_HOTPLUG_CPU
+ unregister_cpu_notifier(&conf->cpu_notify);
+ #endif
++
++ get_online_cpus();
++ for_each_possible_cpu(cpu)
++ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+ put_online_cpus();
+
+ free_percpu(conf->percpu);
+@@ -4491,15 +4512,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+- if (conf->level == 6 && !percpu->spare_page)
+- percpu->spare_page = alloc_page(GFP_KERNEL);
+- if (!percpu->scribble)
+- percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+-
+- if (!percpu->scribble ||
+- (conf->level == 6 && !percpu->spare_page)) {
+- safe_put_page(percpu->spare_page);
+- kfree(percpu->scribble);
++ if (alloc_scratch_buffer(conf, percpu)) {
+ pr_err("%s: failed memory allocation for cpu%ld\n",
+ __func__, cpu);
+ return notifier_from_errno(-ENOMEM);
+@@ -4507,10 +4520,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+- safe_put_page(percpu->spare_page);
+- kfree(percpu->scribble);
+- percpu->spare_page = NULL;
+- percpu->scribble = NULL;
++ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+ break;
+ default:
+ break;
+@@ -4522,40 +4532,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
+ static int raid5_alloc_percpu(struct r5conf *conf)
+ {
+ unsigned long cpu;
+- struct page *spare_page;
+- struct raid5_percpu __percpu *allcpus;
+- void *scribble;
+- int err;
++ int err = 0;
+
+- allcpus = alloc_percpu(struct raid5_percpu);
+- if (!allcpus)
++ conf->percpu = alloc_percpu(struct raid5_percpu);
++ if (!conf->percpu)
+ return -ENOMEM;
+- conf->percpu = allcpus;
++
++#ifdef CONFIG_HOTPLUG_CPU
++ conf->cpu_notify.notifier_call = raid456_cpu_notify;
++ conf->cpu_notify.priority = 0;
++ err = register_cpu_notifier(&conf->cpu_notify);
++ if (err)
++ return err;
++#endif
+
+ get_online_cpus();
+- err = 0;
+ for_each_present_cpu(cpu) {
+- if (conf->level == 6) {
+- spare_page = alloc_page(GFP_KERNEL);
+- if (!spare_page) {
+- err = -ENOMEM;
+- break;
+- }
+- per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
+- }
+- scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+- if (!scribble) {
+- err = -ENOMEM;
++ err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
++ if (err) {
++ pr_err("%s: failed memory allocation for cpu%ld\n",
++ __func__, cpu);
+ break;
+ }
+- per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
+ }
+-#ifdef CONFIG_HOTPLUG_CPU
+- conf->cpu_notify.notifier_call = raid456_cpu_notify;
+- conf->cpu_notify.priority = 0;
+- if (err == 0)
+- err = register_cpu_notifier(&conf->cpu_notify);
+-#endif
+ put_online_cpus();
+
+ return err;
+diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-tuner.h b/drivers/media/dvb/dvb-usb/mxl111sf-tuner.h
+index ff333960b184..10b93c406d3e 100644
+--- a/drivers/media/dvb/dvb-usb/mxl111sf-tuner.h
++++ b/drivers/media/dvb/dvb-usb/mxl111sf-tuner.h
+@@ -69,7 +69,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
+ #else
+ static inline
+ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
+- struct mxl111sf_state *mxl_state
++ struct mxl111sf_state *mxl_state,
+ struct mxl111sf_tuner_config *cfg)
+ {
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+diff --git a/drivers/media/dvb/frontends/dib8000.c b/drivers/media/dvb/frontends/dib8000.c
+index fe284d5292f5..a542db159c7d 100644
+--- a/drivers/media/dvb/frontends/dib8000.c
++++ b/drivers/media/dvb/frontends/dib8000.c
+@@ -114,15 +114,10 @@ static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
+ return ret;
+ }
+
+-static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
++static u16 __dib8000_read_word(struct dib8000_state *state, u16 reg)
+ {
+ u16 ret;
+
+- if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+- dprintk("could not acquire lock");
+- return 0;
+- }
+-
+ state->i2c_write_buffer[0] = reg >> 8;
+ state->i2c_write_buffer[1] = reg & 0xff;
+
+@@ -140,6 +135,21 @@ static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
+ dprintk("i2c read error on %d", reg);
+
+ ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
++
++ return ret;
++}
++
++static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
++{
++ u16 ret;
++
++ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
++ dprintk("could not acquire lock");
++ return 0;
++ }
++
++ ret = __dib8000_read_word(state, reg);
++
+ mutex_unlock(&state->i2c_buffer_lock);
+
+ return ret;
+@@ -149,8 +159,15 @@ static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
+ {
+ u16 rw[2];
+
+- rw[0] = dib8000_read_word(state, reg + 0);
+- rw[1] = dib8000_read_word(state, reg + 1);
++ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
++ dprintk("could not acquire lock");
++ return 0;
++ }
++
++ rw[0] = __dib8000_read_word(state, reg + 0);
++ rw[1] = __dib8000_read_word(state, reg + 1);
++
++ mutex_unlock(&state->i2c_buffer_lock);
+
+ return ((rw[0] << 16) | (rw[1]));
+ }
+diff --git a/drivers/media/video/saa7134/saa7134-alsa.c b/drivers/media/video/saa7134/saa7134-alsa.c
+index 10460fd3ce39..dbcdfbf8aed0 100644
+--- a/drivers/media/video/saa7134/saa7134-alsa.c
++++ b/drivers/media/video/saa7134/saa7134-alsa.c
+@@ -172,7 +172,9 @@ static void saa7134_irq_alsa_done(struct saa7134_dev *dev,
+ dprintk("irq: overrun [full=%d/%d] - Blocks in %d\n",dev->dmasound.read_count,
+ dev->dmasound.bufsize, dev->dmasound.blocks);
+ spin_unlock(&dev->slock);
++ snd_pcm_stream_lock(dev->dmasound.substream);
+ snd_pcm_stop(dev->dmasound.substream,SNDRV_PCM_STATE_XRUN);
++ snd_pcm_stream_unlock(dev->dmasound.substream);
+ return;
+ }
+
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index 83790f20970f..fd61f6354e6d 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -1014,11 +1014,22 @@ static void atmci_start_request(struct atmel_mci *host,
+ iflags |= ATMCI_CMDRDY;
+ cmd = mrq->cmd;
+ cmdflags = atmci_prepare_command(slot->mmc, cmd);
+- atmci_send_command(host, cmd, cmdflags);
++
++ /*
++ * DMA transfer should be started before sending the command to avoid
++ * unexpected errors especially for read operations in SDIO mode.
++ * Unfortunately, in PDC mode, command has to be sent before starting
++ * the transfer.
++ */
++ if (host->submit_data != &atmci_submit_data_dma)
++ atmci_send_command(host, cmd, cmdflags);
+
+ if (data)
+ host->submit_data(host, data);
+
++ if (host->submit_data == &atmci_submit_data_dma)
++ atmci_send_command(host, cmd, cmdflags);
++
+ if (mrq->stop) {
+ host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
+ host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index aaeaff2446aa..9f68b82effd7 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1670,12 +1670,12 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
+ int tuning_loop_counter = MAX_TUNING_LOOP;
+ unsigned long timeout;
+ int err = 0;
++ unsigned long flags;
+
+ host = mmc_priv(mmc);
+
+ sdhci_runtime_pm_get(host);
+- disable_irq(host->irq);
+- spin_lock(&host->lock);
++ spin_lock_irqsave(&host->lock, flags);
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+@@ -1689,8 +1689,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
+ (host->flags & SDHCI_SDR50_NEEDS_TUNING)))
+ ctrl |= SDHCI_CTRL_EXEC_TUNING;
+ else {
+- spin_unlock(&host->lock);
+- enable_irq(host->irq);
++ spin_unlock_irqrestore(&host->lock, flags);
+ sdhci_runtime_pm_put(host);
+ return 0;
+ }
+@@ -1752,15 +1751,12 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
+ host->cmd = NULL;
+ host->mrq = NULL;
+
+- spin_unlock(&host->lock);
+- enable_irq(host->irq);
+-
++ spin_unlock_irqrestore(&host->lock, flags);
+ /* Wait for Buffer Read Ready interrupt */
+ wait_event_interruptible_timeout(host->buf_ready_int,
+ (host->tuning_done == 1),
+ msecs_to_jiffies(50));
+- disable_irq(host->irq);
+- spin_lock(&host->lock);
++ spin_lock_irqsave(&host->lock, flags);
+
+ if (!host->tuning_done) {
+ pr_info(DRIVER_NAME ": Timeout waiting for "
+@@ -1833,8 +1829,7 @@ out:
+ err = 0;
+
+ sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
+- spin_unlock(&host->lock);
+- enable_irq(host->irq);
++ spin_unlock_irqrestore(&host->lock, flags);
+ sdhci_runtime_pm_put(host);
+
+ return err;
+diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
+index 74a43b818d0e..baf8356cb667 100644
+--- a/drivers/mtd/nand/mxc_nand.c
++++ b/drivers/mtd/nand/mxc_nand.c
+@@ -596,7 +596,6 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
+ ecc_stat >>= 4;
+ } while (--no_subpages);
+
+- mtd->ecc_stats.corrected += ret;
+ pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
+
+ return ret;
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index 0ae0d7c54ccf..aaa79997898b 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -1854,8 +1854,6 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
+ BOND_AD_INFO(bond).agg_select_timer = timeout;
+ }
+
+-static u16 aggregator_identifier;
+-
+ /**
+ * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
+ * @bond: bonding struct to work on
+@@ -1869,7 +1867,7 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
+ if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr),
+ bond->dev->dev_addr)) {
+
+- aggregator_identifier = 0;
++ BOND_AD_INFO(bond).aggregator_identifier = 0;
+
+ BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
+ BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
+@@ -1941,7 +1939,7 @@ int bond_3ad_bind_slave(struct slave *slave)
+ ad_initialize_agg(aggregator);
+
+ aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr);
+- aggregator->aggregator_identifier = (++aggregator_identifier);
++ aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier;
+ aggregator->slave = slave;
+ aggregator->is_active = 0;
+ aggregator->num_of_ports = 0;
+diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
+index 235b2cc58b28..20d9c78cf787 100644
+--- a/drivers/net/bonding/bond_3ad.h
++++ b/drivers/net/bonding/bond_3ad.h
+@@ -253,6 +253,7 @@ struct ad_system {
+ struct ad_bond_info {
+ struct ad_system system; /* 802.3ad system structure */
+ u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes
++ u16 aggregator_identifier;
+ };
+
+ struct ad_slave_info {
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index bb828c24ce6f..7c6bb5a49732 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -777,14 +777,16 @@ static void flexcan_chip_stop(struct net_device *dev)
+ struct flexcan_regs __iomem *regs = priv->base;
+ u32 reg;
+
+- /* Disable all interrupts */
+- flexcan_write(0, &regs->imask1);
+-
+ /* Disable + halt module */
+ reg = flexcan_read(&regs->mcr);
+ reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT;
+ flexcan_write(reg, &regs->mcr);
+
++ /* Disable all interrupts */
++ flexcan_write(0, &regs->imask1);
++ flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
++ &regs->ctrl);
++
+ flexcan_transceiver_switch(priv, 0);
+ priv->can.state = CAN_STATE_STOPPED;
+
+@@ -809,12 +811,14 @@ static int flexcan_open(struct net_device *dev)
+ /* start chip and queuing */
+ err = flexcan_chip_start(dev);
+ if (err)
+- goto out_close;
++ goto out_free_irq;
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+
+ return 0;
+
++ out_free_irq:
++ free_irq(dev->irq, dev);
+ out_close:
+ close_candev(dev);
+ out:
+@@ -1023,6 +1027,7 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
+ struct resource *mem;
+
+ unregister_flexcandev(dev);
++ netif_napi_del(&priv->napi);
+ platform_set_drvdata(pdev, NULL);
+ iounmap(priv->base);
+
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index d5793d3fd762..c77c462bcee5 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -5637,8 +5637,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
+
+ work_mask |= opaque_key;
+
+- if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+- (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
++ if (desc->err_vlan & RXD_ERR_MASK) {
+ drop_it:
+ tg3_recycle_rx(tnapi, tpr, opaque_key,
+ desc_idx, *post_ptr);
+@@ -7103,12 +7102,12 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
+
+ tg3_netif_stop(tp);
+
++ tg3_set_mtu(dev, tp, new_mtu);
++
+ tg3_full_lock(tp, 1);
+
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+
+- tg3_set_mtu(dev, tp, new_mtu);
+-
+ err = tg3_restart_hw(tp, 0);
+
+ if (!err)
+diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
+index da90ba51c4e5..a398a6fc4f0c 100644
+--- a/drivers/net/ethernet/broadcom/tg3.h
++++ b/drivers/net/ethernet/broadcom/tg3.h
+@@ -2477,7 +2477,11 @@ struct tg3_rx_buffer_desc {
+ #define RXD_ERR_TOO_SMALL 0x00400000
+ #define RXD_ERR_NO_RESOURCES 0x00800000
+ #define RXD_ERR_HUGE_FRAME 0x01000000
+-#define RXD_ERR_MASK 0xffff0000
++
++#define RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \
++ RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \
++ RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \
++ RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME)
+
+ u32 reserved;
+ u32 opaque;
+diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
+index c4cfd1dea881..75d90407c39e 100644
+--- a/drivers/net/usb/gl620a.c
++++ b/drivers/net/usb/gl620a.c
+@@ -86,6 +86,10 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ u32 size;
+ u32 count;
+
++ /* This check is no longer done by usbnet */
++ if (skb->len < dev->net->hard_header_len)
++ return 0;
++
+ header = (struct gl_header *) skb->data;
+
+ // get the packet count of the received skb
+diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
+index db2cb74bf854..a2f757976eac 100644
+--- a/drivers/net/usb/mcs7830.c
++++ b/drivers/net/usb/mcs7830.c
+@@ -601,8 +601,9 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ {
+ u8 status;
+
+- if (skb->len == 0) {
+- dev_err(&dev->udev->dev, "unexpected empty rx frame\n");
++ /* This check is no longer done by usbnet */
++ if (skb->len < dev->net->hard_header_len) {
++ dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
+ return 0;
+ }
+
+diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
+index 01db4602a39e..c9e32783b008 100644
+--- a/drivers/net/usb/net1080.c
++++ b/drivers/net/usb/net1080.c
+@@ -419,6 +419,10 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ struct nc_trailer *trailer;
+ u16 hdr_len, packet_len;
+
++ /* This check is no longer done by usbnet */
++ if (skb->len < dev->net->hard_header_len)
++ return 0;
++
+ if (!(skb->len & 0x01)) {
+ #ifdef DEBUG
+ struct net_device *net = dev->net;
+diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
+index 255d6a424a6b..13b40e335a10 100644
+--- a/drivers/net/usb/rndis_host.c
++++ b/drivers/net/usb/rndis_host.c
+@@ -490,6 +490,10 @@ EXPORT_SYMBOL_GPL(rndis_unbind);
+ */
+ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ {
++ /* This check is no longer done by usbnet */
++ if (skb->len < dev->net->hard_header_len)
++ return 0;
++
+ /* peripheral may have batched packets to us... */
+ while (likely(skb->len)) {
+ struct rndis_data_hdr *hdr = (void *)skb->data;
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index a8e464024fbb..c41b42ba80c2 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -1079,6 +1079,10 @@ static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
+
+ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ {
++ /* This check is no longer done by usbnet */
++ if (skb->len < dev->net->hard_header_len)
++ return 0;
++
+ while (skb->len > 0) {
+ u32 rx_cmd_a, rx_cmd_b, align_count, size;
+ struct sk_buff *ax_skb;
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index 55b3218c2cd2..6617325b14c2 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -1039,6 +1039,10 @@ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
+
+ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ {
++ /* This check is no longer done by usbnet */
++ if (skb->len < dev->net->hard_header_len)
++ return 0;
++
+ while (skb->len > 0) {
+ u32 header, align_count;
+ struct sk_buff *ax_skb;
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index dc53a8f83001..3d217421c016 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -414,17 +414,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
+ }
+ // else network stack removes extra byte if we forced a short packet
+
+- if (skb->len) {
+- /* all data was already cloned from skb inside the driver */
+- if (dev->driver_info->flags & FLAG_MULTI_PACKET)
+- dev_kfree_skb_any(skb);
+- else
+- usbnet_skb_return(dev, skb);
++ /* all data was already cloned from skb inside the driver */
++ if (dev->driver_info->flags & FLAG_MULTI_PACKET)
++ goto done;
++
++ if (skb->len < ETH_HLEN) {
++ dev->net->stats.rx_errors++;
++ dev->net->stats.rx_length_errors++;
++ netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
++ } else {
++ usbnet_skb_return(dev, skb);
+ return;
+ }
+
+- netif_dbg(dev, rx_err, dev->net, "drop\n");
+- dev->net->stats.rx_errors++;
+ done:
+ skb_queue_tail(&dev->done, skb);
+ }
+@@ -446,13 +448,6 @@ static void rx_complete (struct urb *urb)
+ switch (urb_status) {
+ /* success */
+ case 0:
+- if (skb->len < dev->net->hard_header_len) {
+- state = rx_cleanup;
+- dev->net->stats.rx_errors++;
+- dev->net->stats.rx_length_errors++;
+- netif_dbg(dev, rx_err, dev->net,
+- "rx length %d\n", skb->len);
+- }
+ break;
+
+ /* stalls need manual reset. this is rare ... except that
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 43a6a11d261d..f13a6734dcbc 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1029,7 +1029,8 @@ static int virtnet_probe(struct virtio_device *vdev)
+ /* If we can receive ANY GSO packets, we must allocate large ones. */
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
++ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
++ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
+ vi->big_packets = true;
+
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index d426261d91f6..28ceef2b6941 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1734,11 +1734,20 @@ vmxnet3_netpoll(struct net_device *netdev)
+ {
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+- if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+- vmxnet3_disable_all_intrs(adapter);
+-
+- vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
+- vmxnet3_enable_all_intrs(adapter);
++ switch (adapter->intr.type) {
++#ifdef CONFIG_PCI_MSI
++ case VMXNET3_IT_MSIX: {
++ int i;
++ for (i = 0; i < adapter->num_rx_queues; i++)
++ vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
++ break;
++ }
++#endif
++ case VMXNET3_IT_MSI:
++ default:
++ vmxnet3_intr(0, adapter->netdev);
++ break;
++ }
+
+ }
+ #endif /* CONFIG_NET_POLL_CONTROLLER */
+diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+index 9c51b395b4ff..eabf374f42e3 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
++++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+@@ -55,7 +55,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3039605e, 0x33795d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
++ {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c782},
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+@@ -85,7 +85,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+- {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
++ {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
+ };
+
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+index ef921e1bab82..c6c3e1cf0fe3 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+@@ -28,6 +28,10 @@ int htc_modparam_nohwcrypt;
+ module_param_named(nohwcrypt, htc_modparam_nohwcrypt, int, 0444);
+ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
+
++static int ath9k_ps_enable;
++module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
++MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
++
+ #define CHAN2G(_freq, _idx) { \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+@@ -729,11 +733,13 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+ IEEE80211_HW_RX_INCLUDES_FCS |
+- IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+
++ if (ath9k_ps_enable)
++ hw->flags |= IEEE80211_HW_SUPPORTS_PS;
++
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
+index ba6a49c8a43b..1355d7101478 100644
+--- a/drivers/net/wireless/ath/ath9k/init.c
++++ b/drivers/net/wireless/ath/ath9k/init.c
+@@ -44,6 +44,10 @@ static int ath9k_btcoex_enable;
+ module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
+ MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
+
++static int ath9k_ps_enable;
++module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
++MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
++
+ bool is_ath9k_unloaded;
+ /* We use the hw_value as an index into our private channel structure */
+
+@@ -671,11 +675,13 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+ hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+ IEEE80211_HW_SIGNAL_DBM |
+- IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
++ if (ath9k_ps_enable)
++ hw->flags |= IEEE80211_HW_SUPPORTS_PS;
++
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
+ hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 126ed31affe1..2e88af13293e 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -1176,14 +1176,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
+ for (tidno = 0, tid = &an->tid[tidno];
+ tidno < WME_NUM_TID; tidno++, tid++) {
+
+- if (!tid->sched)
+- continue;
+-
+ ac = tid->ac;
+ txq = ac->txq;
+
+ spin_lock_bh(&txq->axq_lock);
+
++ if (!tid->sched) {
++ spin_unlock_bh(&txq->axq_lock);
++ continue;
++ }
++
+ buffered = !skb_queue_empty(&tid->buf_q);
+
+ tid->sched = false;
+diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
+index 5f77cbe0b6aa..c6c34bf15105 100644
+--- a/drivers/net/wireless/b43/xmit.c
++++ b/drivers/net/wireless/b43/xmit.c
+@@ -819,10 +819,10 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
+ * channel number in b43. */
+ if (chanstat & B43_RX_CHAN_5GHZ) {
+ status.band = IEEE80211_BAND_5GHZ;
+- status.freq = b43_freq_to_channel_5ghz(chanid);
++ status.freq = b43_channel_to_freq_5ghz(chanid);
+ } else {
+ status.band = IEEE80211_BAND_2GHZ;
+- status.freq = b43_freq_to_channel_2ghz(chanid);
++ status.freq = b43_channel_to_freq_2ghz(chanid);
+ }
+ break;
+ default:
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+index df1540ca6102..5fe46029cbfc 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+@@ -854,8 +854,6 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+ struct iwl_ht_agg *agg;
+ struct sk_buff_head reclaimed_skbs;
+- struct ieee80211_tx_info *info;
+- struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ unsigned long flags;
+ int sta_id;
+@@ -941,24 +939,32 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+ 0, &reclaimed_skbs);
+ freed = 0;
+ while (!skb_queue_empty(&reclaimed_skbs)) {
++ struct ieee80211_hdr *hdr;
++ struct ieee80211_tx_info *info;
+
+ skb = __skb_dequeue(&reclaimed_skbs);
+- hdr = (struct ieee80211_hdr *)skb->data;
++ hdr = (void *)skb->data;
++ info = IEEE80211_SKB_CB(skb);
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ freed++;
+ else
+ WARN_ON_ONCE(1);
+
+- info = IEEE80211_SKB_CB(skb);
+ kmem_cache_free(priv->tx_cmd_pool, (info->driver_data[1]));
+
++ memset(&info->status, 0, sizeof(info->status));
++ /* Packet was transmitted successfully, failures come as single
++ * frames because before failing a frame the firmware transmits
++ * it without aggregation at least once.
++ */
++ info->flags |= IEEE80211_TX_STAT_ACK;
++
+ if (freed == 1) {
+ /* this is the first skb we deliver in this batch */
+ /* put the rate scaling data there */
+ info = IEEE80211_SKB_CB(skb);
+ memset(&info->status, 0, sizeof(info->status));
+- info->flags |= IEEE80211_TX_STAT_ACK;
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ info->status.ampdu_ack_len = ba_resp->txed_2_done;
+ info->status.ampdu_len = ba_resp->txed;
+diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
+index 34bba5234294..6543f4ff2ea6 100644
+--- a/drivers/net/wireless/mwifiex/11n.c
++++ b/drivers/net/wireless/mwifiex/11n.c
+@@ -343,8 +343,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
+ ht_cap->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_ht_cap));
+ memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header),
+- (u8 *) bss_desc->bcn_ht_cap +
+- sizeof(struct ieee_types_header),
++ (u8 *)bss_desc->bcn_ht_cap,
+ le16_to_cpu(ht_cap->header.len));
+
+ mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+index f1cc90751dbf..6d3d6536c532 100644
+--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
++++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+@@ -15,6 +15,8 @@
+ #ifndef RTL8187_H
+ #define RTL8187_H
+
++#include <linux/cache.h>
++
+ #include "rtl818x.h"
+ #include "leds.h"
+
+@@ -131,7 +133,10 @@ struct rtl8187_priv {
+ u8 aifsn[4];
+ u8 rfkill_mask;
+ struct {
+- __le64 buf;
++ union {
++ __le64 buf;
++ u8 dummy1[L1_CACHE_BYTES];
++ } ____cacheline_aligned;
+ struct sk_buff_head queue;
+ } b_tx_status; /* This queue is used by both -b and non-b devices */
+ struct mutex io_mutex;
+@@ -139,7 +144,8 @@ struct rtl8187_priv {
+ u8 bits8;
+ __le16 bits16;
+ __le32 bits32;
+- } *io_dmabuf;
++ u8 dummy2[L1_CACHE_BYTES];
++ } *io_dmabuf ____cacheline_aligned;
+ bool rfkill_off;
+ };
+
+diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
+index 3f0f056fae9c..4fa987b6662d 100644
+--- a/drivers/net/wireless/rtlwifi/core.c
++++ b/drivers/net/wireless/rtlwifi/core.c
+@@ -131,6 +131,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
+ rtlpriv->cfg->maps
+ [RTL_IBSS_INT_MASKS]);
+ }
++ mac->link_state = MAC80211_LINKED;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+index cb480d857cc2..ce1fdbea9fed 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+@@ -905,14 +905,26 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
+ bool is92c;
+ int err;
+ u8 tmp_u1b;
++ unsigned long flags;
+
+ rtlpci->being_init_adapter = true;
++
++ /* Since this function can take a very long time (up to 350 ms)
++ * and can be called with irqs disabled, reenable the irqs
++ * to let the other devices continue being serviced.
++ *
++ * It is safe doing so since our own interrupts will only be enabled
++ * in a subsequent step.
++ */
++ local_save_flags(flags);
++ local_irq_enable();
++
+ rtlpriv->intf_ops->disable_aspm(hw);
+ rtstatus = _rtl92ce_init_mac(hw);
+ if (rtstatus != true) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Init MAC failed\n"));
+ err = 1;
+- return err;
++ goto exit;
+ }
+
+ err = rtl92c_download_fw(hw);
+@@ -922,7 +934,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
+ "without FW now..\n"));
+ err = 1;
+ rtlhal->fw_ready = false;
+- return err;
++ goto exit;
+ } else {
+ rtlhal->fw_ready = true;
+ }
+@@ -985,6 +997,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("under 1.5V\n"));
+ }
+ rtl92c_dm_init(hw);
++exit:
++ local_irq_restore(flags);
+ rtlpci->being_init_adapter = false;
+ return err;
+ }
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+index 17a8e9628512..d9848673c20c 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+@@ -85,17 +85,15 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ if (mac->act_scanning) {
+ tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
+ tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
+- if (turbo_scanoff) {
+- for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+- tx_agc[idx1] = ppowerlevel[idx1] |
+- (ppowerlevel[idx1] << 8) |
+- (ppowerlevel[idx1] << 16) |
+- (ppowerlevel[idx1] << 24);
+- if (rtlhal->interface == INTF_USB) {
+- if (tx_agc[idx1] > 0x20 &&
+- rtlefuse->external_pa)
+- tx_agc[idx1] = 0x20;
+- }
++ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
++ tx_agc[idx1] = ppowerlevel[idx1] |
++ (ppowerlevel[idx1] << 8) |
++ (ppowerlevel[idx1] << 16) |
++ (ppowerlevel[idx1] << 24);
++ if (rtlhal->interface == INTF_USB) {
++ if (tx_agc[idx1] > 0x20 &&
++ rtlefuse->external_pa)
++ tx_agc[idx1] = 0x20;
+ }
+ }
+ } else {
+@@ -107,7 +105,7 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ TXHIGHPWRLEVEL_LEVEL2) {
+ tx_agc[RF90_PATH_A] = 0x00000000;
+ tx_agc[RF90_PATH_B] = 0x00000000;
+- } else{
++ } else {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+@@ -380,7 +378,12 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
+ regoffset == RTXAGC_B_MCS07_MCS04)
+ regoffset = 0xc98;
+ for (i = 0; i < 3; i++) {
+- writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
++ if (i != 2)
++ writeVal = (writeVal > 8) ?
++ (writeVal - 8) : 0;
++ else
++ writeVal = (writeVal > 6) ?
++ (writeVal - 6) : 0;
+ rtl_write_byte(rtlpriv, (u32)(regoffset + i),
+ (u8)writeVal);
+ }
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index 016ef862c608..c1842537331b 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -316,6 +316,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
+ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
++ {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
+ {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+ {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
+ /* HP - Lite-On ,8188CUS Slim Combo */
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index 72c33fbe451d..45c1727d5104 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -97,8 +97,13 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
+
+ static int of_bus_pci_match(struct device_node *np)
+ {
+- /* "vci" is for the /chaos bridge on 1st-gen PCI powermacs */
+- return !strcmp(np->type, "pci") || !strcmp(np->type, "vci");
++ /*
++ * "pciex" is PCI Express
++ * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
++ * "ht" is hypertransport
++ */
++ return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") ||
++ !strcmp(np->type, "vci") || !strcmp(np->type, "ht");
+ }
+
+ static void of_bus_pci_count_cells(struct device_node *np,
+diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
+index d0b597b50398..5a4432dd8830 100644
+--- a/drivers/parport/parport_pc.c
++++ b/drivers/parport/parport_pc.c
+@@ -2875,8 +2875,6 @@ enum parport_pc_pci_cards {
+ syba_2p_epp,
+ syba_1p_ecp,
+ titan_010l,
+- titan_1284p1,
+- titan_1284p2,
+ avlab_1p,
+ avlab_2p,
+ oxsemi_952,
+@@ -2935,8 +2933,6 @@ static struct parport_pc_pci {
+ /* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } },
+ /* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } },
+ /* titan_010l */ { 1, { { 3, -1 }, } },
+- /* titan_1284p1 */ { 1, { { 0, 1 }, } },
+- /* titan_1284p2 */ { 2, { { 0, 1 }, { 2, 3 }, } },
+ /* avlab_1p */ { 1, { { 0, 1}, } },
+ /* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} },
+ /* The Oxford Semi cards are unusual: 954 doesn't support ECP,
+@@ -2952,8 +2948,8 @@ static struct parport_pc_pci {
+ /* netmos_9705 */ { 1, { { 0, -1 }, } },
+ /* netmos_9715 */ { 2, { { 0, 1 }, { 2, 3 },} },
+ /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} },
+- /* netmos_9805 */ { 1, { { 0, -1 }, } },
+- /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
++ /* netmos_9805 */ { 1, { { 0, 1 }, } },
++ /* netmos_9815 */ { 2, { { 0, 1 }, { 2, 3 }, } },
+ /* netmos_9901 */ { 1, { { 0, -1 }, } },
+ /* netmos_9865 */ { 1, { { 0, -1 }, } },
+ /* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
+@@ -2997,8 +2993,6 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_1p_ecp },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_010L,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_010l },
+- { 0x9710, 0x9805, 0x1000, 0x0010, 0, 0, titan_1284p1 },
+- { 0x9710, 0x9815, 0x1000, 0x0020, 0, 0, titan_1284p2 },
+ /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
+ /* AFAVLAB_TK9902 */
+ { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p},
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 9f1fec14723f..4c3a9e9bf289 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1078,6 +1078,8 @@ EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
+ static int do_pci_enable_device(struct pci_dev *dev, int bars)
+ {
+ int err;
++ u16 cmd;
++ u8 pin;
+
+ err = pci_set_power_state(dev, PCI_D0);
+ if (err < 0 && err != -EIO)
+@@ -1087,6 +1089,17 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
+ return err;
+ pci_fixup_device(pci_fixup_enable, dev);
+
++ if (dev->msi_enabled || dev->msix_enabled)
++ return 0;
++
++ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
++ if (pin) {
++ pci_read_config_word(dev, PCI_COMMAND, &cmd);
++ if (cmd & PCI_COMMAND_INTX_DISABLE)
++ pci_write_config_word(dev, PCI_COMMAND,
++ cmd & ~PCI_COMMAND_INTX_DISABLE);
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
+index fdacfcebd175..0076feae0ce3 100644
+--- a/drivers/platform/x86/hp_accel.c
++++ b/drivers/platform/x86/hp_accel.c
+@@ -77,6 +77,7 @@ static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
+ static struct acpi_device_id lis3lv02d_device_ids[] = {
+ {"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
+ {"HPQ6000", 0}, /* HP Mobile Data Protection System PNP */
++ {"HPQ6007", 0}, /* HP Mobile Data Protection System PNP */
+ {"", 0},
+ };
+ MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
+diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
+index 2f2f9a6f54fa..39d3697b6abc 100644
+--- a/drivers/power/max17040_battery.c
++++ b/drivers/power/max17040_battery.c
+@@ -148,7 +148,7 @@ static void max17040_get_online(struct i2c_client *client)
+ {
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+- if (chip->pdata->battery_online)
++ if (chip->pdata && chip->pdata->battery_online)
+ chip->online = chip->pdata->battery_online();
+ else
+ chip->online = 1;
+@@ -158,7 +158,8 @@ static void max17040_get_status(struct i2c_client *client)
+ {
+ struct max17040_chip *chip = i2c_get_clientdata(client);
+
+- if (!chip->pdata->charger_online || !chip->pdata->charger_enable) {
++ if (!chip->pdata || !chip->pdata->charger_online
++ || !chip->pdata->charger_enable) {
+ chip->status = POWER_SUPPLY_STATUS_UNKNOWN;
+ return;
+ }
+diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
+index e3eed1840e4b..cae89855c62d 100644
+--- a/drivers/rtc/rtc-cmos.c
++++ b/drivers/rtc/rtc-cmos.c
+@@ -34,11 +34,11 @@
+ #include <linux/interrupt.h>
+ #include <linux/spinlock.h>
+ #include <linux/platform_device.h>
+-#include <linux/mod_devicetable.h>
+ #include <linux/log2.h>
+ #include <linux/pm.h>
+ #include <linux/of.h>
+ #include <linux/of_platform.h>
++#include <linux/dmi.h>
+
+ /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
+ #include <asm-generic/rtc.h>
+@@ -377,6 +377,51 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+ return 0;
+ }
+
++/*
++ * Do not disable RTC alarm on shutdown - workaround for b0rked BIOSes.
++ */
++static bool alarm_disable_quirk;
++
++static int __init set_alarm_disable_quirk(const struct dmi_system_id *id)
++{
++ alarm_disable_quirk = true;
++ pr_info("rtc-cmos: BIOS has alarm-disable quirk. ");
++ pr_info("RTC alarms disabled\n");
++ return 0;
++}
++
++static const struct dmi_system_id rtc_quirks[] __initconst = {
++ /* https://bugzilla.novell.com/show_bug.cgi?id=805740 */
++ {
++ .callback = set_alarm_disable_quirk,
++ .ident = "IBM Truman",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "4852570"),
++ },
++ },
++ /* https://bugzilla.novell.com/show_bug.cgi?id=812592 */
++ {
++ .callback = set_alarm_disable_quirk,
++ .ident = "Gigabyte GA-990XA-UD3",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR,
++ "Gigabyte Technology Co., Ltd."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "GA-990XA-UD3"),
++ },
++ },
++ /* http://permalink.gmane.org/gmane.linux.kernel/1604474 */
++ {
++ .callback = set_alarm_disable_quirk,
++ .ident = "Toshiba Satellite L300",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
++ },
++ },
++ {}
++};
++
+ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
+ {
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+@@ -385,6 +430,9 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
+ if (!is_valid_irq(cmos->irq))
+ return -EINVAL;
+
++ if (alarm_disable_quirk)
++ return 0;
++
+ spin_lock_irqsave(&rtc_lock, flags);
+
+ if (enabled)
+@@ -1166,6 +1214,8 @@ static int __init cmos_init(void)
+ platform_driver_registered = true;
+ }
+
++ dmi_check_system(rtc_quirks);
++
+ if (retval == 0)
+ return 0;
+
+diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
+index 66fb72531b34..633048bd5274 100644
+--- a/drivers/scsi/bfa/bfad.c
++++ b/drivers/scsi/bfa/bfad.c
+@@ -1613,7 +1613,7 @@ out:
+ static u32 *
+ bfad_load_fwimg(struct pci_dev *pdev)
+ {
+- if (pdev->device == BFA_PCI_DEVICE_ID_CT2) {
++ if (bfa_asic_id_ct2(pdev->device)) {
+ if (bfi_image_ct2_size == 0)
+ bfad_read_firmware(pdev, &bfi_image_ct2,
+ &bfi_image_ct2_size, BFAD_FW_FILE_CT2);
+@@ -1623,12 +1623,14 @@ bfad_load_fwimg(struct pci_dev *pdev)
+ bfad_read_firmware(pdev, &bfi_image_ct,
+ &bfi_image_ct_size, BFAD_FW_FILE_CT);
+ return bfi_image_ct;
+- } else {
++ } else if (bfa_asic_id_cb(pdev->device)) {
+ if (bfi_image_cb_size == 0)
+ bfad_read_firmware(pdev, &bfi_image_cb,
+ &bfi_image_cb_size, BFAD_FW_FILE_CB);
+ return bfi_image_cb;
+ }
++
++ return NULL;
+ }
+
+ static void
+diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
+index 646051afd3cb..ef84778b6d67 100644
+--- a/drivers/scsi/isci/host.h
++++ b/drivers/scsi/isci/host.h
+@@ -309,9 +309,8 @@ static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
+ }
+
+ #define for_each_isci_host(id, ihost, pdev) \
+- for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
+- id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
+- ihost = to_pci_info(pdev)->hosts[++id])
++ for (id = 0; id < SCI_MAX_CONTROLLERS && \
++ (ihost = to_pci_info(pdev)->hosts[id]); id++)
+
+ static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
+ {
+diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
+index 38a99d281141..21a676931a27 100644
+--- a/drivers/scsi/isci/port_config.c
++++ b/drivers/scsi/isci/port_config.c
+@@ -610,13 +610,6 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
+ sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+ } else {
+ /* the phy is already the part of the port */
+- u32 port_state = iport->sm.current_state_id;
+-
+- /* if the PORT'S state is resetting then the link up is from
+- * port hard reset in this case, we need to tell the port
+- * that link up is recieved
+- */
+- BUG_ON(port_state != SCI_PORT_RESETTING);
+ port_agent->phy_ready_mask |= 1 << phy_index;
+ sci_port_link_up(iport, iphy);
+ }
+diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
+index e294d118d89c..60a530fd5f03 100644
+--- a/drivers/scsi/isci/task.c
++++ b/drivers/scsi/isci/task.c
+@@ -1390,7 +1390,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
+- ret = TMF_RESP_FUNC_COMPLETE;
++ ret = -ENODEV;
+ goto out;
+ }
+
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index fcf052c50bf5..c491a945f860 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -2568,8 +2568,7 @@ struct qla_hw_data {
+ IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
+ IS_QLA82XX(ha))
+ #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha))
+-#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
+- (ha)->flags.msix_enabled)
++#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
+ #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha))
+ #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha))
+ #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
+diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
+index da2b75b15d4e..672538e8a9b0 100644
+--- a/drivers/staging/comedi/drivers/adv_pci1710.c
++++ b/drivers/staging/comedi/drivers/adv_pci1710.c
+@@ -414,6 +414,7 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+ {
++ unsigned int val;
+ int n, chan, range, ofs;
+
+ chan = CR_CHAN(insn->chanspec);
+@@ -429,11 +430,14 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
+ outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF);
+ ofs = PCI171x_DA1;
+ }
++ val = devpriv->ao_data[chan];
+
+- for (n = 0; n < insn->n; n++)
+- outw(data[n], dev->iobase + ofs);
++ for (n = 0; n < insn->n; n++) {
++ val = data[n];
++ outw(val, dev->iobase + ofs);
++ }
+
+- devpriv->ao_data[chan] = data[n];
++ devpriv->ao_data[chan] = val;
+
+ return n;
+
+@@ -582,6 +586,7 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+ {
++ unsigned int val;
+ int n, rangereg, chan;
+
+ chan = CR_CHAN(insn->chanspec);
+@@ -591,13 +596,15 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
+ outb(rangereg, dev->iobase + PCI1720_RANGE);
+ devpriv->da_ranges = rangereg;
+ }
++ val = devpriv->ao_data[chan];
+
+ for (n = 0; n < insn->n; n++) {
+- outw(data[n], dev->iobase + PCI1720_DA0 + (chan << 1));
++ val = data[n];
++ outw(val, dev->iobase + PCI1720_DA0 + (chan << 1));
+ outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */
+ }
+
+- devpriv->ao_data[chan] = data[n];
++ devpriv->ao_data[chan] = val;
+
+ return n;
+ }
+diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
+index b2c2c8971a32..6c25bd312cb0 100644
+--- a/drivers/staging/comedi/drivers/pcmuio.c
++++ b/drivers/staging/comedi/drivers/pcmuio.c
+@@ -464,13 +464,13 @@ static int pcmuio_detach(struct comedi_device *dev)
+ if (dev->iobase)
+ release_region(dev->iobase, ASIC_IOSIZE * thisboard->num_asics);
+
+- for (i = 0; i < MAX_ASICS; ++i) {
+- if (devpriv->asics[i].irq)
+- free_irq(devpriv->asics[i].irq, dev);
+- }
+-
+- if (devpriv && devpriv->sprivs)
++ if (devpriv) {
++ for (i = 0; i < MAX_ASICS; ++i) {
++ if (devpriv->asics[i].irq)
++ free_irq(devpriv->asics[i].irq, dev);
++ }
+ kfree(devpriv->sprivs);
++ }
+
+ return 0;
+ }
+diff --git a/drivers/staging/comedi/drivers/ssv_dnp.c b/drivers/staging/comedi/drivers/ssv_dnp.c
+index 526de2efa125..0316780103b5 100644
+--- a/drivers/staging/comedi/drivers/ssv_dnp.c
++++ b/drivers/staging/comedi/drivers/ssv_dnp.c
+@@ -251,11 +251,11 @@ static int dnp_dio_insn_bits(struct comedi_device *dev,
+
+ /* on return, data[1] contains the value of the digital input lines. */
+ outb(PADR, CSCIR);
+- data[0] = inb(CSCDR);
++ data[1] = inb(CSCDR);
+ outb(PBDR, CSCIR);
+- data[0] += inb(CSCDR) << 8;
++ data[1] += inb(CSCDR) << 8;
+ outb(PCDR, CSCIR);
+- data[0] += ((inb(CSCDR) & 0xF0) << 12);
++ data[1] += ((inb(CSCDR) & 0xF0) << 12);
+
+ return 2;
+
+diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
+index ee6cd792bec8..f79d0cb8d188 100644
+--- a/drivers/staging/iio/adc/ad799x_core.c
++++ b/drivers/staging/iio/adc/ad799x_core.c
+@@ -873,7 +873,8 @@ static int __devinit ad799x_probe(struct i2c_client *client,
+ return 0;
+
+ error_free_irq:
+- free_irq(client->irq, indio_dev);
++ if (client->irq > 0)
++ free_irq(client->irq, indio_dev);
+ error_cleanup_ring:
+ ad799x_ring_cleanup(indio_dev);
+ error_disable_reg:
+diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
+index 88d1d35d08c9..a74e83d7de3b 100644
+--- a/drivers/staging/rtl8712/usb_intf.c
++++ b/drivers/staging/rtl8712/usb_intf.c
+@@ -363,6 +363,10 @@ static u8 key_2char2num(u8 hch, u8 lch)
+ return (hex_to_bin(hch) << 4) | hex_to_bin(lch);
+ }
+
++static const struct device_type wlan_type = {
++ .name = "wlan",
++};
++
+ /*
+ * drv_init() - a device potentially for us
+ *
+@@ -398,6 +402,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
+ padapter->pusb_intf = pusb_intf;
+ usb_set_intfdata(pusb_intf, pnetdev);
+ SET_NETDEV_DEV(pnetdev, &pusb_intf->dev);
++ pnetdev->dev.type = &wlan_type;
+ /* step 2. */
+ padapter->dvobj_init = &r8712_usb_dvobj_init;
+ padapter->dvobj_deinit = &r8712_usb_dvobj_deinit;
+diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
+index 0d11147f91c1..59cf6748de42 100644
+--- a/drivers/staging/vt6656/baseband.c
++++ b/drivers/staging/vt6656/baseband.c
+@@ -1656,7 +1656,6 @@ BBvUpdatePreEDThreshold(
+
+ if( bScanning )
+ { // need Max sensitivity //RSSI -69, -70,....
+- if(pDevice->byBBPreEDIndex == 0) break;
+ pDevice->byBBPreEDIndex = 0;
+ ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
+ ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x30); //CR206(0xCE)
+@@ -1799,7 +1798,6 @@ BBvUpdatePreEDThreshold(
+
+ if( bScanning )
+ { // need Max sensitivity //RSSI -69, -70, ...
+- if(pDevice->byBBPreEDIndex == 0) break;
+ pDevice->byBBPreEDIndex = 0;
+ ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
+ ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x24); //CR206(0xCE)
+@@ -1951,7 +1949,6 @@ BBvUpdatePreEDThreshold(
+ case RF_VT3342A0: //RobertYu:20060627, testing table
+ if( bScanning )
+ { // need Max sensitivity //RSSI -67, -68, ...
+- if(pDevice->byBBPreEDIndex == 0) break;
+ pDevice->byBBPreEDIndex = 0;
+ ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
+ ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x38); //CR206(0xCE)
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 45c13a659940..4a88eea158ab 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -50,7 +50,7 @@
+ static LIST_HEAD(g_tiqn_list);
+ static LIST_HEAD(g_np_list);
+ static DEFINE_SPINLOCK(tiqn_lock);
+-static DEFINE_SPINLOCK(np_lock);
++static DEFINE_MUTEX(np_lock);
+
+ static struct idr tiqn_idr;
+ struct idr sess_idr;
+@@ -262,6 +262,9 @@ int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
+ return 0;
+ }
+
++/*
++ * Called with mutex np_lock held
++ */
+ static struct iscsi_np *iscsit_get_np(
+ struct __kernel_sockaddr_storage *sockaddr,
+ int network_transport)
+@@ -272,11 +275,10 @@ static struct iscsi_np *iscsit_get_np(
+ int ip_match = 0;
+ u16 port;
+
+- spin_lock_bh(&np_lock);
+ list_for_each_entry(np, &g_np_list, np_list) {
+- spin_lock(&np->np_thread_lock);
++ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+- spin_unlock(&np->np_thread_lock);
++ spin_unlock_bh(&np->np_thread_lock);
+ continue;
+ }
+
+@@ -309,13 +311,11 @@ static struct iscsi_np *iscsit_get_np(
+ * while iscsi_tpg_add_network_portal() is called.
+ */
+ np->np_exports++;
+- spin_unlock(&np->np_thread_lock);
+- spin_unlock_bh(&np_lock);
++ spin_unlock_bh(&np->np_thread_lock);
+ return np;
+ }
+- spin_unlock(&np->np_thread_lock);
++ spin_unlock_bh(&np->np_thread_lock);
+ }
+- spin_unlock_bh(&np_lock);
+
+ return NULL;
+ }
+@@ -329,16 +329,22 @@ struct iscsi_np *iscsit_add_np(
+ struct sockaddr_in6 *sock_in6;
+ struct iscsi_np *np;
+ int ret;
++
++ mutex_lock(&np_lock);
++
+ /*
+ * Locate the existing struct iscsi_np if already active..
+ */
+ np = iscsit_get_np(sockaddr, network_transport);
+- if (np)
++ if (np) {
++ mutex_unlock(&np_lock);
+ return np;
++ }
+
+ np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
+ if (!np) {
+ pr_err("Unable to allocate memory for struct iscsi_np\n");
++ mutex_unlock(&np_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+@@ -361,6 +367,7 @@ struct iscsi_np *iscsit_add_np(
+ ret = iscsi_target_setup_login_socket(np, sockaddr);
+ if (ret != 0) {
+ kfree(np);
++ mutex_unlock(&np_lock);
+ return ERR_PTR(ret);
+ }
+
+@@ -369,6 +376,7 @@ struct iscsi_np *iscsit_add_np(
+ pr_err("Unable to create kthread: iscsi_np\n");
+ ret = PTR_ERR(np->np_thread);
+ kfree(np);
++ mutex_unlock(&np_lock);
+ return ERR_PTR(ret);
+ }
+ /*
+@@ -379,10 +387,10 @@ struct iscsi_np *iscsit_add_np(
+ * point because iscsi_np has not been added to g_np_list yet.
+ */
+ np->np_exports = 1;
++ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+
+- spin_lock_bh(&np_lock);
+ list_add_tail(&np->np_list, &g_np_list);
+- spin_unlock_bh(&np_lock);
++ mutex_unlock(&np_lock);
+
+ pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
+ np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
+@@ -453,9 +461,9 @@ int iscsit_del_np(struct iscsi_np *np)
+ }
+ iscsit_del_np_comm(np);
+
+- spin_lock_bh(&np_lock);
++ mutex_lock(&np_lock);
+ list_del(&np->np_list);
+- spin_unlock_bh(&np_lock);
++ mutex_unlock(&np_lock);
+
+ pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
+ np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index d1902696e272..643a0a0268e8 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -1090,6 +1090,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
+ {
+ unsigned int addr = 0;
+ unsigned int modem = 0;
++ unsigned int brk = 0;
+ struct gsm_dlci *dlci;
+ int len = clen;
+ u8 *dp = data;
+@@ -1116,6 +1117,16 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
+ if (len == 0)
+ return;
+ }
++ len--;
++ if (len > 0) {
++ while (gsm_read_ea(&brk, *dp++) == 0) {
++ len--;
++ if (len == 0)
++ return;
++ }
++ modem <<= 7;
++ modem |= (brk & 0x7f);
++ }
+ tty = tty_port_tty_get(&dlci->port);
+ gsm_process_modem(tty, dlci, modem, clen);
+ if (tty) {
+diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
+index cff03e5c3316..33601f891fd7 100644
+--- a/drivers/tty/serial/8250.c
++++ b/drivers/tty/serial/8250.c
+@@ -2701,6 +2701,10 @@ static void serial8250_config_port(struct uart_port *port, int flags)
+ if (up->port.type == PORT_16550A && up->port.iotype == UPIO_AU)
+ up->bugs |= UART_BUG_NOMSR;
+
++ /* HW bugs may trigger IRQ while IIR == NO_INT */
++ if (up->port.type == PORT_TEGRA)
++ up->bugs |= UART_BUG_NOMSR;
++
+ if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
+ autoconfig_irq(up);
+
+diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
+index 6c9bcdfdc9c6..ef24e9660d4b 100644
+--- a/drivers/tty/serial/8250_pci.c
++++ b/drivers/tty/serial/8250_pci.c
+@@ -1147,6 +1147,11 @@ pci_xr17c154_setup(struct serial_private *priv,
+ #define PCI_DEVICE_ID_TITAN_800E 0xA014
+ #define PCI_DEVICE_ID_TITAN_200EI 0xA016
+ #define PCI_DEVICE_ID_TITAN_200EISI 0xA017
++#define PCI_DEVICE_ID_TITAN_200V3 0xA306
++#define PCI_DEVICE_ID_TITAN_400V3 0xA310
++#define PCI_DEVICE_ID_TITAN_410V3 0xA312
++#define PCI_DEVICE_ID_TITAN_800V3 0xA314
++#define PCI_DEVICE_ID_TITAN_800V3B 0xA315
+ #define PCI_DEVICE_ID_OXSEMI_16PCI958 0x9538
+ #define PCIE_DEVICE_ID_NEO_2_OX_IBM 0x00F6
+ #define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001
+@@ -3465,6 +3470,21 @@ static struct pci_device_id serial_pci_tbl[] = {
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EISI,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_oxsemi_2_4000000 },
++ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200V3,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_b0_bt_2_921600 },
++ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400V3,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_b0_4_921600 },
++ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_410V3,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_b0_4_921600 },
++ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800V3,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_b0_4_921600 },
++ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800V3B,
++ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++ pbn_b0_4_921600 },
+
+ { PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_10x_550,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
+index 086f7fe63f94..a6caf62fcba3 100644
+--- a/drivers/tty/serial/pmac_zilog.c
++++ b/drivers/tty/serial/pmac_zilog.c
+@@ -2198,6 +2198,9 @@ static int __init pmz_console_init(void)
+ /* Probe ports */
+ pmz_probe();
+
++ if (pmz_ports_count == 0)
++ return -ENODEV;
++
+ /* TODO: Autoprobe console based on OF */
+ /* pmz_console.index = i; */
+ register_console(&pmz_console);
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 78609d302625..1e3e21139d83 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -722,6 +722,10 @@ int usb_get_configuration(struct usb_device *dev)
+ result = -ENOMEM;
+ goto err;
+ }
++
++ if (dev->quirks & USB_QUIRK_DELAY_INIT)
++ msleep(100);
++
+ result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
+ bigbuffer, length);
+ if (result < 0) {
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index bcde6f65b1c6..3677d229b8c4 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -46,6 +46,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Microsoft LifeCam-VX700 v2.0 */
+ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* Logitech HD Pro Webcams C920 and C930e */
++ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
++ { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ /* Logitech Quickcam Fusion */
+ { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
+
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index 339be1054622..2abdad6a94c9 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -807,8 +807,15 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
+ struct ehci_hcd *ehci = hcd_to_ehci (hcd);
+ u32 status, masked_status, pcd_status = 0, cmd;
+ int bh;
++ unsigned long flags;
+
+- spin_lock (&ehci->lock);
++ /*
++ * For threadirqs option we use spin_lock_irqsave() variant to prevent
++ * deadlock with ehci hrtimer callback, because hrtimer callbacks run
++ * in interrupt context even when threadirqs is specified. We can go
++ * back to spin_lock() variant when hrtimer callbacks become threaded.
++ */
++ spin_lock_irqsave(&ehci->lock, flags);
+
+ status = ehci_readl(ehci, &ehci->regs->status);
+
+@@ -826,7 +833,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
+
+ /* Shared IRQ? */
+ if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
+- spin_unlock(&ehci->lock);
++ spin_unlock_irqrestore(&ehci->lock, flags);
+ return IRQ_NONE;
+ }
+
+@@ -929,7 +936,7 @@ dead:
+
+ if (bh)
+ ehci_work (ehci);
+- spin_unlock (&ehci->lock);
++ spin_unlock_irqrestore(&ehci->lock, flags);
+ if (pcd_status)
+ usb_hcd_poll_rh_status(hcd);
+ return IRQ_HANDLED;
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index 77bbb2357e47..4527b90332c8 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -207,6 +207,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
+ int port;
+ int mask;
+ int changed;
++ bool fs_idle_delay;
+
+ ehci_dbg(ehci, "suspend root hub\n");
+
+@@ -249,6 +250,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
+ ehci->bus_suspended = 0;
+ ehci->owned_ports = 0;
+ changed = 0;
++ fs_idle_delay = false;
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *reg = &ehci->regs->port_status [port];
+@@ -279,16 +281,34 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
+ if (t1 != t2) {
+ ehci_vdbg (ehci, "port %d, %08x -> %08x\n",
+ port + 1, t1, t2);
++ /*
++ * On some controllers, Wake-On-Disconnect will
++ * generate false wakeup signals until the bus
++ * switches over to full-speed idle. For their
++ * sake, add a delay if we need one.
++ */
++ if ((t2 & PORT_WKDISC_E) &&
++ ehci_port_speed(ehci, t2) ==
++ USB_PORT_STAT_HIGH_SPEED)
++ fs_idle_delay = true;
+ ehci_writel(ehci, t2, reg);
+ changed = 1;
+ }
+ }
++ spin_unlock_irq(&ehci->lock);
++
++ if ((changed && ehci->has_hostpc) || fs_idle_delay) {
++ /*
++ * Wait for HCD to enter low-power mode or for the bus
++ * to switch to full-speed idle.
++ */
++ usleep_range(5000, 5500);
++ }
++
++ spin_lock_irq(&ehci->lock);
+
+ if (changed && ehci->has_hostpc) {
+- spin_unlock_irq(&ehci->lock);
+- msleep(5); /* 5 ms for HCD to enter low-power mode */
+ spin_lock_irq(&ehci->lock);
+-
+ port = HCS_N_PORTS(ehci->hcs_params);
+ while (port--) {
+ u32 __iomem *hostpc_reg;
+diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
+index 0a5fda73b3f2..b65912d567ef 100644
+--- a/drivers/usb/host/ehci.h
++++ b/drivers/usb/host/ehci.h
+@@ -147,6 +147,7 @@ struct ehci_hcd { /* one per controller */
+ unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/
+ unsigned has_synopsys_hc_bug:1; /* Synopsys HC */
+ unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
++ unsigned imx28_write_fix:1; /* For Freescale i.MX28 */
+
+ /* required for usb32 quirk */
+ #define OHCI_CTRL_HCFS (3 << 6)
+@@ -654,6 +655,18 @@ static inline unsigned int ehci_readl(const struct ehci_hcd *ehci,
+ #endif
+ }
+
++#ifdef CONFIG_SOC_IMX28
++static inline void imx28_ehci_writel(const unsigned int val,
++ volatile __u32 __iomem *addr)
++{
++ __asm__ ("swp %0, %0, [%1]" : : "r"(val), "r"(addr));
++}
++#else
++static inline void imx28_ehci_writel(const unsigned int val,
++ volatile __u32 __iomem *addr)
++{
++}
++#endif
+ static inline void ehci_writel(const struct ehci_hcd *ehci,
+ const unsigned int val, __u32 __iomem *regs)
+ {
+@@ -662,7 +675,10 @@ static inline void ehci_writel(const struct ehci_hcd *ehci,
+ writel_be(val, regs) :
+ writel(val, regs);
+ #else
+- writel(val, regs);
++ if (ehci->imx28_write_fix)
++ imx28_ehci_writel(val, regs);
++ else
++ writel(val, regs);
+ #endif
+ }
+
+diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h
+index b461311a2ae7..ce13e61b7d55 100644
+--- a/drivers/usb/serial/cypress_m8.h
++++ b/drivers/usb/serial/cypress_m8.h
+@@ -63,7 +63,7 @@
+ #define UART_DSR 0x20 /* data set ready - flow control - device to host */
+ #define CONTROL_RTS 0x10 /* request to send - flow control - host to device */
+ #define UART_CTS 0x10 /* clear to send - flow control - device to host */
+-#define UART_RI 0x10 /* ring indicator - modem - device to host */
++#define UART_RI 0x80 /* ring indicator - modem - device to host */
+ #define UART_CD 0x40 /* carrier detect - modem - device to host */
+ #define CYP_ERROR 0x08 /* received from input report - device to host */
+ /* Note - the below has nothing to do with the "feature report" reset */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 85504bb130a7..5c97d9fcc613 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -163,6 +163,7 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
+@@ -202,6 +203,8 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
+ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
++ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_LP101_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_P200X_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
+@@ -914,6 +917,9 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
+ /* Crucible Devices */
+ { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
++ { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
++ /* Cressi Devices */
++ { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) },
+ { }, /* Optional parameter entry */
+ { } /* Terminating entry */
+ };
+@@ -2193,10 +2199,20 @@ static void ftdi_set_termios(struct tty_struct *tty,
+ }
+
+ /*
+- * All FTDI UART chips are limited to CS7/8. We won't pretend to
++ * All FTDI UART chips are limited to CS7/8. We shouldn't pretend to
+ * support CS5/6 and revert the CSIZE setting instead.
++ *
++ * CS5 however is used to control some smartcard readers which abuse
++ * this limitation to switch modes. Original FTDI chips fall back to
++ * eight data bits.
++ *
++ * TODO: Implement a quirk to only allow this with mentioned
++ * readers. One I know of (Argolis Smartreader V1)
++ * returns "USB smartcard server" as iInterface string.
++ * The vendor didn't bother with a custom VID/PID of
++ * course.
+ */
+- if ((C_CSIZE(tty) != CS8) && (C_CSIZE(tty) != CS7)) {
++ if (C_CSIZE(tty) == CS6) {
+ dev_warn(&port->dev, "requested CSIZE setting not supported\n");
+
+ termios->c_cflag &= ~CSIZE;
+@@ -2243,6 +2259,9 @@ no_skip:
+ urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE;
+ }
+ switch (cflag & CSIZE) {
++ case CS5:
++ dev_dbg(&port->dev, "Setting CS5 quirk\n");
++ break;
+ case CS7:
+ urb_value |= 7;
+ dev_dbg(&port->dev, "Setting CS7\n");
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 61685ed27e8d..71fe2de79bab 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -50,6 +50,7 @@
+ #define TI_XDS100V2_PID 0xa6d0
+
+ #define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
++#define FTDI_EV3CON_PID 0xABB9 /* Mindstorms EV3 Console Adapter */
+
+ /* US Interface Navigator (http://www.usinterface.com/) */
+ #define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */
+@@ -363,6 +364,12 @@
+ /* Sprog II (Andrew Crosland's SprogII DCC interface) */
+ #define FTDI_SPROG_II 0xF0C8
+
++/*
++ * Two of the Tagsys RFID Readers
++ */
++#define FTDI_TAGSYS_LP101_PID 0xF0E9 /* Tagsys L-P101 RFID*/
++#define FTDI_TAGSYS_P200X_PID 0xF0EE /* Tagsys Medio P200x RFID*/
++
+ /* an infrared receiver for user access control with IR tags */
+ #define FTDI_PIEGROUP_PID 0xF208 /* Product Id */
+
+@@ -1307,3 +1314,15 @@
+ * Manufacturer: Crucible Technologies
+ */
+ #define FTDI_CT_COMET_PID 0x8e08
++
++/*
++ * Product: Z3X Box
++ * Manufacturer: Smart GSM Team
++ */
++#define FTDI_Z3X_PID 0x0011
++
++/*
++ * Product: Cressi PC Interface
++ * Manufacturer: Cressi
++ */
++#define FTDI_CRESSI_PID 0x87d0
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index d6d0fb451f2b..5f5047fcfecf 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -325,6 +325,9 @@ static void option_instat_callback(struct urb *urb);
+ * It seems to contain a Qualcomm QSC6240/6290 chipset */
+ #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
+
++/* iBall 3.5G connect wireless modem */
++#define IBALL_3_5G_CONNECT 0x9605
++
+ /* Zoom */
+ #define ZOOM_PRODUCT_4597 0x9607
+
+@@ -1373,7 +1376,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
+@@ -1461,6 +1465,17 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8e, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8f, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff90, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff91, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
+
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
+@@ -1509,6 +1524,7 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
+ },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
++ { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
+ { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
+ /* Pirelli */
+ { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1)},
+@@ -1530,7 +1546,8 @@ static const struct usb_device_id option_ids[] = {
+ /* Cinterion */
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
+- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
++ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index e3936c1002b3..7b293178e8a6 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -153,6 +153,8 @@ struct pl2303_private {
+ u8 line_control;
+ u8 line_status;
+ enum pl2303_type type;
++
++ u8 line_settings[7];
+ };
+
+ static int pl2303_vendor_read(__u16 value, __u16 index,
+@@ -266,10 +268,6 @@ static void pl2303_set_termios(struct tty_struct *tty,
+
+ dbg("%s - port %d", __func__, port->number);
+
+- /* The PL2303 is reported to lose bytes if you change
+- serial settings even to the same values as before. Thus
+- we actually need to filter in this specific case */
+-
+ if (old_termios && !tty_termios_hw_change(tty->termios, old_termios))
+ return;
+
+@@ -407,10 +405,29 @@ static void pl2303_set_termios(struct tty_struct *tty,
+ dbg("%s - parity = none", __func__);
+ }
+
+- i = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
+- SET_LINE_REQUEST, SET_LINE_REQUEST_TYPE,
+- 0, 0, buf, 7, 100);
+- dbg("0x21:0x20:0:0 %d", i);
++ /*
++ * Some PL2303 are known to lose bytes if you change serial settings
++ * even to the same values as before. Thus we actually need to filter
++ * in this specific case.
++ *
++ * Note that the tty_termios_hw_change check above is not sufficient
++ * as a previously requested baud rate may differ from the one
++ * actually used (and stored in old_termios).
++ *
++ * NOTE: No additional locking needed for line_settings as it is
++ * only used in set_termios, which is serialised against itself.
++ */
++ if (!old_termios || memcmp(buf, priv->line_settings, 7)) {
++ i = usb_control_msg(serial->dev,
++ usb_sndctrlpipe(serial->dev, 0),
++ SET_LINE_REQUEST, SET_LINE_REQUEST_TYPE,
++ 0, 0, buf, 7, 100);
++
++ dbg("0x21:0x20:0:0 %d", i);
++
++ if (i == 7)
++ memcpy(priv->line_settings, buf, 7);
++ }
+
+ /* change control lines if we are switching to or from B0 */
+ spin_lock_irqsave(&priv->lock, flags);
+diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
+index 303c34b4973d..3460ee830391 100644
+--- a/drivers/usb/storage/Kconfig
++++ b/drivers/usb/storage/Kconfig
+@@ -19,7 +19,9 @@ config USB_STORAGE
+
+ This option depends on 'SCSI' support being enabled, but you
+ probably also need 'SCSI device support: SCSI disk support'
+- (BLK_DEV_SD) for most USB storage devices.
++ (BLK_DEV_SD) for most USB storage devices. Some devices also
++ will require 'Probe all LUNs on each SCSI device'
++ (SCSI_MULTI_LUN).
+
+ To compile this driver as a module, choose M here: the
+ module will be called usb-storage.
+diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
+index 13b8bcdf3dba..eb660bb9dec2 100644
+--- a/drivers/usb/storage/scsiglue.c
++++ b/drivers/usb/storage/scsiglue.c
+@@ -116,6 +116,10 @@ static int slave_alloc (struct scsi_device *sdev)
+ if (us->subclass == USB_SC_UFI)
+ sdev->sdev_target->pdt_1f_for_no_lun = 1;
+
++ /* Tell the SCSI layer if we know there is more than one LUN */
++ if (us->protocol == USB_PR_BULK && us->max_lun > 0)
++ sdev->sdev_bflags |= BLIST_FORCELUN;
++
+ return 0;
+ }
+
+diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
+index 65a6a75066a8..82e8ed0324e3 100644
+--- a/drivers/usb/storage/unusual_cypress.h
++++ b/drivers/usb/storage/unusual_cypress.h
+@@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
+ "Cypress ISD-300LP",
+ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+
+-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219,
++UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
+ "Super Top",
+ "USB 2.0 SATA BRIDGE",
+ USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 8a3b53122051..08711bc24124 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -226,6 +226,13 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
++/* Patch submitted by Mikhail Zolotaryov <lebon@lebon.org.ua> */
++UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110,
++ "Nokia",
++ "502",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_MAX_SECTORS_64 ),
++
+ #ifdef NO_SDDR09
+ UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100,
+ "Microtech",
+@@ -1434,6 +1441,13 @@ UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
++/* Reported by Moritz Moeller-Herrmann <moritz-kernel@moeller-herrmann.de> */
++UNUSUAL_DEV( 0x0fca, 0x8004, 0x0201, 0x0201,
++ "Research In Motion",
++ "BlackBerry Bold 9000",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_MAX_SECTORS_64 ),
++
+ /* Reported by Michael Stattmann <michael@stattmann.com> */
+ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
+ "Sony Ericsson",
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 49eefdbf58b3..f63719a20d1e 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -6725,7 +6725,7 @@ out:
+ */
+ if (root_dropped == false)
+ btrfs_add_dead_root(root);
+- if (err)
++ if (err && err != -EAGAIN)
+ btrfs_std_error(root->fs_info, err);
+ return;
+ }
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 1372634e4101..622d3223fe56 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -56,7 +56,7 @@
+ #include "inode-map.h"
+
+ struct btrfs_iget_args {
+- u64 ino;
++ struct btrfs_key *location;
+ struct btrfs_root *root;
+ };
+
+@@ -3847,7 +3847,9 @@ again:
+ static int btrfs_init_locked_inode(struct inode *inode, void *p)
+ {
+ struct btrfs_iget_args *args = p;
+- inode->i_ino = args->ino;
++ inode->i_ino = args->location->objectid;
++ memcpy(&BTRFS_I(inode)->location, args->location,
++ sizeof(*args->location));
+ BTRFS_I(inode)->root = args->root;
+ btrfs_set_inode_space_info(args->root, inode);
+ return 0;
+@@ -3856,20 +3858,20 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
+ static int btrfs_find_actor(struct inode *inode, void *opaque)
+ {
+ struct btrfs_iget_args *args = opaque;
+- return args->ino == btrfs_ino(inode) &&
++ return args->location->objectid == BTRFS_I(inode)->location.objectid &&
+ args->root == BTRFS_I(inode)->root;
+ }
+
+ static struct inode *btrfs_iget_locked(struct super_block *s,
+- u64 objectid,
++ struct btrfs_key *location,
+ struct btrfs_root *root)
+ {
+ struct inode *inode;
+ struct btrfs_iget_args args;
+- args.ino = objectid;
++ args.location = location;
+ args.root = root;
+
+- inode = iget5_locked(s, objectid, btrfs_find_actor,
++ inode = iget5_locked(s, location->objectid, btrfs_find_actor,
+ btrfs_init_locked_inode,
+ (void *)&args);
+ return inode;
+@@ -3883,13 +3885,11 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
+ {
+ struct inode *inode;
+
+- inode = btrfs_iget_locked(s, location->objectid, root);
++ inode = btrfs_iget_locked(s, location, root);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ if (inode->i_state & I_NEW) {
+- BTRFS_I(inode)->root = root;
+- memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
+ btrfs_read_locked_inode(inode);
+ if (!is_bad_inode(inode)) {
+ inode_tree_add(inode);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 618ae6f9fbe9..7cbe2f8e9b97 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1327,12 +1327,17 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
+ printk(KERN_INFO "btrfs: Snapshot src from "
+ "another FS\n");
+ ret = -EINVAL;
+- fput(src_file);
+- goto out;
++ } else if (!inode_owner_or_capable(src_inode)) {
++ /*
++ * Subvolume creation is not restricted, but snapshots
++ * are limited to own subvolumes only
++ */
++ ret = -EPERM;
++ } else {
++ ret = btrfs_mksubvol(&file->f_path, name, namelen,
++ BTRFS_I(src_inode)->root,
++ transid, readonly);
+ }
+- ret = btrfs_mksubvol(&file->f_path, name, namelen,
+- BTRFS_I(src_inode)->root,
+- transid, readonly);
+ fput(src_file);
+ }
+ out:
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 19a4f0b6f808..5f4bde20839a 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -663,14 +663,16 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
+ static void __set_page_dirty(struct page *page,
+ struct address_space *mapping, int warn)
+ {
+- spin_lock_irq(&mapping->tree_lock);
++ unsigned long flags;
++
++ spin_lock_irqsave(&mapping->tree_lock, flags);
+ if (page->mapping) { /* Race with truncate? */
+ WARN_ON_ONCE(warn && !PageUptodate(page));
+ account_page_dirtied(page, mapping);
+ radix_tree_tag_set(&mapping->page_tree,
+ page_index(page), PAGECACHE_TAG_DIRTY);
+ }
+- spin_unlock_irq(&mapping->tree_lock);
++ spin_unlock_irqrestore(&mapping->tree_lock, flags);
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+ }
+
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 56c152d33b22..49d6e21052c2 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -1113,7 +1113,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
+ cERROR(1, "Krb5 cifs privacy not supported");
+ goto cifs_parse_mount_err;
+ } else if (strnicmp(value, "krb5", 4) == 0) {
+- vol->secFlg |= CIFSSEC_MAY_KRB5;
++ vol->secFlg |= CIFSSEC_MAY_KRB5 |
++ CIFSSEC_MAY_SIGN;
+ } else if (strnicmp(value, "ntlmsspi", 8) == 0) {
+ vol->secFlg |= CIFSSEC_MAY_NTLMSSP |
+ CIFSSEC_MUST_SIGN;
+diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
+index 26bb512c3566..df4a10fa9909 100644
+--- a/fs/exofs/ore.c
++++ b/fs/exofs/ore.c
+@@ -103,7 +103,7 @@ int ore_verify_layout(unsigned total_comps, struct ore_layout *layout)
+
+ layout->max_io_length =
+ (BIO_MAX_PAGES_KMALLOC * PAGE_SIZE - layout->stripe_unit) *
+- layout->group_width;
++ (layout->group_width - layout->parity);
+ if (layout->parity) {
+ unsigned stripe_length =
+ (layout->group_width - layout->parity) *
+@@ -286,7 +286,8 @@ int ore_get_rw_state(struct ore_layout *layout, struct ore_components *oc,
+ if (length) {
+ ore_calc_stripe_info(layout, offset, length, &ios->si);
+ ios->length = ios->si.length;
+- ios->nr_pages = (ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
++ ios->nr_pages = ((ios->offset & (PAGE_SIZE - 1)) +
++ ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
+ if (layout->parity)
+ _ore_post_alloc_raid_stuff(ios);
+ }
+@@ -536,6 +537,7 @@ void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
+ u64 H = LmodS - G * T;
+
+ u32 N = div_u64(H, U);
++ u32 Nlast;
+
+ /* "H - (N * U)" is just "H % U" so it's bound to u32 */
+ u32 C = (u32)(H - (N * U)) / stripe_unit + G * group_width;
+@@ -568,6 +570,10 @@ void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
+ si->length = T - H;
+ if (si->length > length)
+ si->length = length;
++
++ Nlast = div_u64(H + si->length + U - 1, U);
++ si->maxdevUnits = Nlast - N;
++
+ si->M = M;
+ }
+ EXPORT_SYMBOL(ore_calc_stripe_info);
+@@ -583,13 +589,16 @@ int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
+ int ret;
+
+ if (per_dev->bio == NULL) {
+- unsigned pages_in_stripe = ios->layout->group_width *
+- (ios->layout->stripe_unit / PAGE_SIZE);
+- unsigned nr_pages = ios->nr_pages * ios->layout->group_width /
+- (ios->layout->group_width -
+- ios->layout->parity);
+- unsigned bio_size = (nr_pages + pages_in_stripe) /
+- ios->layout->group_width;
++ unsigned bio_size;
++
++ if (!ios->reading) {
++ bio_size = ios->si.maxdevUnits;
++ } else {
++ bio_size = (ios->si.maxdevUnits + 1) *
++ (ios->layout->group_width - ios->layout->parity) /
++ ios->layout->group_width;
++ }
++ bio_size *= (ios->layout->stripe_unit / PAGE_SIZE);
+
+ per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
+ if (unlikely(!per_dev->bio)) {
+@@ -609,8 +618,12 @@ int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
+ added_len = bio_add_pc_page(q, per_dev->bio, pages[pg],
+ pglen, pgbase);
+ if (unlikely(pglen != added_len)) {
+- ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=%u\n",
+- per_dev->bio->bi_vcnt);
++ /* If bi_vcnt == bi_max then this is a SW BUG */
++ ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=0x%x "
++ "bi_max=0x%x BIO_MAX=0x%x cur_len=0x%x\n",
++ per_dev->bio->bi_vcnt,
++ per_dev->bio->bi_max_vecs,
++ BIO_MAX_PAGES_KMALLOC, cur_len);
+ ret = -ENOMEM;
+ goto out;
+ }
+@@ -1099,7 +1112,7 @@ int ore_truncate(struct ore_layout *layout, struct ore_components *oc,
+ size_attr->attr = g_attr_logical_length;
+ size_attr->attr.val_ptr = &size_attr->newsize;
+
+- ORE_DBGMSG("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
++ ORE_DBGMSG2("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
+ _LLU(oc->comps->obj.id), _LLU(obj_size), i);
+ ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
+ &size_attr->attr);
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 68b160255b24..40f4d062708f 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -745,6 +745,8 @@ do { \
+ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
+ (einode)->xtime.tv_sec = \
+ (signed)le32_to_cpu((raw_inode)->xtime); \
++ else \
++ (einode)->xtime.tv_sec = 0; \
+ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
+ ext4_decode_extra_time(&(einode)->xtime, \
+ raw_inode->xtime ## _extra); \
+diff --git a/fs/file.c b/fs/file.c
+index 4c6992d8f3ba..30bfc997cc49 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -47,7 +47,7 @@ static void *alloc_fdmem(unsigned int size)
+ * vmalloc() if the allocation size will be considered "large" by the VM.
+ */
+ if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
+- void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
++ void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY);
+ if (data != NULL)
+ return data;
+ }
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 5c029fb3e087..cf0098d26409 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1199,22 +1199,6 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
+ return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
+ }
+
+-static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
+- struct pipe_buffer *buf)
+-{
+- return 1;
+-}
+-
+-static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
+- .can_merge = 0,
+- .map = generic_pipe_buf_map,
+- .unmap = generic_pipe_buf_unmap,
+- .confirm = generic_pipe_buf_confirm,
+- .release = generic_pipe_buf_release,
+- .steal = fuse_dev_pipe_buf_steal,
+- .get = generic_pipe_buf_get,
+-};
+-
+ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags)
+@@ -1261,7 +1245,11 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
+ buf->page = bufs[page_nr].page;
+ buf->offset = bufs[page_nr].offset;
+ buf->len = bufs[page_nr].len;
+- buf->ops = &fuse_dev_pipe_buf_ops;
++ /*
++ * Need to be careful about this. Having buf->ops in module
++ * code can Oops if the buffer persists after module unload.
++ */
++ buf->ops = &nosteal_pipe_buf_ops;
+
+ pipe->nrbufs++;
+ page_nr++;
+diff --git a/fs/hpfs/alloc.c b/fs/hpfs/alloc.c
+index 7a5eb2c718c8..e0d57c060a80 100644
+--- a/fs/hpfs/alloc.c
++++ b/fs/hpfs/alloc.c
+@@ -8,6 +8,58 @@
+
+ #include "hpfs_fn.h"
+
++static void hpfs_claim_alloc(struct super_block *s, secno sec)
++{
++ struct hpfs_sb_info *sbi = hpfs_sb(s);
++ if (sbi->sb_n_free != (unsigned)-1) {
++ if (unlikely(!sbi->sb_n_free)) {
++ hpfs_error(s, "free count underflow, allocating sector %08x", sec);
++ sbi->sb_n_free = -1;
++ return;
++ }
++ sbi->sb_n_free--;
++ }
++}
++
++static void hpfs_claim_free(struct super_block *s, secno sec)
++{
++ struct hpfs_sb_info *sbi = hpfs_sb(s);
++ if (sbi->sb_n_free != (unsigned)-1) {
++ if (unlikely(sbi->sb_n_free >= sbi->sb_fs_size)) {
++ hpfs_error(s, "free count overflow, freeing sector %08x", sec);
++ sbi->sb_n_free = -1;
++ return;
++ }
++ sbi->sb_n_free++;
++ }
++}
++
++static void hpfs_claim_dirband_alloc(struct super_block *s, secno sec)
++{
++ struct hpfs_sb_info *sbi = hpfs_sb(s);
++ if (sbi->sb_n_free_dnodes != (unsigned)-1) {
++ if (unlikely(!sbi->sb_n_free_dnodes)) {
++ hpfs_error(s, "dirband free count underflow, allocating sector %08x", sec);
++ sbi->sb_n_free_dnodes = -1;
++ return;
++ }
++ sbi->sb_n_free_dnodes--;
++ }
++}
++
++static void hpfs_claim_dirband_free(struct super_block *s, secno sec)
++{
++ struct hpfs_sb_info *sbi = hpfs_sb(s);
++ if (sbi->sb_n_free_dnodes != (unsigned)-1) {
++ if (unlikely(sbi->sb_n_free_dnodes >= sbi->sb_dirband_size / 4)) {
++ hpfs_error(s, "dirband free count overflow, freeing sector %08x", sec);
++ sbi->sb_n_free_dnodes = -1;
++ return;
++ }
++ sbi->sb_n_free_dnodes++;
++ }
++}
++
+ /*
+ * Check if a sector is allocated in bitmap
+ * This is really slow. Turned on only if chk==2
+@@ -203,9 +255,15 @@ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forwa
+ }
+ sec = 0;
+ ret:
++ if (sec) {
++ i = 0;
++ do
++ hpfs_claim_alloc(s, sec + i);
++ while (unlikely(++i < n));
++ }
+ if (sec && f_p) {
+ for (i = 0; i < forward; i++) {
+- if (!hpfs_alloc_if_possible(s, sec + i + 1)) {
++ if (!hpfs_alloc_if_possible(s, sec + n + i)) {
+ hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i);
+ sec = 0;
+ break;
+@@ -228,6 +286,7 @@ static secno alloc_in_dirband(struct super_block *s, secno near)
+ nr >>= 2;
+ sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0);
+ if (!sec) return 0;
++ hpfs_claim_dirband_alloc(s, sec);
+ return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start;
+ }
+
+@@ -242,6 +301,7 @@ int hpfs_alloc_if_possible(struct super_block *s, secno sec)
+ bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f)));
+ hpfs_mark_4buffers_dirty(&qbh);
+ hpfs_brelse4(&qbh);
++ hpfs_claim_alloc(s, sec);
+ return 1;
+ }
+ hpfs_brelse4(&qbh);
+@@ -275,6 +335,7 @@ void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
+ return;
+ }
+ bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f));
++ hpfs_claim_free(s, sec);
+ if (!--n) {
+ hpfs_mark_4buffers_dirty(&qbh);
+ hpfs_brelse4(&qbh);
+@@ -359,6 +420,7 @@ void hpfs_free_dnode(struct super_block *s, dnode_secno dno)
+ bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f));
+ hpfs_mark_4buffers_dirty(&qbh);
+ hpfs_brelse4(&qbh);
++ hpfs_claim_dirband_free(s, dno);
+ }
+ }
+
+@@ -366,7 +428,7 @@ struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near,
+ dnode_secno *dno, struct quad_buffer_head *qbh)
+ {
+ struct dnode *d;
+- if (hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_dmap) > FREE_DNODES_ADD) {
++ if (hpfs_get_free_dnodes(s) > FREE_DNODES_ADD) {
+ if (!(*dno = alloc_in_dirband(s, near)))
+ if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) return NULL;
+ } else {
+diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
+index 2fa0089a02a8..46549c778001 100644
+--- a/fs/hpfs/dir.c
++++ b/fs/hpfs/dir.c
+@@ -33,25 +33,27 @@ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence)
+ if (whence == SEEK_DATA || whence == SEEK_HOLE)
+ return -EINVAL;
+
++ mutex_lock(&i->i_mutex);
+ hpfs_lock(s);
+
+ /*printk("dir lseek\n");*/
+ if (new_off == 0 || new_off == 1 || new_off == 11 || new_off == 12 || new_off == 13) goto ok;
+- mutex_lock(&i->i_mutex);
+ pos = ((loff_t) hpfs_de_as_down_as_possible(s, hpfs_inode->i_dno) << 4) + 1;
+ while (pos != new_off) {
+ if (map_pos_dirent(i, &pos, &qbh)) hpfs_brelse4(&qbh);
+ else goto fail;
+ if (pos == 12) goto fail;
+ }
+- mutex_unlock(&i->i_mutex);
++ hpfs_add_pos(i, &filp->f_pos);
+ ok:
++ filp->f_pos = new_off;
+ hpfs_unlock(s);
+- return filp->f_pos = new_off;
+-fail:
+ mutex_unlock(&i->i_mutex);
++ return new_off;
++fail:
+ /*printk("illegal lseek: %016llx\n", new_off);*/
+ hpfs_unlock(s);
++ mutex_unlock(&i->i_mutex);
+ return -ESPIPE;
+ }
+
+diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
+index de946170ebb1..f1f2ca737ef3 100644
+--- a/fs/hpfs/hpfs_fn.h
++++ b/fs/hpfs/hpfs_fn.h
+@@ -314,7 +314,7 @@ static inline struct hpfs_sb_info *hpfs_sb(struct super_block *sb)
+ __printf(2, 3)
+ void hpfs_error(struct super_block *, const char *, ...);
+ int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *);
+-unsigned hpfs_count_one_bitmap(struct super_block *, secno);
++unsigned hpfs_get_free_dnodes(struct super_block *);
+
+ /*
+ * local time (HPFS) to GMT (Unix)
+diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
+index f760c15f0bf5..b03e76641da9 100644
+--- a/fs/hpfs/super.c
++++ b/fs/hpfs/super.c
+@@ -115,7 +115,7 @@ static void hpfs_put_super(struct super_block *s)
+ kfree(sbi);
+ }
+
+-unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
++static unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
+ {
+ struct quad_buffer_head qbh;
+ unsigned long *bits;
+@@ -123,7 +123,7 @@ unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
+
+ bits = hpfs_map_4sectors(s, secno, &qbh, 4);
+ if (!bits)
+- return 0;
++ return (unsigned)-1;
+ count = bitmap_weight(bits, 2048 * BITS_PER_BYTE);
+ hpfs_brelse4(&qbh);
+ return count;
+@@ -134,29 +134,45 @@ static unsigned count_bitmaps(struct super_block *s)
+ unsigned n, count, n_bands;
+ n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14;
+ count = 0;
+- for (n = 0; n < n_bands; n++)
+- count += hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n]));
++ for (n = 0; n < n_bands; n++) {
++ unsigned c;
++ c = hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n]));
++ if (c != (unsigned)-1)
++ count += c;
++ }
+ return count;
+ }
+
++unsigned hpfs_get_free_dnodes(struct super_block *s)
++{
++ struct hpfs_sb_info *sbi = hpfs_sb(s);
++ if (sbi->sb_n_free_dnodes == (unsigned)-1) {
++ unsigned c = hpfs_count_one_bitmap(s, sbi->sb_dmap);
++ if (c == (unsigned)-1)
++ return 0;
++ sbi->sb_n_free_dnodes = c;
++ }
++ return sbi->sb_n_free_dnodes;
++}
++
+ static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ {
+ struct super_block *s = dentry->d_sb;
+ struct hpfs_sb_info *sbi = hpfs_sb(s);
+ u64 id = huge_encode_dev(s->s_bdev->bd_dev);
++
+ hpfs_lock(s);
+
+- /*if (sbi->sb_n_free == -1) {*/
++ if (sbi->sb_n_free == (unsigned)-1)
+ sbi->sb_n_free = count_bitmaps(s);
+- sbi->sb_n_free_dnodes = hpfs_count_one_bitmap(s, sbi->sb_dmap);
+- /*}*/
++
+ buf->f_type = s->s_magic;
+ buf->f_bsize = 512;
+ buf->f_blocks = sbi->sb_fs_size;
+ buf->f_bfree = sbi->sb_n_free;
+ buf->f_bavail = sbi->sb_n_free;
+ buf->f_files = sbi->sb_dirband_size / 4;
+- buf->f_ffree = sbi->sb_n_free_dnodes;
++ buf->f_ffree = hpfs_get_free_dnodes(s);
+ buf->f_fsid.val[0] = (u32)id;
+ buf->f_fsid.val[1] = (u32)(id >> 32);
+ buf->f_namelen = 254;
+diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
+index cd8703d19a32..cb7b18614dd8 100644
+--- a/fs/lockd/svclock.c
++++ b/fs/lockd/svclock.c
+@@ -741,6 +741,7 @@ nlmsvc_grant_blocked(struct nlm_block *block)
+ struct nlm_file *file = block->b_file;
+ struct nlm_lock *lock = &block->b_call->a_args.lock;
+ int error;
++ loff_t fl_start, fl_end;
+
+ dprintk("lockd: grant blocked lock %p\n", block);
+
+@@ -758,9 +759,16 @@ nlmsvc_grant_blocked(struct nlm_block *block)
+ }
+
+ /* Try the lock operation again */
++ /* vfs_lock_file() can mangle fl_start and fl_end, but we need
++ * them unchanged for the GRANT_MSG
++ */
+ lock->fl.fl_flags |= FL_SLEEP;
++ fl_start = lock->fl.fl_start;
++ fl_end = lock->fl.fl_end;
+ error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
+ lock->fl.fl_flags &= ~FL_SLEEP;
++ lock->fl.fl_start = fl_start;
++ lock->fl.fl_end = fl_end;
+
+ switch (error) {
+ case 0:
+diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
+index 4e2ee9919951..bb15ccc517fa 100644
+--- a/fs/nfs/blocklayout/extents.c
++++ b/fs/nfs/blocklayout/extents.c
+@@ -44,7 +44,7 @@
+ static inline sector_t normalize(sector_t s, int base)
+ {
+ sector_t tmp = s; /* Since do_div modifies its argument */
+- return s - do_div(tmp, base);
++ return s - sector_div(tmp, base);
+ }
+
+ static inline sector_t normalize_up(sector_t s, int base)
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 3d0293154393..5aea30af26b9 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -6197,7 +6197,7 @@ nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
+ switch (err) {
+ case 0:
+ case -NFS4ERR_WRONGSEC:
+- case -NFS4ERR_NOTSUPP:
++ case -ENOTSUPP:
+ goto out;
+ default:
+ err = nfs4_handle_exception(server, err, &exception);
+@@ -6229,7 +6229,7 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
+ * Fall back on "guess and check" method if
+ * the server doesn't support SECINFO_NO_NAME
+ */
+- if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
++ if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
+ err = nfs4_find_root_sec(server, fhandle, info);
+ goto out_freepage;
+ }
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index 00818c8588eb..4479f66f8a73 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -3056,7 +3056,8 @@ out_overflow:
+ return -EIO;
+ }
+
+-static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
++static bool __decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected,
++ int *nfs_retval)
+ {
+ __be32 *p;
+ uint32_t opnum;
+@@ -3066,19 +3067,32 @@ static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
+ if (unlikely(!p))
+ goto out_overflow;
+ opnum = be32_to_cpup(p++);
+- if (opnum != expected) {
+- dprintk("nfs: Server returned operation"
+- " %d but we issued a request for %d\n",
+- opnum, expected);
+- return -EIO;
+- }
++ if (unlikely(opnum != expected))
++ goto out_bad_operation;
+ nfserr = be32_to_cpup(p);
+- if (nfserr != NFS_OK)
+- return nfs4_stat_to_errno(nfserr);
+- return 0;
++ if (nfserr == NFS_OK)
++ *nfs_retval = 0;
++ else
++ *nfs_retval = nfs4_stat_to_errno(nfserr);
++ return true;
++out_bad_operation:
++ dprintk("nfs: Server returned operation"
++ " %d but we issued a request for %d\n",
++ opnum, expected);
++ *nfs_retval = -EREMOTEIO;
++ return false;
+ out_overflow:
+ print_overflow_msg(__func__, xdr);
+- return -EIO;
++ *nfs_retval = -EIO;
++ return false;
++}
++
++static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
++{
++ int retval;
++
++ __decode_op_hdr(xdr, expected, &retval);
++ return retval;
+ }
+
+ /* Dummy routine */
+@@ -4744,11 +4758,12 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
+ uint32_t savewords, bmlen, i;
+ int status;
+
+- status = decode_op_hdr(xdr, OP_OPEN);
+- if (status != -EIO)
+- nfs_increment_open_seqid(status, res->seqid);
+- if (!status)
+- status = decode_stateid(xdr, &res->stateid);
++ if (!__decode_op_hdr(xdr, OP_OPEN, &status))
++ return status;
++ nfs_increment_open_seqid(status, res->seqid);
++ if (status)
++ return status;
++ status = decode_stateid(xdr, &res->stateid);
+ if (unlikely(status))
+ return status;
+
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 1c7d45e293c4..d20d64c30b58 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -2389,8 +2389,8 @@ out_dio:
+
+ if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
+ ((file->f_flags & O_DIRECT) && !direct_io)) {
+- ret = filemap_fdatawrite_range(file->f_mapping, pos,
+- pos + count - 1);
++ ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
++ *ppos + count - 1);
+ if (ret < 0)
+ written = ret;
+
+@@ -2403,8 +2403,8 @@ out_dio:
+ }
+
+ if (!ret)
+- ret = filemap_fdatawait_range(file->f_mapping, pos,
+- pos + count - 1);
++ ret = filemap_fdatawait_range(file->f_mapping, *ppos,
++ *ppos + count - 1);
+ }
+
+ /*
+diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
+index 92fcd575775a..a40e5cedd6d4 100644
+--- a/fs/ocfs2/quota_global.c
++++ b/fs/ocfs2/quota_global.c
+@@ -712,6 +712,12 @@ static int ocfs2_release_dquot(struct dquot *dquot)
+ */
+ if (status < 0)
+ mlog_errno(status);
++ /*
++ * Clear dq_off so that we search for the structure in quota file next
++ * time we acquire it. The structure might be deleted and reallocated
++ * elsewhere by another node while our dquot structure is on freelist.
++ */
++ dquot->dq_off = 0;
+ clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+ out_trans:
+ ocfs2_commit_trans(osb, handle);
+@@ -750,16 +756,17 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
+ status = ocfs2_lock_global_qf(info, 1);
+ if (status < 0)
+ goto out;
+- if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
+- status = ocfs2_qinfo_lock(info, 0);
+- if (status < 0)
+- goto out_dq;
+- status = qtree_read_dquot(&info->dqi_gi, dquot);
+- ocfs2_qinfo_unlock(info, 0);
+- if (status < 0)
+- goto out_dq;
+- }
+- set_bit(DQ_READ_B, &dquot->dq_flags);
++ status = ocfs2_qinfo_lock(info, 0);
++ if (status < 0)
++ goto out_dq;
++ /*
++ * We always want to read dquot structure from disk because we don't
++ * know what happened with it while it was on freelist.
++ */
++ status = qtree_read_dquot(&info->dqi_gi, dquot);
++ ocfs2_qinfo_unlock(info, 0);
++ if (status < 0)
++ goto out_dq;
+
+ OCFS2_DQUOT(dquot)->dq_use_count++;
+ OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
+diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
+index f100bf70a906..b6cfcf263d7f 100644
+--- a/fs/ocfs2/quota_local.c
++++ b/fs/ocfs2/quota_local.c
+@@ -1300,10 +1300,6 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
+ ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
+
+ out:
+- /* Clear the read bit so that next time someone uses this
+- * dquot he reads fresh info from disk and allocates local
+- * dquot structure */
+- clear_bit(DQ_READ_B, &dquot->dq_flags);
+ return status;
+ }
+
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 5b572c89e6c4..3d5d717b73eb 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -580,9 +580,17 @@ int dquot_scan_active(struct super_block *sb,
+ dqstats_inc(DQST_LOOKUPS);
+ dqput(old_dquot);
+ old_dquot = dquot;
+- ret = fn(dquot, priv);
+- if (ret < 0)
+- goto out;
++ /*
++ * ->release_dquot() can be racing with us. Our reference
++ * protects us from new calls to it so just wait for any
++ * outstanding call and recheck the DQ_ACTIVE_B after that.
++ */
++ wait_on_dquot(dquot);
++ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
++ ret = fn(dquot, priv);
++ if (ret < 0)
++ goto out;
++ }
+ spin_lock(&dq_list_lock);
+ /* We are safe to continue now because our dquot could not
+ * be moved out of the inuse list while we hold the reference */
+diff --git a/fs/splice.c b/fs/splice.c
+index 58ab918afb4c..714471d27531 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -554,6 +554,24 @@ static const struct pipe_buf_operations default_pipe_buf_ops = {
+ .get = generic_pipe_buf_get,
+ };
+
++static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
++ struct pipe_buffer *buf)
++{
++ return 1;
++}
++
++/* Pipe buffer operations for a socket and similar. */
++const struct pipe_buf_operations nosteal_pipe_buf_ops = {
++ .can_merge = 0,
++ .map = generic_pipe_buf_map,
++ .unmap = generic_pipe_buf_unmap,
++ .confirm = generic_pipe_buf_confirm,
++ .release = generic_pipe_buf_release,
++ .steal = generic_pipe_buf_nosteal,
++ .get = generic_pipe_buf_get,
++};
++EXPORT_SYMBOL(nosteal_pipe_buf_ops);
++
+ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
+ unsigned long vlen, loff_t offset)
+ {
+diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
+index 643d6c4a5993..e2a360ad6108 100644
+--- a/include/linux/compiler-gcc4.h
++++ b/include/linux/compiler-gcc4.h
+@@ -39,11 +39,7 @@
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+-#if GCC_VERSION <= 40801
+-# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+-#else
+-# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
+-#endif
++#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+ #if __GNUC_MINOR__ >= 5
+ /*
+diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
+index 265e2c3cbd1c..f5df3dcfe811 100644
+--- a/include/linux/jiffies.h
++++ b/include/linux/jiffies.h
+@@ -106,13 +106,13 @@ static inline u64 get_jiffies_64(void)
+ #define time_after(a,b) \
+ (typecheck(unsigned long, a) && \
+ typecheck(unsigned long, b) && \
+- ((long)(b) - (long)(a) < 0))
++ ((long)((b) - (a)) < 0))
+ #define time_before(a,b) time_after(b,a)
+
+ #define time_after_eq(a,b) \
+ (typecheck(unsigned long, a) && \
+ typecheck(unsigned long, b) && \
+- ((long)(a) - (long)(b) >= 0))
++ ((long)((a) - (b)) >= 0))
+ #define time_before_eq(a,b) time_after_eq(b,a)
+
+ /*
+@@ -135,13 +135,13 @@ static inline u64 get_jiffies_64(void)
+ #define time_after64(a,b) \
+ (typecheck(__u64, a) && \
+ typecheck(__u64, b) && \
+- ((__s64)(b) - (__s64)(a) < 0))
++ ((__s64)((b) - (a)) < 0))
+ #define time_before64(a,b) time_after64(b,a)
+
+ #define time_after_eq64(a,b) \
+ (typecheck(__u64, a) && \
+ typecheck(__u64, b) && \
+- ((__s64)(a) - (__s64)(b) >= 0))
++ ((__s64)((a) - (b)) >= 0))
+ #define time_before_eq64(a,b) time_after_eq64(b,a)
+
+ /*
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 62467ca9603d..b1fcdba2cfbe 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -393,6 +393,8 @@ enum {
+ ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
+ ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
+ ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
++ ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
++ ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
+
+ /* DMA mask for user DMA control: User visible values; DO NOT
+ renumber */
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+index 0072a5366e97..8778c26c942e 100644
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -160,6 +160,8 @@ int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
+ int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
+
++extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
++
+ /* for F_SETPIPE_SZ and F_GETPIPE_SZ */
+ long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
+ struct pipe_inode_info *get_pipe_info(struct file *file);
+diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
+index df0a779c1bbd..f3680aa895bc 100644
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -60,6 +60,12 @@ struct tp_module {
+ unsigned int num_tracepoints;
+ struct tracepoint * const *tracepoints_ptrs;
+ };
++bool trace_module_has_bad_taint(struct module *mod);
++#else
++static inline bool trace_module_has_bad_taint(struct module *mod)
++{
++ return false;
++}
+ #endif /* CONFIG_MODULES */
+
+ struct tracepoint_iter {
+diff --git a/include/scsi/osd_ore.h b/include/scsi/osd_ore.h
+index f05fa826f89e..47b458c21fed 100644
+--- a/include/scsi/osd_ore.h
++++ b/include/scsi/osd_ore.h
+@@ -101,6 +101,7 @@ struct ore_striping_info {
+ unsigned unit_off;
+ unsigned cur_pg;
+ unsigned cur_comp;
++ unsigned maxdevUnits;
+ };
+
+ struct ore_io_state;
+diff --git a/include/xen/Kbuild b/include/xen/Kbuild
+index 84ad8f02fee5..563161a84eec 100644
+--- a/include/xen/Kbuild
++++ b/include/xen/Kbuild
+@@ -1,2 +1,4 @@
+ header-y += evtchn.h
++header-y += gntalloc.h
++header-y += gntdev.h
+ header-y += privcmd.h
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 2a1ffb7b1915..93fc15e6ddcb 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2785,9 +2785,14 @@ static void cgroup_enable_task_cg_lists(void)
+ * We should check if the process is exiting, otherwise
+ * it will race with cgroup_exit() in that the list
+ * entry won't be deleted though the process has exited.
++ * Do it while holding siglock so that we don't end up
++ * racing against cgroup_exit().
+ */
++ spin_lock_irq(&p->sighand->siglock);
+ if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
+ list_add(&p->cg_list, &p->cgroups->tasks);
++ spin_unlock_irq(&p->sighand->siglock);
++
+ task_unlock(p);
+ } while_each_thread(g, p);
+ write_unlock(&css_set_lock);
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 57eb98d1d887..1e2c5f025dcf 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -2366,9 +2366,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
+
+ task_lock(current);
+ cs = nearest_hardwall_ancestor(task_cs(current));
++ allowed = node_isset(node, cs->mems_allowed);
+ task_unlock(current);
+
+- allowed = node_isset(node, cs->mems_allowed);
+ mutex_unlock(&callback_mutex);
+ return allowed;
+ }
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 83d5621b7d90..b15b4f75131d 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -7101,14 +7101,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
+ static void __perf_event_exit_context(void *__info)
+ {
+ struct perf_event_context *ctx = __info;
+- struct perf_event *event, *tmp;
++ struct perf_event *event;
+
+ perf_pmu_rotate_stop(ctx->pmu);
+
+- list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
+- __perf_remove_from_context(event);
+- list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
++ rcu_read_lock();
++ list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
+ __perf_remove_from_context(event);
++ rcu_read_unlock();
+ }
+
+ static void perf_event_exit_cpu_context(int cpu)
+@@ -7132,11 +7132,11 @@ static void perf_event_exit_cpu(int cpu)
+ {
+ struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
+
++ perf_event_exit_cpu_context(cpu);
++
+ mutex_lock(&swhash->hlist_mutex);
+ swevent_hlist_release(swhash);
+ mutex_unlock(&swhash->hlist_mutex);
+-
+- perf_event_exit_cpu_context(cpu);
+ }
+ #else
+ static inline void perf_event_exit_cpu(int cpu) { }
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 52bdd58f7328..477522987ce0 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -819,7 +819,7 @@ static int irq_thread(void *data)
+
+ wake = atomic_dec_and_test(&desc->threads_active);
+
+- if (wake && waitqueue_active(&desc->wait_for_threads))
++ if (wake)
+ wake_up(&desc->wait_for_threads);
+ }
+
+diff --git a/kernel/printk.c b/kernel/printk.c
+index 16688ecc918e..8fac434b0bcc 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -1165,7 +1165,6 @@ static int __cpuinit console_cpu_notify(struct notifier_block *self,
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_DEAD:
+- case CPU_DYING:
+ case CPU_DOWN_FAILED:
+ case CPU_UP_CANCELED:
+ console_lock();
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index 37f3f3925d3d..4c6dae17cd1e 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -4953,15 +4953,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ /*
+- * Ensure the task's vruntime is normalized, so that when its
++ * Ensure the task's vruntime is normalized, so that when it's
+ * switched back to the fair class the enqueue_entity(.flags=0) will
+ * do the right thing.
+ *
+- * If it was on_rq, then the dequeue_entity(.flags=0) will already
+- * have normalized the vruntime, if it was !on_rq, then only when
++ * If it's on_rq, then the dequeue_entity(.flags=0) will already
++ * have normalized the vruntime, if it's !on_rq, then only when
+ * the task is sleeping will it still have non-normalized vruntime.
+ */
+- if (!se->on_rq && p->state != TASK_RUNNING) {
++ if (!p->on_rq && p->state != TASK_RUNNING) {
+ /*
+ * Fix up our vruntime so that the current sleep doesn't
+ * cause 'unlimited' sleep bonus.
+diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
+index a470154e0408..955560e25e05 100644
+--- a/kernel/time/jiffies.c
++++ b/kernel/time/jiffies.c
+@@ -51,7 +51,13 @@
+ * HZ shrinks, so values greater than 8 overflow 32bits when
+ * HZ=100.
+ */
++#if HZ < 34
++#define JIFFIES_SHIFT 6
++#elif HZ < 67
++#define JIFFIES_SHIFT 7
++#else
+ #define JIFFIES_SHIFT 8
++#endif
+
+ static cycle_t jiffies_read(struct clocksource *cs)
+ {
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index cb7f33e548b3..068c0929904c 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -1161,7 +1161,7 @@ void get_monotonic_boottime(struct timespec *ts)
+ } while (read_seqretry(&xtime_lock, seq));
+
+ set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
+- ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
++ (s64)ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
+ }
+ EXPORT_SYMBOL_GPL(get_monotonic_boottime);
+
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index cf8b439727b0..a65fa363c024 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -190,6 +190,12 @@ static void update_global_ops(void)
+ global_ops.func = func;
+ }
+
++#ifdef CONFIG_FUNCTION_GRAPH_TRACER
++static void update_function_graph_func(void);
++#else
++static inline void update_function_graph_func(void) { }
++#endif
++
+ static void update_ftrace_function(void)
+ {
+ ftrace_func_t func;
+@@ -237,6 +243,8 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
+ {
+ struct ftrace_ops **p;
+
++ update_function_graph_func();
++
+ /*
+ * If we are removing the last function, then simply point
+ * to the ftrace_stub.
+@@ -283,6 +291,17 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
+ return 0;
+ }
+
++static void ftrace_sync(struct work_struct *work)
++{
++ /*
++ * This function is just a stub to implement a hard force
++ * of synchronize_sched(). This requires synchronizing
++ * tasks even in userspace and idle.
++ *
++ * Yes, function tracing is rude.
++ */
++}
++
+ static int __unregister_ftrace_function(struct ftrace_ops *ops)
+ {
+ int ret;
+@@ -308,13 +327,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
+ if (ftrace_enabled)
+ update_ftrace_function();
+
+- /*
+- * Dynamic ops may be freed, we must make sure that all
+- * callers are done before leaving this function.
+- */
+- if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
+- synchronize_sched();
+-
+ return 0;
+ }
+
+@@ -1775,6 +1787,24 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+ return 0;
+
+ ftrace_run_update_code(command);
++
++ /*
++ * Dynamic ops may be freed, we must make sure that all
++ * callers are done before leaving this function.
++ * The same goes for freeing the per_cpu data of the control
++ * ops.
++ *
++ * Again, normal synchronize_sched() is not good enough.
++ * We need to do a hard force of sched synchronization.
++ * This is because we use preempt_disable() to do RCU, but
++ * the function tracers can be called where RCU is not watching
++ * (like before user_exit()). We can not rely on the RCU
++ * infrastructure to do the synchronization, thus we must do it
++ * ourselves.
++ */
++ if (ops->flags & (FTRACE_OPS_FL_DYNAMIC))
++ schedule_on_each_cpu(ftrace_sync);
++
+ return 0;
+ }
+
+@@ -4045,6 +4075,7 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
+ trace_func_graph_ret_t ftrace_graph_return =
+ (trace_func_graph_ret_t)ftrace_stub;
+ trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
++static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
+
+ /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
+ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
+@@ -4185,6 +4216,30 @@ static struct ftrace_ops fgraph_ops __read_mostly = {
+ .flags = FTRACE_OPS_FL_GLOBAL,
+ };
+
++static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
++{
++ if (!ftrace_ops_test(&global_ops, trace->func))
++ return 0;
++ return __ftrace_graph_entry(trace);
++}
++
++/*
++ * The function graph tracer should only trace the functions defined
++ * by set_ftrace_filter and set_ftrace_notrace. If another function
++ * tracer ops is registered, the graph tracer requires testing the
++ * function against the global ops, and not just trace any function
++ * that any ftrace_ops registered.
++ */
++static void update_function_graph_func(void)
++{
++ if (ftrace_ops_list == &ftrace_list_end ||
++ (ftrace_ops_list == &global_ops &&
++ global_ops.next == &ftrace_list_end))
++ ftrace_graph_entry = __ftrace_graph_entry;
++ else
++ ftrace_graph_entry = ftrace_graph_entry_test;
++}
++
+ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ trace_func_graph_ent_t entryfunc)
+ {
+@@ -4209,7 +4264,16 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ }
+
+ ftrace_graph_return = retfunc;
+- ftrace_graph_entry = entryfunc;
++
++ /*
++ * Update the indirect function to the entryfunc, and the
++ * function that gets called to the entry_test first. Then
++ * call the update fgraph entry function to determine if
++ * the entryfunc should be called directly or not.
++ */
++ __ftrace_graph_entry = entryfunc;
++ ftrace_graph_entry = ftrace_graph_entry_test;
++ update_function_graph_func();
+
+ ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
+
+@@ -4228,6 +4292,7 @@ void unregister_ftrace_graph(void)
+ ftrace_graph_active--;
+ ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+ ftrace_graph_entry = ftrace_graph_entry_stub;
++ __ftrace_graph_entry = ftrace_graph_entry_stub;
+ ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
+ unregister_pm_notifier(&ftrace_suspend_notifier);
+ unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 6fdc62943e8d..648f25aad84c 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -2040,6 +2040,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+ write &= RB_WRITE_MASK;
+ tail = write - length;
+
++ /*
++ * If this is the first commit on the page, then it has the same
++ * timestamp as the page itself.
++ */
++ if (!tail)
++ delta = 0;
++
+ /* See if we shot pass the end of this buffer page */
+ if (unlikely(write > BUF_PAGE_SIZE))
+ return rb_move_tail(cpu_buffer, length, tail,
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index c212a7f934ec..875fed434e3b 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -1345,6 +1345,16 @@ static void trace_module_add_events(struct module *mod)
+ struct ftrace_module_file_ops *file_ops = NULL;
+ struct ftrace_event_call **call, **start, **end;
+
++ if (!mod->num_trace_events)
++ return;
++
++ /* Don't add infrastructure for mods without tracepoints */
++ if (trace_module_has_bad_taint(mod)) {
++ pr_err("%s: module has bad taint, not creating trace events\n",
++ mod->name);
++ return;
++ }
++
+ start = mod->trace_events;
+ end = mod->trace_events + mod->num_trace_events;
+
+diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
+index f1539decd99d..41b25a0ed616 100644
+--- a/kernel/tracepoint.c
++++ b/kernel/tracepoint.c
+@@ -628,6 +628,11 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter)
+ EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
+
+ #ifdef CONFIG_MODULES
++bool trace_module_has_bad_taint(struct module *mod)
++{
++ return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
++}
++
+ static int tracepoint_module_coming(struct module *mod)
+ {
+ struct tp_module *tp_mod, *iter;
+@@ -638,7 +643,7 @@ static int tracepoint_module_coming(struct module *mod)
+ * module headers (for forced load), to make sure we don't cause a crash.
+ * Staging and out-of-tree GPL modules are fine.
+ */
+- if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
++ if (trace_module_has_bad_taint(mod))
+ return 0;
+ mutex_lock(&tracepoints_mutex);
+ tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 0bc9ff0f8e8b..563820c59d54 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1474,12 +1474,19 @@ static void destroy_worker(struct worker *worker)
+ if (worker->flags & WORKER_IDLE)
+ gcwq->nr_idle--;
+
++ /*
++ * Once WORKER_DIE is set, the kworker may destroy itself at any
++ * point. Pin to ensure the task stays until we're done with it.
++ */
++ get_task_struct(worker->task);
++
+ list_del_init(&worker->entry);
+ worker->flags |= WORKER_DIE;
+
+ spin_unlock_irq(&gcwq->lock);
+
+ kthread_stop(worker->task);
++ put_task_struct(worker->task);
+ kfree(worker);
+
+ spin_lock_irq(&gcwq->lock);
+diff --git a/lib/Makefile b/lib/Makefile
+index a4da283f5dc0..c06efca00c05 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -40,6 +40,7 @@ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
+ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
+ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+
++GCOV_PROFILE_hweight.o := n
+ CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
+ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index 069b64e521fc..4dda9482f873 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -213,7 +213,7 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
+ * implementation used by LSMs.
+ */
+ if (has_capability_noaudit(p, CAP_SYS_ADMIN))
+- points -= 30;
++ points -= (points * 3) / 100;
+
+ /*
+ * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index ea3f83b21197..b5cd796fcd6c 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -1776,11 +1776,12 @@ int __set_page_dirty_nobuffers(struct page *page)
+ if (!TestSetPageDirty(page)) {
+ struct address_space *mapping = page_mapping(page);
+ struct address_space *mapping2;
++ unsigned long flags;
+
+ if (!mapping)
+ return 1;
+
+- spin_lock_irq(&mapping->tree_lock);
++ spin_lock_irqsave(&mapping->tree_lock, flags);
+ mapping2 = page_mapping(page);
+ if (mapping2) { /* Race with truncate? */
+ BUG_ON(mapping2 != mapping);
+@@ -1789,7 +1790,7 @@ int __set_page_dirty_nobuffers(struct page *page)
+ radix_tree_tag_set(&mapping->page_tree,
+ page_index(page), PAGECACHE_TAG_DIRTY);
+ }
+- spin_unlock_irq(&mapping->tree_lock);
++ spin_unlock_irqrestore(&mapping->tree_lock, flags);
+ if (mapping->host) {
+ /* !PageAnon && !swapper_space */
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 8685697b88b2..52a2f364f087 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -581,7 +581,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
+ spinlock_t *ptl;
+
+ if (unlikely(PageHuge(page))) {
++ /* when pud is not present, pte will be NULL */
+ pte = huge_pte_offset(mm, address);
++ if (!pte)
++ return NULL;
++
+ ptl = &mm->page_table_lock;
+ goto check;
+ }
+diff --git a/mm/slub.c b/mm/slub.c
+index 5710788c58e7..fc719f7dd407 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -4483,7 +4483,13 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
+ page = c->partial;
+
+ if (page) {
+- x = page->pobjects;
++ node = page_to_nid(page);
++ if (flags & SO_TOTAL)
++ WARN_ON_ONCE(1);
++ else if (flags & SO_OBJECTS)
++ WARN_ON_ONCE(1);
++ else
++ x = page->pages;
+ total += x;
+ nodes[node] += x;
+ }
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index fad1830ddda5..dbd2b67bb50d 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -1649,7 +1649,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ p->max = 0;
+ swap_map = p->swap_map;
+ p->swap_map = NULL;
+- p->flags = 0;
+ spin_unlock(&swap_lock);
+ mutex_unlock(&swapon_mutex);
+ vfree(swap_map);
+@@ -1667,6 +1666,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ mutex_unlock(&inode->i_mutex);
+ }
+ filp_close(swap_file, NULL);
++
++ /*
++ * Clear the SWP_USED flag after all resources are freed so that swapon
++ * can reuse this swap_info in alloc_swap_info() safely. It is ok to
++ * not hold p->lock after we cleared its SWP_WRITEOK.
++ */
++ spin_lock(&swap_lock);
++ p->flags = 0;
++ spin_unlock(&swap_lock);
++
+ err = 0;
+ atomic_inc(&proc_poll_event);
+ wake_up_interruptible(&proc_poll_wait);
+diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
+index c0c21b1ce1ec..6af54f2d0da0 100644
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -718,6 +718,13 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
+ attach_rules(&ops->rules_list, dev);
+ break;
+
++ case NETDEV_CHANGENAME:
++ list_for_each_entry(ops, &net->rules_ops, list) {
++ detach_rules(&ops->rules_list, dev);
++ attach_rules(&ops->rules_list, dev);
++ }
++ break;
++
+ case NETDEV_UNREGISTER:
+ list_for_each_entry(ops, &net->rules_ops, list)
+ detach_rules(&ops->rules_list, dev);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index af9c3c6832f6..5d6cb54c0965 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -74,36 +74,6 @@
+ static struct kmem_cache *skbuff_head_cache __read_mostly;
+ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
+
+-static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
+- struct pipe_buffer *buf)
+-{
+- put_page(buf->page);
+-}
+-
+-static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
+- struct pipe_buffer *buf)
+-{
+- get_page(buf->page);
+-}
+-
+-static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
+- struct pipe_buffer *buf)
+-{
+- return 1;
+-}
+-
+-
+-/* Pipe buffer operations for a socket. */
+-static const struct pipe_buf_operations sock_pipe_buf_ops = {
+- .can_merge = 0,
+- .map = generic_pipe_buf_map,
+- .unmap = generic_pipe_buf_unmap,
+- .confirm = generic_pipe_buf_confirm,
+- .release = sock_pipe_buf_release,
+- .steal = sock_pipe_buf_steal,
+- .get = sock_pipe_buf_get,
+-};
+-
+ /*
+ * Keep out-of-line to prevent kernel bloat.
+ * __builtin_return_address is not used because it is not always
+@@ -1665,7 +1635,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+ .partial = partial,
+ .nr_pages_max = MAX_SKB_FRAGS,
+ .flags = flags,
+- .ops = &sock_pipe_buf_ops,
++ .ops = &nosteal_pipe_buf_ops,
+ .spd_release = sock_spd_release,
+ };
+ struct sk_buff *frag_iter;
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 488600c167c9..1914f5a6d6e7 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -292,6 +292,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
+ return NULL;
+
+ spin_lock_init(&sta->lock);
++ spin_lock_init(&sta->ps_lock);
+ INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
+ INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
+ mutex_init(&sta->ampdu_mlme.mtx);
+@@ -1141,6 +1142,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+
+ skb_queue_head_init(&pending);
+
++ /* sync with ieee80211_tx_h_unicast_ps_buf */
++ spin_lock(&sta->ps_lock);
+ /* Send all buffered frames to the station */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ int count = skb_queue_len(&pending), tmp;
+@@ -1160,6 +1163,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+ }
+
+ ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta);
++ spin_unlock(&sta->ps_lock);
+
+ local->total_ps_buffered -= buffered;
+
+@@ -1207,6 +1211,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
+ memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
+ memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
++ nullfunc->seq_ctrl = 0;
+
+ skb->priority = tid;
+ skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
+diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
+index 8c8ce05ad26f..a68155f1c4b1 100644
+--- a/net/mac80211/sta_info.h
++++ b/net/mac80211/sta_info.h
+@@ -216,6 +216,7 @@ struct sta_ampdu_mlme {
+ * @drv_unblock_wk: used for driver PS unblocking
+ * @listen_interval: listen interval of this station, when we're acting as AP
+ * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly
++ * @ps_lock: used for powersave (when mac80211 is the AP) related locking
+ * @ps_tx_buf: buffers (per AC) of frames to transmit to this station
+ * when it leaves power saving state or polls
+ * @tx_filtered: buffers (per AC) of frames we already tried to
+@@ -284,10 +285,8 @@ struct sta_info {
+ /* use the accessors defined below */
+ unsigned long _flags;
+
+- /*
+- * STA powersave frame queues, no more than the internal
+- * locking required.
+- */
++ /* STA powersave lock and frame queues */
++ spinlock_t ps_lock;
+ struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS];
+ struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS];
+ unsigned long driver_buffered_tids;
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 4ff35bfd068e..25bbb2e811bf 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -464,6 +464,20 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
+ #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
+ if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
+ purge_old_ps_buffers(tx->local);
++
++ /* sync with ieee80211_sta_ps_deliver_wakeup */
++ spin_lock(&sta->ps_lock);
++ /*
++ * STA woke up the meantime and all the frames on ps_tx_buf have
++ * been queued to pending queue. No reordering can happen, go
++ * ahead and Tx the packet.
++ */
++ if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
++ !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
++ spin_unlock(&sta->ps_lock);
++ return TX_CONTINUE;
++ }
++
+ if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
+ struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
+ #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
+@@ -480,6 +494,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
+ info->control.vif = &tx->sdata->vif;
+ info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
++ spin_unlock(&sta->ps_lock);
+
+ if (!timer_pending(&local->sta_cleanup))
+ mod_timer(&local->sta_cleanup,
+@@ -884,7 +899,7 @@ static int ieee80211_fragment(struct ieee80211_local *local,
+ pos += fraglen;
+ }
+
+- skb->len = hdrlen + per_fragm;
++ skb_trim(skb, hdrlen + per_fragm);
+ return 0;
+ }
+
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index cb1c4303a07a..f131caf06a19 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -747,6 +747,13 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
+ struct sctp_chunk auth;
+ sctp_ierror_t ret;
+
++ /* Make sure that we and the peer are AUTH capable */
++ if (!sctp_auth_enable || !new_asoc->peer.auth_capable) {
++ kfree_skb(chunk->auth_chunk);
++ sctp_association_free(new_asoc);
++ return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
++ }
++
+ /* set-up our fake chunk so that we can process it */
+ auth.skb = chunk->auth_chunk;
+ auth.asoc = chunk->asoc;
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index c53d01e0a2cf..c28eb7b77f45 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -70,6 +70,7 @@
+ #include <linux/init.h>
+ #include <linux/crypto.h>
+ #include <linux/slab.h>
++#include <linux/compat.h>
+
+ #include <net/ip.h>
+ #include <net/icmp.h>
+@@ -1376,11 +1377,19 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
+ /*
+ * New (hopefully final) interface for the API.
+ * We use the sctp_getaddrs_old structure so that use-space library
+- * can avoid any unnecessary allocations. The only defferent part
++ * can avoid any unnecessary allocations. The only different part
+ * is that we store the actual length of the address buffer into the
+- * addrs_num structure member. That way we can re-use the existing
++ * addrs_num structure member. That way we can re-use the existing
+ * code.
+ */
++#ifdef CONFIG_COMPAT
++struct compat_sctp_getaddrs_old {
++ sctp_assoc_t assoc_id;
++ s32 addr_num;
++ compat_uptr_t addrs; /* struct sockaddr * */
++};
++#endif
++
+ SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
+ char __user *optval,
+ int __user *optlen)
+@@ -1389,16 +1398,30 @@ SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
+ sctp_assoc_t assoc_id = 0;
+ int err = 0;
+
+- if (len < sizeof(param))
+- return -EINVAL;
++#ifdef CONFIG_COMPAT
++ if (is_compat_task()) {
++ struct compat_sctp_getaddrs_old param32;
+
+- if (copy_from_user(&param, optval, sizeof(param)))
+- return -EFAULT;
++ if (len < sizeof(param32))
++ return -EINVAL;
++ if (copy_from_user(&param32, optval, sizeof(param32)))
++ return -EFAULT;
+
+- err = __sctp_setsockopt_connectx(sk,
+- (struct sockaddr __user *)param.addrs,
+- param.addr_num, &assoc_id);
++ param.assoc_id = param32.assoc_id;
++ param.addr_num = param32.addr_num;
++ param.addrs = compat_ptr(param32.addrs);
++ } else
++#endif
++ {
++ if (len < sizeof(param))
++ return -EINVAL;
++ if (copy_from_user(&param, optval, sizeof(param)))
++ return -EFAULT;
++ }
+
++ err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
++ param.addrs, param.addr_num,
++ &assoc_id);
+ if (err == 0 || err == -EINPROGRESS) {
+ if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
+ return -EFAULT;
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index b2250da4b598..a0e55e5a7cd1 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1002,9 +1002,13 @@ call_refreshresult(struct rpc_task *task)
+ task->tk_action = call_refresh;
+ switch (status) {
+ case 0:
+- if (rpcauth_uptodatecred(task))
++ if (rpcauth_uptodatecred(task)) {
+ task->tk_action = call_allocate;
+- return;
++ return;
++ }
++ /* Use rate-limiting and a max number of retries if refresh
++ * had status 0 but failed to update the cred.
++ */
+ case -ETIMEDOUT:
+ rpc_delay(task, 3*HZ);
+ case -EAGAIN:
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index bfb78fab06ea..d77a4f0a4210 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -501,6 +501,7 @@ static int xs_nospace(struct rpc_task *task)
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
++ struct sock *sk = transport->inet;
+ int ret = -EAGAIN;
+
+ dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
+@@ -518,7 +519,7 @@ static int xs_nospace(struct rpc_task *task)
+ * window size
+ */
+ set_bit(SOCK_NOSPACE, &transport->sock->flags);
+- transport->inet->sk_write_pending++;
++ sk->sk_write_pending++;
+ /* ...and wait for more buffer space */
+ xprt_wait_for_buffer_space(task, xs_nospace_callback);
+ }
+@@ -528,6 +529,9 @@ static int xs_nospace(struct rpc_task *task)
+ }
+
+ spin_unlock_bh(&xprt->transport_lock);
++
++ /* Race breaker in case memory is freed before above code is called */
++ sk->sk_write_space(sk);
+ return ret;
+ }
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index eddfdecc122b..54fc90b2408e 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -149,8 +149,8 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
+
+ static inline unsigned unix_hash_fold(__wsum n)
+ {
+- unsigned hash = (__force unsigned)n;
+- hash ^= hash>>16;
++ unsigned int hash = (__force unsigned int)csum_fold(n);
++
+ hash ^= hash>>8;
+ return hash&(UNIX_HASH_SIZE-1);
+ }
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index d1d0ae84dc8e..98ff331ba197 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -130,8 +130,8 @@ static void do_usb_entry(struct usb_device_id *id,
+ range_lo < 0x9 ? "[%X-9" : "[%X",
+ range_lo);
+ sprintf(alias + strlen(alias),
+- range_hi > 0xA ? "a-%X]" : "%X]",
+- range_lo);
++ range_hi > 0xA ? "A-%X]" : "%X]",
++ range_hi);
+ }
+ }
+ if (bcdDevice_initial_digits < (sizeof(id->bcdDevice_lo) * 2 - 1))
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index bcf1d73a3e03..4fa7939c2e9f 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -4676,22 +4676,31 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
+ * as fast and as clean as possible. */
+ if (!selinux_policycap_netpeer)
+ return selinux_ip_postroute_compat(skb, ifindex, family);
++
++ secmark_active = selinux_secmark_enabled();
++ peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
++ if (!secmark_active && !peerlbl_active)
++ return NF_ACCEPT;
++
++ sk = skb->sk;
++
+ #ifdef CONFIG_XFRM
+ /* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec
+ * packet transformation so allow the packet to pass without any checks
+ * since we'll have another chance to perform access control checks
+ * when the packet is on it's final way out.
+ * NOTE: there appear to be some IPv6 multicast cases where skb->dst
+- * is NULL, in this case go ahead and apply access control. */
+- if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL)
++ * is NULL, in this case go ahead and apply access control.
++ * NOTE: if this is a local socket (skb->sk != NULL) that is in the
++ * TCP listening state we cannot wait until the XFRM processing
++ * is done as we will miss out on the SA label if we do;
++ * unfortunately, this means more work, but it is only once per
++ * connection. */
++ if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL &&
++ !(sk != NULL && sk->sk_state == TCP_LISTEN))
+ return NF_ACCEPT;
+ #endif
+- secmark_active = selinux_secmark_enabled();
+- peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
+- if (!secmark_active && !peerlbl_active)
+- return NF_ACCEPT;
+
+- sk = skb->sk;
+ if (sk == NULL) {
+ /* Without an associated socket the packet is either coming
+ * from the kernel or it is being forwarded; check the packet
+@@ -4719,6 +4728,25 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
+ struct sk_security_struct *sksec = sk->sk_security;
+ if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
+ return NF_DROP;
++ /* At this point, if the returned skb peerlbl is SECSID_NULL
++ * and the packet has been through at least one XFRM
++ * transformation then we must be dealing with the "final"
++ * form of labeled IPsec packet; since we've already applied
++ * all of our access controls on this packet we can safely
++ * pass the packet. */
++ if (skb_sid == SECSID_NULL) {
++ switch (family) {
++ case PF_INET:
++ if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
++ return NF_ACCEPT;
++ break;
++ case PF_INET6:
++ if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
++ return NF_ACCEPT;
++ default:
++ return NF_DROP_ERR(-ECONNREFUSED);
++ }
++ }
+ if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid))
+ return NF_DROP;
+ secmark_perm = PACKET__SEND;
+diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
+index a7f61d52f05c..23e9cba8d4d2 100644
+--- a/security/selinux/ss/policydb.c
++++ b/security/selinux/ss/policydb.c
+@@ -1914,7 +1914,19 @@ static int filename_trans_read(struct policydb *p, void *fp)
+ if (rc)
+ goto out;
+
+- hashtab_insert(p->filename_trans, ft, otype);
++ rc = hashtab_insert(p->filename_trans, ft, otype);
++ if (rc) {
++ /*
++ * Do not return -EEXIST to the caller, or the system
++ * will not boot.
++ */
++ if (rc != -EEXIST)
++ goto out;
++ /* But free memory to avoid memory leak. */
++ kfree(ft);
++ kfree(name);
++ kfree(otype);
++ }
+ }
+ hash_eval(p->filename_trans, "filenametr");
+ return 0;
+@@ -3202,10 +3214,10 @@ static int filename_write_helper(void *key, void *data, void *ptr)
+ if (rc)
+ return rc;
+
+- buf[0] = ft->stype;
+- buf[1] = ft->ttype;
+- buf[2] = ft->tclass;
+- buf[3] = otype->otype;
++ buf[0] = cpu_to_le32(ft->stype);
++ buf[1] = cpu_to_le32(ft->ttype);
++ buf[2] = cpu_to_le32(ft->tclass);
++ buf[3] = cpu_to_le32(otype->otype);
+
+ rc = put_entry(buf, sizeof(u32), 4, fp);
+ if (rc)
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index 185f849a26f6..72b20b1089df 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -1229,6 +1229,10 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
+ struct context context;
+ int rc = 0;
+
++ /* An empty security context is never valid. */
++ if (!scontext_len)
++ return -EINVAL;
++
+ if (!ss_initialized) {
+ int i;
+
+diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
+index 88168044375f..e7244accd25b 100644
+--- a/sound/pci/Kconfig
++++ b/sound/pci/Kconfig
+@@ -30,6 +30,7 @@ config SND_ALS300
+ select SND_PCM
+ select SND_AC97_CODEC
+ select SND_OPL3_LIB
++ select ZONE_DMA
+ help
+ Say 'Y' or 'M' to include support for Avance Logic ALS300/ALS300+
+
+@@ -54,6 +55,7 @@ config SND_ALI5451
+ tristate "ALi M5451 PCI Audio Controller"
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y here to include support for the integrated AC97 sound
+ device on motherboards using the ALi M5451 Audio Controller
+@@ -158,6 +160,7 @@ config SND_AZT3328
+ select SND_PCM
+ select SND_RAWMIDI
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y here to include support for Aztech AZF3328 (PCI168)
+ soundcards.
+@@ -463,6 +466,7 @@ config SND_EMU10K1
+ select SND_HWDEP
+ select SND_RAWMIDI
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y to include support for Sound Blaster PCI 512, Live!,
+ Audigy and E-mu APS (partially supported) soundcards.
+@@ -478,6 +482,7 @@ config SND_EMU10K1X
+ tristate "Emu10k1X (Dell OEM Version)"
+ select SND_AC97_CODEC
+ select SND_RAWMIDI
++ select ZONE_DMA
+ help
+ Say Y here to include support for the Dell OEM version of the
+ Sound Blaster Live!.
+@@ -511,6 +516,7 @@ config SND_ES1938
+ select SND_OPL3_LIB
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on ESS Solo-1
+ (ES1938, ES1946, ES1969) chips.
+@@ -522,6 +528,7 @@ config SND_ES1968
+ tristate "ESS ES1968/1978 (Maestro-1/2/2E)"
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on ESS Maestro
+ 1/2/2E chips.
+@@ -602,6 +609,7 @@ config SND_ICE1712
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
+ select BITREVERSE
++ select ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on the
+ ICE1712 (Envy24) chip.
+@@ -688,6 +696,7 @@ config SND_LX6464ES
+ config SND_MAESTRO3
+ tristate "ESS Allegro/Maestro3"
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on ESS Maestro 3
+ (Allegro) chips.
+@@ -782,6 +791,7 @@ config SND_SIS7019
+ tristate "SiS 7019 Audio Accelerator"
+ depends on X86 && !X86_64
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y here to include support for the SiS 7019 Audio Accelerator.
+
+@@ -793,6 +803,7 @@ config SND_SONICVIBES
+ select SND_OPL3_LIB
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on the S3
+ SonicVibes chip.
+@@ -804,6 +815,7 @@ config SND_TRIDENT
+ tristate "Trident 4D-Wave DX/NX; SiS 7018"
+ select SND_MPU401_UART
+ select SND_AC97_CODEC
++ select ZONE_DMA
+ help
+ Say Y here to include support for soundcards based on Trident
+ 4D-Wave DX/NX or SiS 7018 chips.
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1f78ca68398f..36bce68756b0 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5157,6 +5157,9 @@ static int alc269_fill_coef(struct hda_codec *codec)
+
+ if (spec->codec_variant != ALC269_TYPE_ALC269VB)
+ return 0;
++ /* ALC271X doesn't seem to support these COEFs (bko#52181) */
++ if (!strcmp(codec->chip_name, "ALC271X"))
++ return 0;
+
+ if ((alc_get_coef0(codec) & 0x00ff) < 0x015) {
+ alc_write_coef_idx(codec, 0xf, 0x960b);
+diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
+index bc6eb58be380..59eda0a3cce5 100644
+--- a/sound/pci/oxygen/xonar_dg.c
++++ b/sound/pci/oxygen/xonar_dg.c
+@@ -294,6 +294,16 @@ static int output_switch_put(struct snd_kcontrol *ctl,
+ oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
+ data->output_sel == 1 ? GPIO_HP_REAR : 0,
+ GPIO_HP_REAR);
++ oxygen_write8_masked(chip, OXYGEN_PLAY_ROUTING,
++ data->output_sel == 0 ?
++ OXYGEN_PLAY_MUTE01 :
++ OXYGEN_PLAY_MUTE23 |
++ OXYGEN_PLAY_MUTE45 |
++ OXYGEN_PLAY_MUTE67,
++ OXYGEN_PLAY_MUTE01 |
++ OXYGEN_PLAY_MUTE23 |
++ OXYGEN_PLAY_MUTE45 |
++ OXYGEN_PLAY_MUTE67);
+ }
+ mutex_unlock(&chip->mutex);
+ return changed;
+@@ -597,7 +607,7 @@ struct oxygen_model model_xonar_dg = {
+ .model_data_size = sizeof(struct dg),
+ .device_config = PLAYBACK_0_TO_I2S |
+ PLAYBACK_1_TO_SPDIF |
+- CAPTURE_0_FROM_I2S_2,
++ CAPTURE_0_FROM_I2S_1,
+ .dac_channels_pcm = 6,
+ .dac_channels_mixer = 0,
+ .function_flags = OXYGEN_FUNCTION_SPI,
+diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
+index 732c5e837437..1b3586441242 100644
+--- a/sound/pci/rme9652/rme9652.c
++++ b/sound/pci/rme9652/rme9652.c
+@@ -285,7 +285,7 @@ static char channel_map_9636_ds[26] = {
+ /* ADAT channels are remapped */
+ 1, 3, 5, 7, 9, 11, 13, 15,
+ /* channels 8 and 9 are S/PDIF */
+- 24, 25
++ 24, 25,
+ /* others don't exist */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
+ };
+diff --git a/sound/soc/codecs/adau1701.c b/sound/soc/codecs/adau1701.c
+index 8b7e1c50d6e9..3cca558d7c76 100644
+--- a/sound/soc/codecs/adau1701.c
++++ b/sound/soc/codecs/adau1701.c
+@@ -64,7 +64,7 @@
+
+ #define ADAU1701_SEROCTL_WORD_LEN_24 0x0000
+ #define ADAU1701_SEROCTL_WORD_LEN_20 0x0001
+-#define ADAU1701_SEROCTL_WORD_LEN_16 0x0010
++#define ADAU1701_SEROCTL_WORD_LEN_16 0x0002
+ #define ADAU1701_SEROCTL_WORD_LEN_MASK 0x0003
+
+ #define ADAU1701_AUXNPOW_VBPD 0x40
+diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
+index d2f37152f940..25ae0598c6fc 100644
+--- a/sound/soc/codecs/sta32x.c
++++ b/sound/soc/codecs/sta32x.c
+@@ -143,42 +143,42 @@ static const unsigned int sta32x_limiter_drc_release_tlv[] = {
+ 13, 16, TLV_DB_SCALE_ITEM(-1500, 300, 0),
+ };
+
+-static const struct soc_enum sta32x_drc_ac_enum =
+- SOC_ENUM_SINGLE(STA32X_CONFD, STA32X_CONFD_DRC_SHIFT,
+- 2, sta32x_drc_ac);
+-static const struct soc_enum sta32x_auto_eq_enum =
+- SOC_ENUM_SINGLE(STA32X_AUTO1, STA32X_AUTO1_AMEQ_SHIFT,
+- 3, sta32x_auto_eq_mode);
+-static const struct soc_enum sta32x_auto_gc_enum =
+- SOC_ENUM_SINGLE(STA32X_AUTO1, STA32X_AUTO1_AMGC_SHIFT,
+- 4, sta32x_auto_gc_mode);
+-static const struct soc_enum sta32x_auto_xo_enum =
+- SOC_ENUM_SINGLE(STA32X_AUTO2, STA32X_AUTO2_XO_SHIFT,
+- 16, sta32x_auto_xo_mode);
+-static const struct soc_enum sta32x_preset_eq_enum =
+- SOC_ENUM_SINGLE(STA32X_AUTO3, STA32X_AUTO3_PEQ_SHIFT,
+- 32, sta32x_preset_eq_mode);
+-static const struct soc_enum sta32x_limiter_ch1_enum =
+- SOC_ENUM_SINGLE(STA32X_C1CFG, STA32X_CxCFG_LS_SHIFT,
+- 3, sta32x_limiter_select);
+-static const struct soc_enum sta32x_limiter_ch2_enum =
+- SOC_ENUM_SINGLE(STA32X_C2CFG, STA32X_CxCFG_LS_SHIFT,
+- 3, sta32x_limiter_select);
+-static const struct soc_enum sta32x_limiter_ch3_enum =
+- SOC_ENUM_SINGLE(STA32X_C3CFG, STA32X_CxCFG_LS_SHIFT,
+- 3, sta32x_limiter_select);
+-static const struct soc_enum sta32x_limiter1_attack_rate_enum =
+- SOC_ENUM_SINGLE(STA32X_L1AR, STA32X_LxA_SHIFT,
+- 16, sta32x_limiter_attack_rate);
+-static const struct soc_enum sta32x_limiter2_attack_rate_enum =
+- SOC_ENUM_SINGLE(STA32X_L2AR, STA32X_LxA_SHIFT,
+- 16, sta32x_limiter_attack_rate);
+-static const struct soc_enum sta32x_limiter1_release_rate_enum =
+- SOC_ENUM_SINGLE(STA32X_L1AR, STA32X_LxR_SHIFT,
+- 16, sta32x_limiter_release_rate);
+-static const struct soc_enum sta32x_limiter2_release_rate_enum =
+- SOC_ENUM_SINGLE(STA32X_L2AR, STA32X_LxR_SHIFT,
+- 16, sta32x_limiter_release_rate);
++static SOC_ENUM_SINGLE_DECL(sta32x_drc_ac_enum,
++ STA32X_CONFD, STA32X_CONFD_DRC_SHIFT,
++ sta32x_drc_ac);
++static SOC_ENUM_SINGLE_DECL(sta32x_auto_eq_enum,
++ STA32X_AUTO1, STA32X_AUTO1_AMEQ_SHIFT,
++ sta32x_auto_eq_mode);
++static SOC_ENUM_SINGLE_DECL(sta32x_auto_gc_enum,
++ STA32X_AUTO1, STA32X_AUTO1_AMGC_SHIFT,
++ sta32x_auto_gc_mode);
++static SOC_ENUM_SINGLE_DECL(sta32x_auto_xo_enum,
++ STA32X_AUTO2, STA32X_AUTO2_XO_SHIFT,
++ sta32x_auto_xo_mode);
++static SOC_ENUM_SINGLE_DECL(sta32x_preset_eq_enum,
++ STA32X_AUTO3, STA32X_AUTO3_PEQ_SHIFT,
++ sta32x_preset_eq_mode);
++static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch1_enum,
++ STA32X_C1CFG, STA32X_CxCFG_LS_SHIFT,
++ sta32x_limiter_select);
++static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch2_enum,
++ STA32X_C2CFG, STA32X_CxCFG_LS_SHIFT,
++ sta32x_limiter_select);
++static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch3_enum,
++ STA32X_C3CFG, STA32X_CxCFG_LS_SHIFT,
++ sta32x_limiter_select);
++static SOC_ENUM_SINGLE_DECL(sta32x_limiter1_attack_rate_enum,
++ STA32X_L1AR, STA32X_LxA_SHIFT,
++ sta32x_limiter_attack_rate);
++static SOC_ENUM_SINGLE_DECL(sta32x_limiter2_attack_rate_enum,
++ STA32X_L2AR, STA32X_LxA_SHIFT,
++ sta32x_limiter_attack_rate);
++static SOC_ENUM_SINGLE_DECL(sta32x_limiter1_release_rate_enum,
++ STA32X_L1AR, STA32X_LxR_SHIFT,
++ sta32x_limiter_release_rate);
++static SOC_ENUM_SINGLE_DECL(sta32x_limiter2_release_rate_enum,
++ STA32X_L2AR, STA32X_LxR_SHIFT,
++ sta32x_limiter_release_rate);
+
+ /* byte array controls for setting biquad, mixer, scaling coefficients;
+ * for biquads all five coefficients need to be set in one go,
+@@ -350,7 +350,7 @@ SOC_SINGLE_TLV("Treble Tone Control", STA32X_TONE, STA32X_TONE_TTC_SHIFT, 15, 0,
+ SOC_ENUM("Limiter1 Attack Rate (dB/ms)", sta32x_limiter1_attack_rate_enum),
+ SOC_ENUM("Limiter2 Attack Rate (dB/ms)", sta32x_limiter2_attack_rate_enum),
+ SOC_ENUM("Limiter1 Release Rate (dB/ms)", sta32x_limiter1_release_rate_enum),
+-SOC_ENUM("Limiter2 Release Rate (dB/ms)", sta32x_limiter1_release_rate_enum),
++SOC_ENUM("Limiter2 Release Rate (dB/ms)", sta32x_limiter2_release_rate_enum),
+
+ /* depending on mode, the attack/release thresholds have
+ * two different enum definitions; provide both
+diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c
+index aa05e6507f84..d5f7d8d5b2f6 100644
+--- a/sound/soc/codecs/wm8770.c
++++ b/sound/soc/codecs/wm8770.c
+@@ -163,8 +163,8 @@ static const char *ain_text[] = {
+ "AIN5", "AIN6", "AIN7", "AIN8"
+ };
+
+-static const struct soc_enum ain_enum =
+- SOC_ENUM_DOUBLE(WM8770_ADCMUX, 0, 4, 8, ain_text);
++static SOC_ENUM_DOUBLE_DECL(ain_enum,
++ WM8770_ADCMUX, 0, 4, ain_text);
+
+ static const struct snd_kcontrol_new ain_mux =
+ SOC_DAPM_ENUM("Capture Mux", ain_enum);
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 41b9fe0ae177..4f7b330d7796 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -831,6 +831,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ }
+ break;
+
++ case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
+ case USB_ID(0x046d, 0x0808):
+ case USB_ID(0x046d, 0x0809):
+ case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index f1324c423835..0e4e909d111e 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -304,6 +304,11 @@ static struct usbmix_name_map hercules_usb51_map[] = {
+ { 0 } /* terminator */
+ };
+
++static const struct usbmix_name_map kef_x300a_map[] = {
++ { 10, NULL }, /* firmware locks up (?) when we try to access this FU */
++ { 0 }
++};
++
+ /*
+ * Control map entries
+ */
+@@ -371,6 +376,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ .map = scratch_live_map,
+ .ignore_ctl_error = 1,
+ },
++ {
++ .id = USB_ID(0x27ac, 0x1000),
++ .map = kef_x300a_map,
++ },
+ { 0 } /* terminator */
+ };
+
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 3c6f7808efae..279a2f1fba69 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -32,6 +32,7 @@
+ #include <dirent.h>
+ #include <string.h>
+ #include <ctype.h>
++#include <cpuid.h>
+
+ #define MSR_TSC 0x10
+ #define MSR_NEHALEM_PLATFORM_INFO 0xCE
+@@ -847,7 +848,7 @@ void check_cpuid()
+
+ eax = ebx = ecx = edx = 0;
+
+- asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0));
++ __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
+
+ if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
+ genuine_intel = 1;
+@@ -856,7 +857,7 @@ void check_cpuid()
+ fprintf(stderr, "%.4s%.4s%.4s ",
+ (char *)&ebx, (char *)&edx, (char *)&ecx);
+
+- asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
++ __get_cpuid(1, &fms, &ebx, &ecx, &edx);
+ family = (fms >> 8) & 0xf;
+ model = (fms >> 4) & 0xf;
+ stepping = fms & 0xf;
+@@ -878,7 +879,7 @@ void check_cpuid()
+ * This check is valid for both Intel and AMD.
+ */
+ ebx = ecx = edx = 0;
+- asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000));
++ __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
+
+ if (max_level < 0x80000007) {
+ fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level);
+@@ -889,7 +890,7 @@ void check_cpuid()
+ * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
+ * this check is valid for both Intel and AMD
+ */
+- asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
++ __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
+ has_invariant_tsc = edx & (1 << 8);
+
+ if (!has_invariant_tsc) {
+@@ -902,7 +903,7 @@ void check_cpuid()
+ * this check is valid for both Intel and AMD
+ */
+
+- asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
++ __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
+ has_aperf = ecx & (1 << 0);
+ if (!has_aperf) {
+ fprintf(stderr, "No APERF MSR\n");
+diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
+index a6ec206f36ba..e56b2405f0a3 100644
+--- a/virt/kvm/coalesced_mmio.c
++++ b/virt/kvm/coalesced_mmio.c
+@@ -148,17 +148,13 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
+ list_add_tail(&dev->list, &kvm->coalesced_zones);
+ mutex_unlock(&kvm->slots_lock);
+
+- return ret;
++ return 0;
+
+ out_free_dev:
+ mutex_unlock(&kvm->slots_lock);
+-
+ kfree(dev);
+
+- if (dev == NULL)
+- return -ENXIO;
+-
+- return 0;
++ return ret;
+ }
+
+ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
diff --git a/1056_linux-3.2.57.patch b/1056_linux-3.2.57.patch
new file mode 100644
index 00000000..ce211110
--- /dev/null
+++ b/1056_linux-3.2.57.patch
@@ -0,0 +1,905 @@
+diff --git a/Makefile b/Makefile
+index ec90bfb29420..c92db9b51a97 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 56
++SUBLEVEL = 57
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
+index 99348c0eaa41..78be2459c9a1 100644
+--- a/arch/s390/kernel/head64.S
++++ b/arch/s390/kernel/head64.S
+@@ -61,7 +61,7 @@ ENTRY(startup_continue)
+ .quad 0 # cr12: tracing off
+ .quad 0 # cr13: home space segment table
+ .quad 0xc0000000 # cr14: machine check handling off
+- .quad 0 # cr15: linkage stack operations
++ .quad .Llinkage_stack # cr15: linkage stack operations
+ .Lpcmsk:.quad 0x0000000180000000
+ .L4malign:.quad 0xffffffffffc00000
+ .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
+@@ -69,12 +69,15 @@ ENTRY(startup_continue)
+ .Lparmaddr:
+ .quad PARMAREA
+ .align 64
+-.Lduct: .long 0,0,0,0,.Lduald,0,0,0
++.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
+ .long 0,0,0,0,0,0,0,0
++.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
+ .align 128
+ .Lduald:.rept 8
+ .long 0x80000000,0,0,0 # invalid access-list entries
+ .endr
++.Llinkage_stack:
++ .long 0,0,0x89000000,0,0,0,0x8a000000,0
+
+ ENTRY(_ehead)
+
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index f1b36cf3e3d0..db2ffef4ea81 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -2451,6 +2451,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
+ int emulate = 0;
+ gfn_t pseudo_gfn;
+
++ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
++ return 0;
++
+ for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
+ if (iterator.level == level) {
+ unsigned pte_access = ACC_ALL;
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index aac5ea7620fa..a4f6bda7dcc8 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6273,8 +6273,8 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ free_vpid(vmx);
+- free_nested(vmx);
+ free_loaded_vmcs(vmx->loaded_vmcs);
++ free_nested(vmx);
+ kfree(vmx->guest_msrs);
+ kvm_vcpu_uninit(vcpu);
+ kmem_cache_free(kvm_vcpu_cache, vmx);
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 7be5fd9b98ea..bc35070daf78 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -237,11 +237,22 @@ static int synaptics_identify(struct psmouse *psmouse)
+ * Read touchpad resolution and maximum reported coordinates
+ * Resolution is left zero if touchpad does not support the query
+ */
++
++static const int *quirk_min_max;
++
+ static int synaptics_resolution(struct psmouse *psmouse)
+ {
+ struct synaptics_data *priv = psmouse->private;
+ unsigned char resp[3];
+
++ if (quirk_min_max) {
++ priv->x_min = quirk_min_max[0];
++ priv->x_max = quirk_min_max[1];
++ priv->y_min = quirk_min_max[2];
++ priv->y_max = quirk_min_max[3];
++ return 0;
++ }
++
+ if (SYN_ID_MAJOR(priv->identity) < 4)
+ return 0;
+
+@@ -1364,10 +1375,54 @@ static const struct dmi_system_id __initconst olpc_dmi_table[] = {
+ { }
+ };
+
++static const struct dmi_system_id min_max_dmi_table[] __initconst = {
++#if defined(CONFIG_DMI)
++ {
++ /* Lenovo ThinkPad Helix */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
++ },
++ .driver_data = (int []){1024, 5052, 2258, 4832},
++ },
++ {
++ /* Lenovo ThinkPad X240 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
++ },
++ .driver_data = (int []){1232, 5710, 1156, 4696},
++ },
++ {
++ /* Lenovo ThinkPad T440s */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
++ },
++ .driver_data = (int []){1024, 5112, 2024, 4832},
++ },
++ {
++ /* Lenovo ThinkPad T540p */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
++ },
++ .driver_data = (int []){1024, 5056, 2058, 4832},
++ },
++#endif
++ { }
++};
++
+ void __init synaptics_module_init(void)
+ {
++ const struct dmi_system_id *min_max_dmi;
++
+ impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
+ broken_olpc_ec = dmi_check_system(olpc_dmi_table);
++
++ min_max_dmi = dmi_first_match(min_max_dmi_table);
++ if (min_max_dmi)
++ quirk_min_max = min_max_dmi->driver_data;
+ }
+
+ int synaptics_init(struct psmouse *psmouse)
+diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
+index 6729585aed7a..98ab7596e2fb 100644
+--- a/drivers/net/usb/asix.c
++++ b/drivers/net/usb/asix.c
+@@ -183,6 +183,17 @@ struct ax88172_int_data {
+ __le16 res3;
+ } __packed;
+
++struct asix_rx_fixup_info {
++ struct sk_buff *ax_skb;
++ u32 header;
++ u16 size;
++ bool split_head;
++};
++
++struct asix_common_private {
++ struct asix_rx_fixup_info rx_fixup_info;
++};
++
+ static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+ {
+@@ -304,97 +315,89 @@ asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ }
+ }
+
+-static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
++static int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
++ struct asix_rx_fixup_info *rx)
+ {
+- u8 *head;
+- u32 header;
+- char *packet;
+- struct sk_buff *ax_skb;
+- u16 size;
++ int offset = 0;
++
++ while (offset + sizeof(u16) <= skb->len) {
++ u16 remaining = 0;
++ unsigned char *data;
++
++ if (!rx->size) {
++ if ((skb->len - offset == sizeof(u16)) ||
++ rx->split_head) {
++ if(!rx->split_head) {
++ rx->header = get_unaligned_le16(
++ skb->data + offset);
++ rx->split_head = true;
++ offset += sizeof(u16);
++ break;
++ } else {
++ rx->header |= (get_unaligned_le16(
++ skb->data + offset)
++ << 16);
++ rx->split_head = false;
++ offset += sizeof(u16);
++ }
++ } else {
++ rx->header = get_unaligned_le32(skb->data +
++ offset);
++ offset += sizeof(u32);
++ }
+
+- head = (u8 *) skb->data;
+- memcpy(&header, head, sizeof(header));
+- le32_to_cpus(&header);
+- packet = head + sizeof(header);
+-
+- skb_pull(skb, 4);
+-
+- while (skb->len > 0) {
+- if ((header & 0x07ff) != ((~header >> 16) & 0x07ff))
+- netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
+-
+- /* get the packet length */
+- size = (u16) (header & 0x000007ff);
+-
+- if ((skb->len) - ((size + 1) & 0xfffe) == 0) {
+- u8 alignment = (unsigned long)skb->data & 0x3;
+- if (alignment != 0x2) {
+- /*
+- * not 16bit aligned so use the room provided by
+- * the 32 bit header to align the data
+- *
+- * note we want 16bit alignment as MAC header is
+- * 14bytes thus ip header will be aligned on
+- * 32bit boundary so accessing ipheader elements
+- * using a cast to struct ip header wont cause
+- * an unaligned accesses.
+- */
+- u8 realignment = (alignment + 2) & 0x3;
+- memmove(skb->data - realignment,
+- skb->data,
+- size);
+- skb->data -= realignment;
+- skb_set_tail_pointer(skb, size);
++ /* get the packet length */
++ rx->size = (u16) (rx->header & 0x7ff);
++ if (rx->size != ((~rx->header >> 16) & 0x7ff)) {
++ netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
++ rx->header, offset);
++ rx->size = 0;
++ return 0;
+ }
+- return 2;
++ rx->ax_skb = netdev_alloc_skb_ip_align(dev->net,
++ rx->size);
++ if (!rx->ax_skb)
++ return 0;
+ }
+
+- if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
++ if (rx->size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
+ netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
+- size);
+- return 0;
+- }
+- ax_skb = skb_clone(skb, GFP_ATOMIC);
+- if (ax_skb) {
+- u8 alignment = (unsigned long)packet & 0x3;
+- ax_skb->len = size;
+-
+- if (alignment != 0x2) {
+- /*
+- * not 16bit aligned use the room provided by
+- * the 32 bit header to align the data
+- */
+- u8 realignment = (alignment + 2) & 0x3;
+- memmove(packet - realignment, packet, size);
+- packet -= realignment;
+- }
+- ax_skb->data = packet;
+- skb_set_tail_pointer(ax_skb, size);
+- usbnet_skb_return(dev, ax_skb);
+- } else {
++ rx->size);
++ kfree_skb(rx->ax_skb);
+ return 0;
+ }
+
+- skb_pull(skb, (size + 1) & 0xfffe);
++ if (rx->size > skb->len - offset) {
++ remaining = rx->size - (skb->len - offset);
++ rx->size = skb->len - offset;
++ }
+
+- if (skb->len < sizeof(header))
+- break;
++ data = skb_put(rx->ax_skb, rx->size);
++ memcpy(data, skb->data + offset, rx->size);
++ if (!remaining)
++ usbnet_skb_return(dev, rx->ax_skb);
+
+- head = (u8 *) skb->data;
+- memcpy(&header, head, sizeof(header));
+- le32_to_cpus(&header);
+- packet = head + sizeof(header);
+- skb_pull(skb, 4);
++ offset += (rx->size + 1) & 0xfffe;
++ rx->size = remaining;
+ }
+
+- if (skb->len < 0) {
+- netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n",
+- skb->len);
++ if (skb->len != offset) {
++ netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
++ skb->len, offset);
+ return 0;
+ }
++
+ return 1;
+ }
+
++static int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb)
++{
++ struct asix_common_private *dp = dev->driver_priv;
++ struct asix_rx_fixup_info *rx = &dp->rx_fixup_info;
++
++ return asix_rx_fixup_internal(dev, skb, rx);
++}
++
+ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+ {
+@@ -1154,9 +1157,19 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
+ dev->rx_urb_size = 2048;
+ }
+
++ dev->driver_priv = kzalloc(sizeof(struct asix_common_private), GFP_KERNEL);
++ if (!dev->driver_priv)
++ return -ENOMEM;
++
+ return 0;
+ }
+
++static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
++{
++ if (dev->driver_priv)
++ kfree(dev->driver_priv);
++}
++
+ static struct ethtool_ops ax88178_ethtool_ops = {
+ .get_drvinfo = asix_get_drvinfo,
+ .get_link = asix_get_link,
+@@ -1489,6 +1502,10 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
+ dev->rx_urb_size = 2048;
+ }
+
++ dev->driver_priv = kzalloc(sizeof(struct asix_common_private), GFP_KERNEL);
++ if (!dev->driver_priv)
++ return -ENOMEM;
++
+ return 0;
+ }
+
+@@ -1535,22 +1552,25 @@ static const struct driver_info hawking_uf200_info = {
+ static const struct driver_info ax88772_info = {
+ .description = "ASIX AX88772 USB 2.0 Ethernet",
+ .bind = ax88772_bind,
++ .unbind = ax88772_unbind,
+ .status = asix_status,
+ .link_reset = ax88772_link_reset,
+ .reset = ax88772_reset,
+- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
+- .rx_fixup = asix_rx_fixup,
++ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
++ .rx_fixup = asix_rx_fixup_common,
+ .tx_fixup = asix_tx_fixup,
+ };
+
+ static const struct driver_info ax88178_info = {
+ .description = "ASIX AX88178 USB 2.0 Ethernet",
+ .bind = ax88178_bind,
++ .unbind = ax88772_unbind,
+ .status = asix_status,
+ .link_reset = ax88178_link_reset,
+ .reset = ax88178_reset,
+- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
+- .rx_fixup = asix_rx_fixup,
++ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
++ FLAG_MULTI_PACKET,
++ .rx_fixup = asix_rx_fixup_common,
+ .tx_fixup = asix_tx_fixup,
+ };
+
+diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
+index 07a7f5432597..68291956b7b6 100644
+--- a/drivers/staging/speakup/kobjects.c
++++ b/drivers/staging/speakup/kobjects.c
+@@ -521,9 +521,9 @@ static ssize_t punc_store(struct kobject *kobj, struct kobj_attribute *attr,
+ spk_lock(flags);
+
+ if (*punc_buf == 'd' || *punc_buf == 'r')
+- x = set_mask_bits(0, var->value, 3);
++ x = spk_set_mask_bits(0, var->value, 3);
+ else
+- x = set_mask_bits(punc_buf, var->value, 3);
++ x = spk_set_mask_bits(punc_buf, var->value, 3);
+
+ spk_unlock(flags);
+ return count;
+diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
+index 0d70f68a22a4..a07635130340 100644
+--- a/drivers/staging/speakup/main.c
++++ b/drivers/staging/speakup/main.c
+@@ -2265,7 +2265,7 @@ static int __init speakup_init(void)
+ (var->var_id >= 0) && (var->var_id < MAXVARS); var++)
+ speakup_register_var(var);
+ for (i = 1; punc_info[i].mask != 0; i++)
+- set_mask_bits(0, i, 2);
++ spk_set_mask_bits(0, i, 2);
+
+ set_key_info(key_defaults, key_buf);
+ if (quiet_boot)
+diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
+index 412b87947f66..f39c0a27da26 100644
+--- a/drivers/staging/speakup/speakup.h
++++ b/drivers/staging/speakup/speakup.h
+@@ -71,7 +71,7 @@ extern struct st_var_header *var_header_by_name(const char *name);
+ extern struct punc_var_t *get_punc_var(enum var_id_t var_id);
+ extern int set_num_var(int val, struct st_var_header *var, int how);
+ extern int set_string_var(const char *page, struct st_var_header *var, int len);
+-extern int set_mask_bits(const char *input, const int which, const int how);
++extern int spk_set_mask_bits(const char *input, const int which, const int how);
+ extern special_func special_handler;
+ extern int handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key);
+ extern int synth_init(char *name);
+diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
+index ab7de9389dd6..75eaf27fb4fd 100644
+--- a/drivers/staging/speakup/varhandlers.c
++++ b/drivers/staging/speakup/varhandlers.c
+@@ -267,11 +267,11 @@ int set_string_var(const char *page, struct st_var_header *var, int len)
+ return ret;
+ }
+
+-/* set_mask_bits sets or clears the punc/delim/repeat bits,
++/* spk_set_mask_bits sets or clears the punc/delim/repeat bits,
+ * if input is null uses the defaults.
+ * values for how: 0 clears bits of chars supplied,
+ * 1 clears allk, 2 sets bits for chars */
+-int set_mask_bits(const char *input, const int which, const int how)
++int spk_set_mask_bits(const char *input, const int which, const int how)
+ {
+ u_char *cp;
+ short mask = punc_info[which].mask;
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index c55808edddf4..aa05d5e6c3ae 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -2107,7 +2107,7 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
+ {
+ unsigned int written;
+ unsigned long num_pages, npages, i;
+- size_t copied, len, cur_len;
++ size_t bytes, copied, len, cur_len;
+ ssize_t total_written = 0;
+ struct kvec *to_send;
+ struct page **pages;
+@@ -2165,17 +2165,45 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
+ do {
+ size_t save_len = cur_len;
+ for (i = 0; i < npages; i++) {
+- copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
++ bytes = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
+ copied = iov_iter_copy_from_user(pages[i], &it, 0,
+- copied);
++ bytes);
+ cur_len -= copied;
+ iov_iter_advance(&it, copied);
+ to_send[i+1].iov_base = kmap(pages[i]);
+ to_send[i+1].iov_len = copied;
++ /*
++ * If we didn't copy as much as we expected, then that
++ * may mean we trod into an unmapped area. Stop copying
++ * at that point. On the next pass through the big
++ * loop, we'll likely end up getting a zero-length
++ * write and bailing out of it.
++ */
++ if (copied < bytes)
++ break;
+ }
+
+ cur_len = save_len - cur_len;
+
++ /*
++ * If we have no data to send, then that probably means that
++ * the copy above failed altogether. That's most likely because
++ * the address in the iovec was bogus. Set the rc to -EFAULT,
++ * free anything we allocated and bail out.
++ */
++ if (!cur_len) {
++ kunmap(pages[0]);
++ if (!total_written)
++ total_written = -EFAULT;
++ break;
++ }
++
++ /*
++ * i + 1 now represents the number of pages we actually used in
++ * the copy phase above.
++ */
++ npages = min(npages, i + 1);
++
+ do {
+ if (open_file->invalidHandle) {
+ rc = cifs_reopen_file(open_file, false);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 45778a62928c..dc9f0ecd8f4a 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -38,6 +38,7 @@
+ #include <linux/printk.h>
+ #include <linux/slab.h>
+ #include <linux/ratelimit.h>
++#include <linux/bitops.h>
+
+ #include "ext4_jbd2.h"
+ #include "xattr.h"
+@@ -3694,18 +3695,20 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
+ void ext4_set_inode_flags(struct inode *inode)
+ {
+ unsigned int flags = EXT4_I(inode)->i_flags;
++ unsigned int new_fl = 0;
+
+- inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+ if (flags & EXT4_SYNC_FL)
+- inode->i_flags |= S_SYNC;
++ new_fl |= S_SYNC;
+ if (flags & EXT4_APPEND_FL)
+- inode->i_flags |= S_APPEND;
++ new_fl |= S_APPEND;
+ if (flags & EXT4_IMMUTABLE_FL)
+- inode->i_flags |= S_IMMUTABLE;
++ new_fl |= S_IMMUTABLE;
+ if (flags & EXT4_NOATIME_FL)
+- inode->i_flags |= S_NOATIME;
++ new_fl |= S_NOATIME;
+ if (flags & EXT4_DIRSYNC_FL)
+- inode->i_flags |= S_DIRSYNC;
++ new_fl |= S_DIRSYNC;
++ set_mask_bits(&inode->i_flags,
++ S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl);
+ }
+
+ /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
+diff --git a/include/linux/bitops.h b/include/linux/bitops.h
+index fc8a3ffce320..87a375f4e051 100644
+--- a/include/linux/bitops.h
++++ b/include/linux/bitops.h
+@@ -168,6 +168,21 @@ static inline unsigned long __ffs64(u64 word)
+
+ #ifdef __KERNEL__
+
++#ifndef set_mask_bits
++#define set_mask_bits(ptr, _mask, _bits) \
++({ \
++ const typeof(*ptr) mask = (_mask), bits = (_bits); \
++ typeof(*ptr) old, new; \
++ \
++ do { \
++ old = ACCESS_ONCE(*ptr); \
++ new = (old & ~mask) | bits; \
++ } while (cmpxchg(ptr, old, new) != old); \
++ \
++ new; \
++})
++#endif
++
+ #ifndef find_last_bit
+ /**
+ * find_last_bit - find the last set bit in a memory region
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 85180bf6c04b..13bd6d0e43c5 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2143,6 +2143,8 @@ extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
+
+ extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);
+
++unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
++
+ static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
+ int len, void *buffer)
+ {
+@@ -2555,5 +2557,22 @@ static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
+
+ return true;
+ }
++
++/**
++ * skb_gso_network_seglen - Return length of individual segments of a gso packet
++ *
++ * @skb: GSO skb
++ *
++ * skb_gso_network_seglen is used to determine the real size of the
++ * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
++ *
++ * The MAC/L2 header is not accounted for.
++ */
++static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
++{
++ unsigned int hdr_len = skb_transport_header(skb) -
++ skb_network_header(skb);
++ return hdr_len + skb_gso_transport_seglen(skb);
++}
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_SKBUFF_H */
+diff --git a/ipc/msg.c b/ipc/msg.c
+index 7385de25788a..25f1a6139584 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -296,7 +296,9 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
+ }
+ atomic_sub(msq->q_cbytes, &ns->msg_bytes);
+ security_msg_queue_free(msq);
++ ipc_lock_by_ptr(&msq->q_perm);
+ ipc_rcu_putref(msq);
++ ipc_unlock(&msq->q_perm);
+ }
+
+ /*
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 5d6cb54c0965..8ac4a0f3e869 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -45,6 +45,8 @@
+ #include <linux/in.h>
+ #include <linux/inet.h>
+ #include <linux/slab.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
+ #include <linux/netdevice.h>
+ #ifdef CONFIG_NET_CLS_ACT
+ #include <net/pkt_sched.h>
+@@ -3181,3 +3183,26 @@ void __skb_warn_lro_forwarding(const struct sk_buff *skb)
+ " while LRO is enabled\n", skb->dev->name);
+ }
+ EXPORT_SYMBOL(__skb_warn_lro_forwarding);
++
++/**
++ * skb_gso_transport_seglen - Return length of individual segments of a gso packet
++ *
++ * @skb: GSO skb
++ *
++ * skb_gso_transport_seglen is used to determine the real size of the
++ * individual segments, including Layer4 headers (TCP/UDP).
++ *
++ * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
++ */
++unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
++{
++ const struct skb_shared_info *shinfo = skb_shinfo(skb);
++ unsigned int hdr_len;
++
++ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
++ hdr_len = tcp_hdrlen(skb);
++ else
++ hdr_len = sizeof(struct udphdr);
++ return hdr_len + shinfo->gso_size;
++}
++EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
+index 29a07b6c7168..e0d9f02fec11 100644
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -39,6 +39,68 @@
+ #include <net/route.h>
+ #include <net/xfrm.h>
+
++static bool ip_may_fragment(const struct sk_buff *skb)
++{
++ return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
++ !skb->local_df;
++}
++
++static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
++{
++ if (skb->len <= mtu || skb->local_df)
++ return false;
++
++ if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
++ return false;
++
++ return true;
++}
++
++static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
++{
++ unsigned int mtu;
++
++ if (skb->local_df || !skb_is_gso(skb))
++ return false;
++
++ mtu = dst_mtu(skb_dst(skb));
++
++ /* if seglen > mtu, do software segmentation for IP fragmentation on
++ * output. DF bit cannot be set since ip_forward would have sent
++ * icmp error.
++ */
++ return skb_gso_network_seglen(skb) > mtu;
++}
++
++/* called if GSO skb needs to be fragmented on forward */
++static int ip_forward_finish_gso(struct sk_buff *skb)
++{
++ struct sk_buff *segs;
++ int ret = 0;
++
++ segs = skb_gso_segment(skb, 0);
++ if (IS_ERR(segs)) {
++ kfree_skb(skb);
++ return -ENOMEM;
++ }
++
++ consume_skb(skb);
++
++ do {
++ struct sk_buff *nskb = segs->next;
++ int err;
++
++ segs->next = NULL;
++ err = dst_output(segs);
++
++ if (err && ret == 0)
++ ret = err;
++ segs = nskb;
++ } while (segs);
++
++ return ret;
++}
++
+ static int ip_forward_finish(struct sk_buff *skb)
+ {
+ struct ip_options * opt = &(IPCB(skb)->opt);
+@@ -48,6 +110,9 @@ static int ip_forward_finish(struct sk_buff *skb)
+ if (unlikely(opt->optlen))
+ ip_forward_options(skb);
+
++ if (ip_gso_exceeds_dst_mtu(skb))
++ return ip_forward_finish_gso(skb);
++
+ return dst_output(skb);
+ }
+
+@@ -87,8 +152,7 @@ int ip_forward(struct sk_buff *skb)
+ if (opt->is_strictroute && opt->nexthop != rt->rt_gateway)
+ goto sr_failed;
+
+- if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
+- (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
++ if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, dst_mtu(&rt->dst))) {
+ IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(dst_mtu(&rt->dst)));
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index d3fde7eb5718..cd4b529fbf98 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -381,6 +381,17 @@ static inline int ip6_forward_finish(struct sk_buff *skb)
+ return dst_output(skb);
+ }
+
++static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
++{
++ if (skb->len <= mtu || skb->local_df)
++ return false;
++
++ if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
++ return false;
++
++ return true;
++}
++
+ int ip6_forward(struct sk_buff *skb)
+ {
+ struct dst_entry *dst = skb_dst(skb);
+@@ -504,7 +515,7 @@ int ip6_forward(struct sk_buff *skb)
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+
+- if (skb->len > mtu && !skb_is_gso(skb)) {
++ if (ip6_pkt_too_big(skb, mtu)) {
+ /* Again, force OUTPUT device used as source address */
+ skb->dev = dst->dev;
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
+index 2e664a69d7db..8aa94ee45743 100644
+--- a/net/netfilter/nf_conntrack_proto_dccp.c
++++ b/net/netfilter/nf_conntrack_proto_dccp.c
+@@ -431,7 +431,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
+ const char *msg;
+ u_int8_t state;
+
+- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
++ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
+ BUG_ON(dh == NULL);
+
+ state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
+@@ -483,7 +483,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
+ u_int8_t type, old_state, new_state;
+ enum ct_dccp_roles role;
+
+- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
++ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
+ BUG_ON(dh == NULL);
+ type = dh->dccph_type;
+
+@@ -575,7 +575,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
+ unsigned int cscov;
+ const char *msg;
+
+- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
++ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
+ if (dh == NULL) {
+ msg = "nf_ct_dccp: short packet ";
+ goto out_invalid;
+diff --git a/scripts/package/builddeb b/scripts/package/builddeb
+index 3c6c0b14c807..bee55f647369 100644
+--- a/scripts/package/builddeb
++++ b/scripts/package/builddeb
+@@ -41,9 +41,9 @@ create_package() {
+ parisc*)
+ debarch=hppa ;;
+ mips*)
+- debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y .config && echo el) ;;
++ debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el || true) ;;
+ arm*)
+- debarch=arm$(grep -q CONFIG_AEABI=y .config && echo el) ;;
++ debarch=arm$(grep -q CONFIG_AEABI=y $KCONFIG_CONFIG && echo el || true) ;;
+ *)
+ echo "" >&2
+ echo "** ** ** WARNING ** ** **" >&2
+@@ -62,7 +62,7 @@ create_package() {
+ fi
+
+ # Create the package
+- dpkg-gencontrol -isp $forcearch -p$pname -P"$pdir"
++ dpkg-gencontrol -isp $forcearch -Vkernel:debarch="${debarch:-$(dpkg --print-architecture)}" -p$pname -P"$pdir"
+ dpkg --build "$pdir" ..
+ }
+
+@@ -105,12 +105,12 @@ fi
+ if [ "$ARCH" = "um" ] ; then
+ $MAKE linux
+ cp System.map "$tmpdir/usr/lib/uml/modules/$version/System.map"
+- cp .config "$tmpdir/usr/share/doc/$packagename/config"
++ cp $KCONFIG_CONFIG "$tmpdir/usr/share/doc/$packagename/config"
+ gzip "$tmpdir/usr/share/doc/$packagename/config"
+ cp $KBUILD_IMAGE "$tmpdir/usr/bin/linux-$version"
+ else
+ cp System.map "$tmpdir/boot/System.map-$version"
+- cp .config "$tmpdir/boot/config-$version"
++ cp $KCONFIG_CONFIG "$tmpdir/boot/config-$version"
+ # Not all arches include the boot path in KBUILD_IMAGE
+ if [ -e $KBUILD_IMAGE ]; then
+ cp $KBUILD_IMAGE "$tmpdir/boot/vmlinuz-$version"
+@@ -119,7 +119,7 @@ else
+ fi
+ fi
+
+-if grep -q '^CONFIG_MODULES=y' .config ; then
++if grep -q '^CONFIG_MODULES=y' $KCONFIG_CONFIG ; then
+ INSTALL_MOD_PATH="$tmpdir" make KBUILD_SRC= modules_install
+ if [ "$ARCH" = "um" ] ; then
+ mv "$tmpdir/lib/modules/$version"/* "$tmpdir/usr/lib/uml/modules/$version/"
+@@ -240,21 +240,21 @@ fi
+ # Build header package
+ (cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
+ (cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
+-(cd $objtree; find .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
++(cd $objtree; find Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
+ destdir=$kernel_headers_dir/usr/src/linux-headers-$version
+ mkdir -p "$destdir"
+ (cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
+ (cd $objtree; tar -c -f - -T "$objtree/debian/hdrobjfiles") | (cd $destdir; tar -xf -)
++(cd $objtree; cp $KCONFIG_CONFIG $destdir/.config) # copy .config manually to be where it's expected to be
+ rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
+-arch=$(dpkg --print-architecture)
+
+ cat <<EOF >> debian/control
+
+ Package: $kernel_headers_packagename
+ Provides: linux-headers, linux-headers-2.6
+-Architecture: $arch
+-Description: Linux kernel headers for $KERNELRELEASE on $arch
+- This package provides kernel header files for $KERNELRELEASE on $arch
++Architecture: any
++Description: Linux kernel headers for $KERNELRELEASE on \${kernel:debarch}
++ This package provides kernel header files for $KERNELRELEASE on \${kernel:debarch}
+ .
+ This is useful for people who need to build external modules
+ EOF
diff --git a/1057_linux-3.2.58.patch b/1057_linux-3.2.58.patch
new file mode 100644
index 00000000..39e721d1
--- /dev/null
+++ b/1057_linux-3.2.58.patch
@@ -0,0 +1,3567 @@
+diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt
+index b15e29f31121..90aae8564eda 100644
+--- a/Documentation/video4linux/gspca.txt
++++ b/Documentation/video4linux/gspca.txt
+@@ -55,6 +55,7 @@ zc3xx 0458:700f Genius VideoCam Web V2
+ sonixj 0458:7025 Genius Eye 311Q
+ sn9c20x 0458:7029 Genius Look 320s
+ sonixj 0458:702e Genius Slim 310 NB
++sn9c20x 0458:7045 Genius Look 1320 V2
+ sn9c20x 0458:704a Genius Slim 1320
+ sn9c20x 0458:704c Genius i-Look 1321
+ sn9c20x 045e:00f4 LifeCam VX-6000 (SN9C20x + OV9650)
+diff --git a/Makefile b/Makefile
+index c92db9b51a97..d59b394ba6cd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 57
++SUBLEVEL = 58
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
+index 1d2ef5a3fc84..40736da9bea8 100644
+--- a/arch/alpha/lib/csum_partial_copy.c
++++ b/arch/alpha/lib/csum_partial_copy.c
+@@ -373,11 +373,6 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+ __wsum
+ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+ {
+- __wsum checksum;
+- mm_segment_t oldfs = get_fs();
+- set_fs(KERNEL_DS);
+- checksum = csum_partial_copy_from_user((__force const void __user *)src,
+- dst, len, sum, NULL);
+- set_fs(oldfs);
+- return checksum;
++ return csum_partial_copy_from_user((__force const void __user *)src,
++ dst, len, sum, NULL);
+ }
+diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
+index 253cc86318bf..aefd4594c9bf 100644
+--- a/arch/arm/include/asm/futex.h
++++ b/arch/arm/include/asm/futex.h
+@@ -3,11 +3,6 @@
+
+ #ifdef __KERNEL__
+
+-#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
+-/* ARM doesn't provide unprivileged exclusive memory accessors */
+-#include <asm-generic/futex.h>
+-#else
+-
+ #include <linux/futex.h>
+ #include <linux/uaccess.h>
+ #include <asm/errno.h>
+@@ -163,6 +158,5 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+ return ret;
+ }
+
+-#endif /* !(CPU_USE_DOMAINS && SMP) */
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_ARM_FUTEX_H */
+diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
+index 470457e1cfc5..1cb80c4627e4 100644
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -123,6 +123,7 @@
+ #define L_PTE_USER (_AT(pteval_t, 1) << 8)
+ #define L_PTE_XN (_AT(pteval_t, 1) << 9)
+ #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
++#define L_PTE_NONE (_AT(pteval_t, 1) << 11)
+
+ /*
+ * These are the memory types, defined to be compatible with
+@@ -138,6 +139,7 @@
+ #define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */
+ #define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */
+ #define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
++#define L_PTE_MT_VECTORS (_AT(pteval_t, 0x0f) << 2) /* 1111 */
+ #define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
+
+ #endif /* _ASM_PGTABLE_2LEVEL_H */
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index 9b419abd1334..fcbac3c50b52 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -74,7 +74,7 @@ extern pgprot_t pgprot_kernel;
+
+ #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
+
+-#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
++#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
+ #define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
+ #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER)
+ #define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+@@ -84,7 +84,7 @@ extern pgprot_t pgprot_kernel;
+ #define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
+ #define PAGE_KERNEL_EXEC pgprot_kernel
+
+-#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
++#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
+ #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
+ #define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
+ #define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+@@ -279,7 +279,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ {
+- const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
++ const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
+ pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ return pte;
+ }
+diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
+index 67f75a0b66d6..4e1ef6edb489 100644
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -458,7 +458,6 @@ config CPU_32v5
+ config CPU_32v6
+ bool
+ select TLS_REG_EMUL if !CPU_32v6K && !MMU
+- select CPU_USE_DOMAINS if CPU_V6 && MMU
+
+ config CPU_32v6K
+ bool
+@@ -652,7 +651,7 @@ config ARM_THUMBEE
+
+ config SWP_EMULATE
+ bool "Emulate SWP/SWPB instructions"
+- depends on !CPU_USE_DOMAINS && CPU_V7
++ depends on CPU_V7
+ select HAVE_PROC_CPU if PROC_FS
+ default y if SMP
+ help
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index 9e28fdb1c739..082fa18b2583 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -426,6 +426,14 @@ static void __init build_mem_type_table(void)
+ mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+ }
+ /*
++ * We don't use domains on ARMv6 (since this causes problems with
++ * v6/v7 kernels), so we must use a separate memory type for user
++ * r/o, kernel r/w to map the vectors page.
++ */
++ if (cpu_arch == CPU_ARCH_ARMv6)
++ vecs_pgprot |= L_PTE_MT_VECTORS;
++
++ /*
+ * ARMv6 and above have extended page tables.
+ */
+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
+diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
+index 307a4def8d3a..8a3edd4a3e48 100644
+--- a/arch/arm/mm/proc-macros.S
++++ b/arch/arm/mm/proc-macros.S
+@@ -106,13 +106,9 @@
+ * 100x 1 0 1 r/o no acc
+ * 10x0 1 0 1 r/o no acc
+ * 1011 0 0 1 r/w no acc
+- * 110x 0 1 0 r/w r/o
+- * 11x0 0 1 0 r/w r/o
+- * 1111 0 1 1 r/w r/w
+- *
+- * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed:
+ * 110x 1 1 1 r/o r/o
+ * 11x0 1 1 1 r/o r/o
++ * 1111 0 1 1 r/w r/w
+ */
+ .macro armv6_mt_table pfx
+ \pfx\()_mt_table:
+@@ -131,7 +127,7 @@
+ .long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED
+ .long 0x00 @ unused
+ .long 0x00 @ unused
+- .long 0x00 @ unused
++ .long PTE_CACHEABLE | PTE_BUFFERABLE | PTE_EXT_APX @ L_PTE_MT_VECTORS
+ .endm
+
+ .macro armv6_set_pte_ext pfx
+@@ -152,20 +148,21 @@
+
+ tst r1, #L_PTE_USER
+ orrne r3, r3, #PTE_EXT_AP1
+-#ifdef CONFIG_CPU_USE_DOMAINS
+- @ allow kernel read/write access to read-only user pages
+ tstne r3, #PTE_EXT_APX
+- bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+-#endif
++
++ @ user read-only -> kernel read-only
++ bicne r3, r3, #PTE_EXT_AP0
+
+ tst r1, #L_PTE_XN
+ orrne r3, r3, #PTE_EXT_XN
+
+- orr r3, r3, r2
++ eor r3, r3, r2
+
+ tst r1, #L_PTE_YOUNG
+ tstne r1, #L_PTE_PRESENT
+ moveq r3, #0
++ tstne r1, #L_PTE_NONE
++ movne r3, #0
+
+ str r3, [r0]
+ mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
+index 19d21ff27b0e..43c69815de69 100644
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -160,17 +160,14 @@ ENTRY(cpu_v7_set_pte_ext)
+
+ tst r1, #L_PTE_USER
+ orrne r3, r3, #PTE_EXT_AP1
+-#ifdef CONFIG_CPU_USE_DOMAINS
+- @ allow kernel read/write access to read-only user pages
+- tstne r3, #PTE_EXT_APX
+- bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+-#endif
+
+ tst r1, #L_PTE_XN
+ orrne r3, r3, #PTE_EXT_XN
+
+ tst r1, #L_PTE_YOUNG
+ tstne r1, #L_PTE_PRESENT
++ eorne r1, r1, #L_PTE_NONE
++ tstne r1, #L_PTE_NONE
+ moveq r3, #0
+
+ ARM( str r3, [r0, #2048]! )
+diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
+index f8a751c03282..5bf34ec89669 100644
+--- a/arch/mips/power/hibernate.S
++++ b/arch/mips/power/hibernate.S
+@@ -44,6 +44,7 @@ LEAF(swsusp_arch_resume)
+ bne t1, t3, 1b
+ PTR_L t0, PBE_NEXT(t0)
+ bnez t0, 0b
++ jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
+ PTR_LA t0, saved_regs
+ PTR_L ra, PT_R31(t0)
+ PTR_L sp, PT_R29(t0)
+diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
+index 694158b9a50f..3a6528c1b1cd 100644
+--- a/arch/sh/kernel/dumpstack.c
++++ b/arch/sh/kernel/dumpstack.c
+@@ -80,7 +80,7 @@ static int print_trace_stack(void *data, char *name)
+ */
+ static void print_trace_address(void *data, unsigned long addr, int reliable)
+ {
+- printk(data);
++ printk("%s", (char *)data);
+ printk_address(addr, reliable);
+ }
+
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index 87537e208445..88d442d49acb 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -24,7 +24,7 @@ config SPARC
+ select HAVE_IRQ_WORK
+ select HAVE_DMA_ATTRS
+ select HAVE_DMA_API_DEBUG
+- select HAVE_ARCH_JUMP_LABEL
++ select HAVE_ARCH_JUMP_LABEL if SPARC64
+ select HAVE_GENERIC_HARDIRQS
+ select GENERIC_IRQ_SHOW
+ select USE_GENERIC_SMP_HELPERS if SMP
+diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
+index 3e1449f07798..6d6c7317d4c7 100644
+--- a/arch/sparc/include/asm/uaccess_64.h
++++ b/arch/sparc/include/asm/uaccess_64.h
+@@ -267,8 +267,8 @@ extern long __strnlen_user(const char __user *, long len);
+
+ #define strlen_user __strlen_user
+ #define strnlen_user __strnlen_user
+-#define __copy_to_user_inatomic ___copy_to_user
+-#define __copy_from_user_inatomic ___copy_from_user
++#define __copy_to_user_inatomic __copy_to_user
++#define __copy_from_user_inatomic __copy_from_user
+
+ #endif /* __ASSEMBLY__ */
+
+diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
+index 31111e35281e..656b5b604936 100644
+--- a/arch/sparc/kernel/pci.c
++++ b/arch/sparc/kernel/pci.c
+@@ -487,8 +487,8 @@ static void __devinit apb_fake_ranges(struct pci_dev *dev,
+ pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
+ apb_calc_first_last(map, &first, &last);
+ res = bus->resource[1];
+- res->start = (first << 21);
+- res->end = (last << 21) + ((1 << 21) - 1);
++ res->start = (first << 29);
++ res->end = (last << 29) + ((1 << 29) - 1);
+ res->flags = IORESOURCE_MEM;
+ pci_resource_adjust(res, &pbm->mem_space);
+ }
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index 817187d42777..557212cfa0f2 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -184,7 +184,8 @@ linux_sparc_syscall32:
+ mov %i0, %l5 ! IEU1
+ 5: call %l7 ! CTI Group brk forced
+ srl %i5, 0, %o5 ! IEU1
+- ba,a,pt %xcc, 3f
++ ba,pt %xcc, 3f
++ sra %o0, 0, %o0
+
+ /* Linux native system calls enter here... */
+ .align 32
+@@ -212,7 +213,6 @@ linux_sparc_syscall:
+ 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
+ ret_sys_call:
+ ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
+- sra %o0, 0, %o0
+ mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
+ sllx %g2, 32, %g2
+
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 646d192b18a2..1a3cf6e5720c 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -18,6 +18,7 @@
+ #include <asm/hypervisor.h>
+ #include <asm/hyperv.h>
+ #include <asm/mshyperv.h>
++#include <asm/timer.h>
+
+ struct ms_hyperv_info ms_hyperv;
+ EXPORT_SYMBOL_GPL(ms_hyperv);
+@@ -70,6 +71,11 @@ static void __init ms_hyperv_init_platform(void)
+
+ if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
+ clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
++
++#ifdef CONFIG_X86_IO_APIC
++ no_timer_check = 1;
++#endif
++
+ }
+
+ const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index ea697263b373..4ac453114d7e 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -230,6 +230,17 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ }
+ }
+
++ /*
++ * On x86-64 we do not support 16-bit segments due to
++ * IRET leaking the high bits of the kernel stack address.
++ */
++#ifdef CONFIG_X86_64
++ if (!ldt_info.seg_32bit) {
++ error = -EINVAL;
++ goto out_unlock;
++ }
++#endif
++
+ fill_ldt(&ldt, &ldt_info);
+ if (oldmode)
+ ldt.avl = 0;
+diff --git a/block/blk-core.c b/block/blk-core.c
+index a219c8914daa..ec494ff6d244 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -2077,7 +2077,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
+ if (!req->bio)
+ return false;
+
+- trace_block_rq_complete(req->q, req);
++ trace_block_rq_complete(req->q, req, nr_bytes);
+
+ /*
+ * For fs requests, rq is just carrier of independent bio's
+diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
+index 92ce30268826..263f899e66e9 100644
+--- a/drivers/char/ipmi/ipmi_bt_sm.c
++++ b/drivers/char/ipmi/ipmi_bt_sm.c
+@@ -352,7 +352,7 @@ static inline void write_all_bytes(struct si_sm_data *bt)
+
+ static inline int read_all_bytes(struct si_sm_data *bt)
+ {
+- unsigned char i;
++ unsigned int i;
+
+ /*
+ * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
+diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
+index b3379d6a5c57..2c6b671bde8e 100644
+--- a/drivers/cpufreq/powernow-k6.c
++++ b/drivers/cpufreq/powernow-k6.c
+@@ -25,41 +25,108 @@
+ static unsigned int busfreq; /* FSB, in 10 kHz */
+ static unsigned int max_multiplier;
+
++static unsigned int param_busfreq = 0;
++static unsigned int param_max_multiplier = 0;
++
++module_param_named(max_multiplier, param_max_multiplier, uint, S_IRUGO);
++MODULE_PARM_DESC(max_multiplier, "Maximum multiplier (allowed values: 20 30 35 40 45 50 55 60)");
++
++module_param_named(bus_frequency, param_busfreq, uint, S_IRUGO);
++MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz");
+
+ /* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
+ static struct cpufreq_frequency_table clock_ratio[] = {
+- {45, /* 000 -> 4.5x */ 0},
++ {60, /* 110 -> 6.0x */ 0},
++ {55, /* 011 -> 5.5x */ 0},
+ {50, /* 001 -> 5.0x */ 0},
++ {45, /* 000 -> 4.5x */ 0},
+ {40, /* 010 -> 4.0x */ 0},
+- {55, /* 011 -> 5.5x */ 0},
+- {20, /* 100 -> 2.0x */ 0},
+- {30, /* 101 -> 3.0x */ 0},
+- {60, /* 110 -> 6.0x */ 0},
+ {35, /* 111 -> 3.5x */ 0},
++ {30, /* 101 -> 3.0x */ 0},
++ {20, /* 100 -> 2.0x */ 0},
+ {0, CPUFREQ_TABLE_END}
+ };
+
++static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
++static const u8 register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 };
++
++static const struct {
++ unsigned freq;
++ unsigned mult;
++} usual_frequency_table[] = {
++ { 400000, 40 }, // 100 * 4
++ { 450000, 45 }, // 100 * 4.5
++ { 475000, 50 }, // 95 * 5
++ { 500000, 50 }, // 100 * 5
++ { 506250, 45 }, // 112.5 * 4.5
++ { 533500, 55 }, // 97 * 5.5
++ { 550000, 55 }, // 100 * 5.5
++ { 562500, 50 }, // 112.5 * 5
++ { 570000, 60 }, // 95 * 6
++ { 600000, 60 }, // 100 * 6
++ { 618750, 55 }, // 112.5 * 5.5
++ { 660000, 55 }, // 120 * 5.5
++ { 675000, 60 }, // 112.5 * 6
++ { 720000, 60 }, // 120 * 6
++};
++
++#define FREQ_RANGE 3000
+
+ /**
+ * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
+ *
+- * Returns the current setting of the frequency multiplier. Core clock
++ * Returns the current setting of the frequency multiplier. Core clock
+ * speed is frequency of the Front-Side Bus multiplied with this value.
+ */
+ static int powernow_k6_get_cpu_multiplier(void)
+ {
+- u64 invalue = 0;
++ unsigned long invalue = 0;
+ u32 msrval;
+
++ local_irq_disable();
++
+ msrval = POWERNOW_IOPORT + 0x1;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
+ invalue = inl(POWERNOW_IOPORT + 0x8);
+ msrval = POWERNOW_IOPORT + 0x0;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
+
+- return clock_ratio[(invalue >> 5)&7].index;
++ local_irq_enable();
++
++ return clock_ratio[register_to_index[(invalue >> 5)&7]].index;
+ }
+
++static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
++{
++ unsigned long outvalue, invalue;
++ unsigned long msrval;
++ unsigned long cr0;
++
++ /* we now need to transform best_i to the BVC format, see AMD#23446 */
++
++ /*
++ * The processor doesn't respond to inquiry cycles while changing the
++ * frequency, so we must disable cache.
++ */
++ local_irq_disable();
++ cr0 = read_cr0();
++ write_cr0(cr0 | X86_CR0_CD);
++ wbinvd();
++
++ outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5);
++
++ msrval = POWERNOW_IOPORT + 0x1;
++ wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
++ invalue = inl(POWERNOW_IOPORT + 0x8);
++ invalue = invalue & 0x1f;
++ outvalue = outvalue | invalue;
++ outl(outvalue, (POWERNOW_IOPORT + 0x8));
++ msrval = POWERNOW_IOPORT + 0x0;
++ wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
++
++ write_cr0(cr0);
++ local_irq_enable();
++}
+
+ /**
+ * powernow_k6_set_state - set the PowerNow! multiplier
+@@ -69,8 +136,6 @@ static int powernow_k6_get_cpu_multiplier(void)
+ */
+ static void powernow_k6_set_state(unsigned int best_i)
+ {
+- unsigned long outvalue = 0, invalue = 0;
+- unsigned long msrval;
+ struct cpufreq_freqs freqs;
+
+ if (clock_ratio[best_i].index > max_multiplier) {
+@@ -84,18 +149,7 @@ static void powernow_k6_set_state(unsigned int best_i)
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+- /* we now need to transform best_i to the BVC format, see AMD#23446 */
+-
+- outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5);
+-
+- msrval = POWERNOW_IOPORT + 0x1;
+- wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
+- invalue = inl(POWERNOW_IOPORT + 0x8);
+- invalue = invalue & 0xf;
+- outvalue = outvalue | invalue;
+- outl(outvalue , (POWERNOW_IOPORT + 0x8));
+- msrval = POWERNOW_IOPORT + 0x0;
+- wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
++ powernow_k6_set_cpu_multiplier(best_i);
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+@@ -140,18 +194,57 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
+ return 0;
+ }
+
+-
+ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
+ {
+ unsigned int i, f;
+ int result;
++ unsigned khz;
+
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+- /* get frequencies */
+- max_multiplier = powernow_k6_get_cpu_multiplier();
+- busfreq = cpu_khz / max_multiplier;
++ max_multiplier = 0;
++ khz = cpu_khz;
++ for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++) {
++ if (khz >= usual_frequency_table[i].freq - FREQ_RANGE &&
++ khz <= usual_frequency_table[i].freq + FREQ_RANGE) {
++ khz = usual_frequency_table[i].freq;
++ max_multiplier = usual_frequency_table[i].mult;
++ break;
++ }
++ }
++ if (param_max_multiplier) {
++ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
++ if (clock_ratio[i].index == param_max_multiplier) {
++ max_multiplier = param_max_multiplier;
++ goto have_max_multiplier;
++ }
++ }
++ printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
++ return -EINVAL;
++ }
++
++ if (!max_multiplier) {
++ printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz);
++ printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n");
++ return -EOPNOTSUPP;
++ }
++
++have_max_multiplier:
++ param_max_multiplier = max_multiplier;
++
++ if (param_busfreq) {
++ if (param_busfreq >= 50000 && param_busfreq <= 150000) {
++ busfreq = param_busfreq / 10;
++ goto have_busfreq;
++ }
++ printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
++ return -EINVAL;
++ }
++
++ busfreq = khz / max_multiplier;
++have_busfreq:
++ param_busfreq = busfreq * 10;
+
+ /* table init */
+ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+@@ -163,7 +256,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
+ }
+
+ /* cpuinfo and default policy values */
+- policy->cpuinfo.transition_latency = 200000;
++ policy->cpuinfo.transition_latency = 500000;
+ policy->cur = busfreq * max_multiplier;
+
+ result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
+diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
+index 385c58e8405b..0f8114de0877 100644
+--- a/drivers/gpio/gpio-mxs.c
++++ b/drivers/gpio/gpio-mxs.c
+@@ -167,7 +167,8 @@ static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port)
+ ct->regs.ack = PINCTRL_IRQSTAT(port->id) + MXS_CLR;
+ ct->regs.mask = PINCTRL_IRQEN(port->id);
+
+- irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
++ irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
++ IRQ_NOREQUEST, 0);
+ }
+
+ static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 2ea8a96e0f14..27999d990da8 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -8946,6 +8946,12 @@ struct intel_quirk intel_quirks[] = {
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
++ /* Acer Aspire 4736Z */
++ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
++
++ /* Acer Aspire 5336 */
++ { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
++
+ /* Dell XPS13 HD Sandy Bridge */
+ { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
+ /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
+diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
+index 12041fa6dca8..b221f2b6305f 100644
+--- a/drivers/gpu/drm/i915/intel_tv.c
++++ b/drivers/gpu/drm/i915/intel_tv.c
+@@ -1599,9 +1599,14 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
+ /*
+ * If the device type is not TV, continue.
+ */
+- if (p_child->device_type != DEVICE_TYPE_INT_TV &&
+- p_child->device_type != DEVICE_TYPE_TV)
++ switch (p_child->device_type) {
++ case DEVICE_TYPE_INT_TV:
++ case DEVICE_TYPE_TV:
++ case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
++ break;
++ default:
+ continue;
++ }
+ /* Only when the addin_offset is non-zero, it is regarded
+ * as present.
+ */
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 63e714307c8e..3291ab8e8f84 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -738,6 +738,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+ if (radeon_connector->edid) {
+ drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+ ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
++ drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
+ return ret;
+ }
+ drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+index 34e51a1695b8..907c26ff17a5 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+@@ -147,7 +147,7 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
+ }
+
+ if (!vmw_kms_validate_mode_vram(vmw_priv,
+- info->fix.line_length,
++ var->xres * var->bits_per_pixel/8,
+ var->yoffset + var->yres)) {
+ DRM_ERROR("Requested geom can not fit in framebuffer\n");
+ return -EINVAL;
+@@ -162,6 +162,8 @@ static int vmw_fb_set_par(struct fb_info *info)
+ struct vmw_private *vmw_priv = par->vmw_priv;
+ int ret;
+
++ info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;
++
+ ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+ info->fix.line_length,
+ par->bpp, par->depth);
+@@ -177,6 +179,7 @@ static int vmw_fb_set_par(struct fb_info *info)
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
++ vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
+ vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+ }
+
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index 810658e8211a..d01edf3c7b28 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -485,7 +485,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
+ /* Make sure all reads are done before we update the read index since */
+ /* the writer may start writing to the read area once the read index */
+ /*is updated */
+- smp_mb();
++ mb();
+
+ /* Update the read index */
+ hv_set_next_read_location(inring_info, next_read_location);
+diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
+index d9b0ebcb67d7..6eeb84d6e5cf 100644
+--- a/drivers/infiniband/hw/ehca/ehca_cq.c
++++ b/drivers/infiniband/hw/ehca/ehca_cq.c
+@@ -296,6 +296,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
+ (my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
+ if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
+ ehca_err(device, "Copy to udata failed.");
++ cq = ERR_PTR(-EFAULT);
+ goto create_cq_exit4;
+ }
+ }
+diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
+index 714293b78518..e2f9a51f4a38 100644
+--- a/drivers/infiniband/hw/ipath/ipath_diag.c
++++ b/drivers/infiniband/hw/ipath/ipath_diag.c
+@@ -326,7 +326,7 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
+ size_t count, loff_t *off)
+ {
+ u32 __iomem *piobuf;
+- u32 plen, clen, pbufn;
++ u32 plen, pbufn, maxlen_reserve;
+ struct ipath_diag_pkt odp;
+ struct ipath_diag_xpkt dp;
+ u32 *tmpbuf = NULL;
+@@ -335,51 +335,29 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
+ u64 val;
+ u32 l_state, lt_state; /* LinkState, LinkTrainingState */
+
+- if (count < sizeof(odp)) {
+- ret = -EINVAL;
+- goto bail;
+- }
+
+ if (count == sizeof(dp)) {
+ if (copy_from_user(&dp, data, sizeof(dp))) {
+ ret = -EFAULT;
+ goto bail;
+ }
+- } else if (copy_from_user(&odp, data, sizeof(odp))) {
+- ret = -EFAULT;
++ } else if (count == sizeof(odp)) {
++ if (copy_from_user(&odp, data, sizeof(odp))) {
++ ret = -EFAULT;
++ goto bail;
++ }
++ } else {
++ ret = -EINVAL;
+ goto bail;
+ }
+
+- /*
+- * Due to padding/alignment issues (lessened with new struct)
+- * the old and new structs are the same length. We need to
+- * disambiguate them, which we can do because odp.len has never
+- * been less than the total of LRH+BTH+DETH so far, while
+- * dp.unit (same offset) unit is unlikely to get that high.
+- * Similarly, dp.data, the pointer to user at the same offset
+- * as odp.unit, is almost certainly at least one (512byte)page
+- * "above" NULL. The if-block below can be omitted if compatibility
+- * between a new driver and older diagnostic code is unimportant.
+- * compatibility the other direction (new diags, old driver) is
+- * handled in the diagnostic code, with a warning.
+- */
+- if (dp.unit >= 20 && dp.data < 512) {
+- /* very probable version mismatch. Fix it up */
+- memcpy(&odp, &dp, sizeof(odp));
+- /* We got a legacy dp, copy elements to dp */
+- dp.unit = odp.unit;
+- dp.data = odp.data;
+- dp.len = odp.len;
+- dp.pbc_wd = 0; /* Indicate we need to compute PBC wd */
+- }
+-
+ /* send count must be an exact number of dwords */
+ if (dp.len & 3) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
+- clen = dp.len >> 2;
++ plen = dp.len >> 2;
+
+ dd = ipath_lookup(dp.unit);
+ if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
+@@ -422,16 +400,22 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
+ goto bail;
+ }
+
+- /* need total length before first word written */
+- /* +1 word is for the qword padding */
+- plen = sizeof(u32) + dp.len;
+-
+- if ((plen + 4) > dd->ipath_ibmaxlen) {
++ /*
++ * need total length before first word written, plus 2 Dwords. One Dword
++ * is for padding so we get the full user data when not aligned on
++ * a word boundary. The other Dword is to make sure we have room for the
++ * ICRC which gets tacked on later.
++ */
++ maxlen_reserve = 2 * sizeof(u32);
++ if (dp.len > dd->ipath_ibmaxlen - maxlen_reserve) {
+ ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
+- plen - 4, dd->ipath_ibmaxlen);
++ dp.len, dd->ipath_ibmaxlen);
+ ret = -EINVAL;
+- goto bail; /* before writing pbc */
++ goto bail;
+ }
++
++ plen = sizeof(u32) + dp.len;
++
+ tmpbuf = vmalloc(plen);
+ if (!tmpbuf) {
+ dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
+@@ -473,11 +457,11 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
+ */
+ if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
+ ipath_flush_wc();
+- __iowrite32_copy(piobuf + 2, tmpbuf, clen - 1);
++ __iowrite32_copy(piobuf + 2, tmpbuf, plen - 1);
+ ipath_flush_wc();
+- __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
++ __raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
+ } else
+- __iowrite32_copy(piobuf + 2, tmpbuf, clen);
++ __iowrite32_copy(piobuf + 2, tmpbuf, plen);
+
+ ipath_flush_wc();
+
+diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
+index 5b71d43bd89c..42dde06fdb91 100644
+--- a/drivers/infiniband/hw/mthca/mthca_provider.c
++++ b/drivers/infiniband/hw/mthca/mthca_provider.c
+@@ -695,6 +695,7 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
+
+ if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
+ mthca_free_cq(to_mdev(ibdev), cq);
++ err = -EFAULT;
+ goto err_free;
+ }
+
+diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
+index b0471b4237a7..330eb6e1652f 100644
+--- a/drivers/infiniband/hw/nes/nes_verbs.c
++++ b/drivers/infiniband/hw/nes/nes_verbs.c
+@@ -1183,7 +1183,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ kfree(nesqp->allocated_buffer);
+ nes_debug(NES_DBG_QP, "ib_copy_from_udata() Failed \n");
+- return NULL;
++ return ERR_PTR(-EFAULT);
+ }
+ if (req.user_wqe_buffers) {
+ virt_wqs = 1;
+diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
+index 4df80fb7a314..75ca5d27c834 100644
+--- a/drivers/isdn/isdnloop/isdnloop.c
++++ b/drivers/isdn/isdnloop/isdnloop.c
+@@ -518,9 +518,9 @@ static isdnloop_stat isdnloop_cmd_table[] =
+ static void
+ isdnloop_fake_err(isdnloop_card * card)
+ {
+- char buf[60];
++ char buf[64];
+
+- sprintf(buf, "E%s", card->omsg);
++ snprintf(buf, sizeof(buf), "E%s", card->omsg);
+ isdnloop_fake(card, buf, -1);
+ isdnloop_fake(card, "NAK", -1);
+ }
+@@ -903,6 +903,8 @@ isdnloop_parse_cmd(isdnloop_card * card)
+ case 7:
+ /* 0x;EAZ */
+ p += 3;
++ if (strlen(p) >= sizeof(card->eazlist[0]))
++ break;
+ strcpy(card->eazlist[ch - 1], p);
+ break;
+ case 8:
+@@ -1070,6 +1072,12 @@ isdnloop_start(isdnloop_card * card, isdnloop_sdef * sdefp)
+ return -EBUSY;
+ if (copy_from_user((char *) &sdef, (char *) sdefp, sizeof(sdef)))
+ return -EFAULT;
++
++ for (i = 0; i < 3; i++) {
++ if (!memchr(sdef.num[i], 0, sizeof(sdef.num[i])))
++ return -EINVAL;
++ }
++
+ spin_lock_irqsave(&card->isdnloop_lock, flags);
+ switch (sdef.ptype) {
+ case ISDN_PTYPE_EURO:
+@@ -1127,7 +1135,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
+ {
+ ulong a;
+ int i;
+- char cbuf[60];
++ char cbuf[80];
+ isdn_ctrl cmd;
+ isdnloop_cdef cdef;
+
+@@ -1192,7 +1200,6 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
+ break;
+ if ((c->arg & 255) < ISDNLOOP_BCH) {
+ char *p;
+- char dial[50];
+ char dcode[4];
+
+ a = c->arg;
+@@ -1204,10 +1211,10 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
+ } else
+ /* Normal Dial */
+ strcpy(dcode, "CAL");
+- strcpy(dial, p);
+- sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
+- dcode, dial, c->parm.setup.si1,
+- c->parm.setup.si2, c->parm.setup.eazmsn);
++ snprintf(cbuf, sizeof(cbuf),
++ "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
++ dcode, p, c->parm.setup.si1,
++ c->parm.setup.si2, c->parm.setup.eazmsn);
+ i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
+ }
+ break;
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 2c9dd2c94889..80f8bd5e38ee 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -1298,9 +1298,9 @@ static void process_deferred_bios(struct pool *pool)
+ */
+ if (ensure_next_mapping(pool)) {
+ spin_lock_irqsave(&pool->lock, flags);
++ bio_list_add(&pool->deferred_bios, bio);
+ bio_list_merge(&pool->deferred_bios, &bios);
+ spin_unlock_irqrestore(&pool->lock, flags);
+-
+ break;
+ }
+ process_bio(tc, bio);
+diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
+index 86e07a139a16..509e2026790e 100644
+--- a/drivers/media/video/gspca/sn9c20x.c
++++ b/drivers/media/video/gspca/sn9c20x.c
+@@ -2521,6 +2521,7 @@ static const struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x045e, 0x00f4), SN9C20X(OV9650, 0x30, 0)},
+ {USB_DEVICE(0x145f, 0x013d), SN9C20X(OV7660, 0x21, 0)},
+ {USB_DEVICE(0x0458, 0x7029), SN9C20X(HV7131R, 0x11, 0)},
++ {USB_DEVICE(0x0458, 0x7045), SN9C20X(MT9M112, 0x5d, LED_REVERSE)},
+ {USB_DEVICE(0x0458, 0x704a), SN9C20X(MT9M112, 0x5d, 0)},
+ {USB_DEVICE(0x0458, 0x704c), SN9C20X(MT9M112, 0x5d, 0)},
+ {USB_DEVICE(0xa168, 0x0610), SN9C20X(HV7131R, 0x11, 0)},
+diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
+index b015e8e5e8b0..af5c0401ed76 100644
+--- a/drivers/media/video/uvc/uvc_video.c
++++ b/drivers/media/video/uvc/uvc_video.c
+@@ -1267,7 +1267,25 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
+
+ if (!enable) {
+ uvc_uninit_video(stream, 1);
+- usb_set_interface(stream->dev->udev, stream->intfnum, 0);
++ if (stream->intf->num_altsetting > 1) {
++ usb_set_interface(stream->dev->udev,
++ stream->intfnum, 0);
++ } else {
++ /* UVC doesn't specify how to inform a bulk-based device
++ * when the video stream is stopped. Windows sends a
++ * CLEAR_FEATURE(HALT) request to the video streaming
++ * bulk endpoint, mimic the same behaviour.
++ */
++ unsigned int epnum = stream->header.bEndpointAddress
++ & USB_ENDPOINT_NUMBER_MASK;
++ unsigned int dir = stream->header.bEndpointAddress
++ & USB_ENDPOINT_DIR_MASK;
++ unsigned int pipe;
++
++ pipe = usb_sndbulkpipe(stream->dev->udev, epnum) | dir;
++ usb_clear_halt(stream->dev->udev, pipe);
++ }
++
+ uvc_queue_enable(&stream->queue, 0);
+ return 0;
+ }
+diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c
+index e017dc88622a..f035dd3c7a09 100644
+--- a/drivers/mfd/88pm860x-i2c.c
++++ b/drivers/mfd/88pm860x-i2c.c
+@@ -290,6 +290,12 @@ static int __devinit pm860x_probe(struct i2c_client *client,
+ chip->companion_addr = pdata->companion_addr;
+ chip->companion = i2c_new_dummy(chip->client->adapter,
+ chip->companion_addr);
++ if (!chip->companion) {
++ dev_err(&client->dev,
++ "Failed to allocate I2C companion device\n");
++ kfree(chip);
++ return -ENODEV;
++ }
+ i2c_set_clientdata(chip->companion, chip);
+ }
+
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index f1391c21ef26..b2b69167b068 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -772,9 +772,6 @@ config MFD_INTEL_MSIC
+ Passage) chip. This chip embeds audio, battery, GPIO, etc.
+ devices used in Intel Medfield platforms.
+
+-endmenu
+-endif
+-
+ menu "Multimedia Capabilities Port drivers"
+ depends on ARCH_SA1100
+
+@@ -797,3 +794,6 @@ config MCP_UCB1200_TS
+ depends on MCP_UCB1200 && INPUT
+
+ endmenu
++
++endmenu
++endif
+diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
+index 0219115e00c7..90b450c676fe 100644
+--- a/drivers/mfd/max8925-i2c.c
++++ b/drivers/mfd/max8925-i2c.c
+@@ -156,9 +156,18 @@ static int __devinit max8925_probe(struct i2c_client *client,
+ mutex_init(&chip->io_lock);
+
+ chip->rtc = i2c_new_dummy(chip->i2c->adapter, RTC_I2C_ADDR);
++ if (!chip->rtc) {
++ dev_err(chip->dev, "Failed to allocate I2C device for RTC\n");
++ return -ENODEV;
++ }
+ i2c_set_clientdata(chip->rtc, chip);
+
+ chip->adc = i2c_new_dummy(chip->i2c->adapter, ADC_I2C_ADDR);
++ if (!chip->adc) {
++ dev_err(chip->dev, "Failed to allocate I2C device for ADC\n");
++ i2c_unregister_device(chip->rtc);
++ return -ENODEV;
++ }
+ i2c_set_clientdata(chip->adc, chip);
+
+ max8925_device_init(chip, pdata);
+diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
+index 5be53ae9b61c..1926a54f4bd0 100644
+--- a/drivers/mfd/max8997.c
++++ b/drivers/mfd/max8997.c
+@@ -148,10 +148,26 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
+ mutex_init(&max8997->iolock);
+
+ max8997->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
++ if (!max8997->rtc) {
++ dev_err(max8997->dev, "Failed to allocate I2C device for RTC\n");
++ return -ENODEV;
++ }
+ i2c_set_clientdata(max8997->rtc, max8997);
++
+ max8997->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
++ if (!max8997->haptic) {
++ dev_err(max8997->dev, "Failed to allocate I2C device for Haptic\n");
++ ret = -ENODEV;
++ goto err_i2c_haptic;
++ }
+ i2c_set_clientdata(max8997->haptic, max8997);
++
+ max8997->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
++ if (!max8997->muic) {
++ dev_err(max8997->dev, "Failed to allocate I2C device for MUIC\n");
++ ret = -ENODEV;
++ goto err_i2c_muic;
++ }
+ i2c_set_clientdata(max8997->muic, max8997);
+
+ pm_runtime_set_active(max8997->dev);
+@@ -178,7 +194,9 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
+ err_mfd:
+ mfd_remove_devices(max8997->dev);
+ i2c_unregister_device(max8997->muic);
++err_i2c_muic:
+ i2c_unregister_device(max8997->haptic);
++err_i2c_haptic:
+ i2c_unregister_device(max8997->rtc);
+ err:
+ kfree(max8997);
+diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
+index de4096aee248..2fa6a28d3a99 100644
+--- a/drivers/mfd/max8998.c
++++ b/drivers/mfd/max8998.c
+@@ -152,6 +152,10 @@ static int max8998_i2c_probe(struct i2c_client *i2c,
+ mutex_init(&max8998->iolock);
+
+ max8998->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
++ if (!max8998->rtc) {
++ dev_err(&i2c->dev, "Failed to allocate I2C device for RTC\n");
++ return -ENODEV;
++ }
+ i2c_set_clientdata(max8998->rtc, max8998);
+
+ max8998_irq_init(max8998);
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 2e88af13293e..a0ba5ac299c2 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -1390,7 +1390,7 @@ int ath_cabq_update(struct ath_softc *sc)
+ else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
+ sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
+
+- qi.tqi_readyTime = (cur_conf->beacon_interval *
++ qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) *
+ sc->config.cabqReadytime) / 100;
+ ath_txq_update(sc, qnum, &qi);
+
+diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
+index b17d9b6c33a5..0490c7c9c455 100644
+--- a/drivers/net/wireless/b43/phy_n.c
++++ b/drivers/net/wireless/b43/phy_n.c
+@@ -3937,22 +3937,22 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 old_band_5ghz;
+- u32 tmp32;
++ u16 tmp16;
+
+ old_band_5ghz =
+ b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ;
+ if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) {
+- tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
+- b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
++ tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
++ b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
+ b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000);
+- b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
++ b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
+ b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
+ } else if (new_channel->band == IEEE80211_BAND_2GHZ && old_band_5ghz) {
+ b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
+- tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
+- b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
++ tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
++ b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
+ b43_phy_mask(dev, B43_PHY_B_BBCFG, 0x3FFF);
+- b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
++ b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
+ }
+
+ b43_chantab_phy_upload(dev, e);
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
+index 94d35ad910c4..4a369732bf89 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
+@@ -246,13 +246,17 @@ static void iwl_bg_bt_runtime_config(struct work_struct *work)
+ struct iwl_priv *priv =
+ container_of(work, struct iwl_priv, bt_runtime_config);
+
++ mutex_lock(&priv->shrd->mutex);
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+- return;
++ goto out;
+
+ /* dont send host command if rf-kill is on */
+ if (!iwl_is_ready_rf(priv->shrd))
+- return;
++ goto out;
++
+ iwlagn_send_advance_bt_config(priv);
++out:
++ mutex_unlock(&priv->shrd->mutex);
+ }
+
+ static void iwl_bg_bt_full_concurrency(struct work_struct *work)
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+index c474486e3911..503c1608270a 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+@@ -924,7 +924,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 tmp_byte = 0;
+-
++ unsigned long flags;
+ bool rtstatus = true;
+ u8 tmp_u1b;
+ int err = false;
+@@ -936,6 +936,16 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+
+ rtlpci->being_init_adapter = true;
+
++ /* As this function can take a very long time (up to 350 ms)
++ * and can be called with irqs disabled, reenable the irqs
++ * to let the other devices continue being serviced.
++ *
++ * It is safe doing so since our own interrupts will only be enabled
++ * in a subsequent step.
++ */
++ local_save_flags(flags);
++ local_irq_enable();
++
+ rtlpriv->intf_ops->disable_aspm(hw);
+
+ /* 1. MAC Initialize */
+@@ -969,7 +979,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+ /* 3. Initialize MAC/PHY Config by MACPHY_reg.txt */
+ if (rtl92s_phy_mac_config(hw) != true) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("MAC Config failed\n"));
+- return rtstatus;
++ err = rtstatus;
++ goto exit;
+ }
+
+ /* Make sure BB/RF write OK. We should prevent enter IPS. radio off. */
+@@ -979,7 +990,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+ /* 4. Initialize BB After MAC Config PHY_reg.txt, AGC_Tab.txt */
+ if (rtl92s_phy_bb_config(hw) != true) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, ("BB Config failed\n"));
+- return rtstatus;
++ err = rtstatus;
++ goto exit;
+ }
+
+ /* 5. Initiailze RF RAIO_A.txt RF RAIO_B.txt */
+@@ -1015,7 +1027,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+
+ if (rtl92s_phy_rf_config(hw) != true) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("RF Config failed\n"));
+- return rtstatus;
++ err = rtstatus;
++ goto exit;
+ }
+
+ /* After read predefined TXT, we must set BB/MAC/RF
+@@ -1089,8 +1102,9 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
+
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_ON);
+ rtl92s_dm_init(hw);
++exit:
++ local_irq_restore(flags);
+ rtlpci->being_init_adapter = false;
+-
+ return err;
+ }
+
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 9a4626c35f5a..b2528f638031 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -346,8 +346,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
+ * into multiple copies tend to give large frags their
+ * own buffers as before.
+ */
+- if ((offset + size > MAX_BUFFER_OFFSET) &&
+- (size <= MAX_BUFFER_OFFSET) && offset && !head)
++ BUG_ON(size > MAX_BUFFER_OFFSET);
++ if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
+ return true;
+
+ return false;
+diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
+index 21a676931a27..38a99d281141 100644
+--- a/drivers/scsi/isci/port_config.c
++++ b/drivers/scsi/isci/port_config.c
+@@ -610,6 +610,13 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
+ sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+ } else {
+ /* the phy is already the part of the port */
++ u32 port_state = iport->sm.current_state_id;
++
++ /* if the PORT'S state is resetting then the link up is from
++ * port hard reset in this case, we need to tell the port
++ * that link up is recieved
++ */
++ BUG_ON(port_state != SCI_PORT_RESETTING);
+ port_agent->phy_ready_mask |= 1 << phy_index;
+ sci_port_link_up(iport, iphy);
+ }
+diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
+index 60a530fd5f03..e294d118d89c 100644
+--- a/drivers/scsi/isci/task.c
++++ b/drivers/scsi/isci/task.c
+@@ -1390,7 +1390,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
+- ret = -ENODEV;
++ ret = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ }
+
+diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
+index c44e41af2880..c2731cafb473 100644
+--- a/drivers/staging/serqt_usb2/serqt_usb2.c
++++ b/drivers/staging/serqt_usb2/serqt_usb2.c
+@@ -772,7 +772,7 @@ static int qt_startup(struct usb_serial *serial)
+ goto startup_error;
+ }
+
+- switch (serial->dev->descriptor.idProduct) {
++ switch (le16_to_cpu(serial->dev->descriptor.idProduct)) {
+ case QUATECH_DSU100:
+ case QUATECH_QSU100:
+ case QUATECH_ESU100A:
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 4a88eea158ab..ab5dd1655c14 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -2358,6 +2358,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
+ {
+ struct iscsi_cmd *cmd;
+ struct iscsi_conn *conn_p;
++ bool found = false;
+
+ /*
+ * Only send a Asynchronous Message on connections whos network
+@@ -2366,11 +2367,12 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
+ list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
+ if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
+ iscsit_inc_conn_usage_count(conn_p);
++ found = true;
+ break;
+ }
+ }
+
+- if (!conn_p)
++ if (!found)
+ return;
+
+ cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
+diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
+index ab0a3fa634ac..b3280114659c 100644
+--- a/drivers/target/tcm_fc/tfc_sess.c
++++ b/drivers/target/tcm_fc/tfc_sess.c
+@@ -72,6 +72,7 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
+
+ if (tport) {
+ tport->tpg = tpg;
++ tpg->tport = tport;
+ return tport;
+ }
+
+diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
+index b6b2d18fa38d..7b97e7e3ede4 100644
+--- a/drivers/tty/hvc/hvc_console.c
++++ b/drivers/tty/hvc/hvc_console.c
+@@ -31,6 +31,7 @@
+ #include <linux/list.h>
+ #include <linux/module.h>
+ #include <linux/major.h>
++#include <linux/atomic.h>
+ #include <linux/sysrq.h>
+ #include <linux/tty.h>
+ #include <linux/tty_flip.h>
+@@ -70,6 +71,9 @@ static struct task_struct *hvc_task;
+ /* Picks up late kicks after list walk but before schedule() */
+ static int hvc_kicked;
+
++/* hvc_init is triggered from hvc_alloc, i.e. only when actually used */
++static atomic_t hvc_needs_init __read_mostly = ATOMIC_INIT(-1);
++
+ static int hvc_init(void);
+
+ #ifdef CONFIG_MAGIC_SYSRQ
+@@ -825,7 +829,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
+ int i;
+
+ /* We wait until a driver actually comes along */
+- if (!hvc_driver) {
++ if (atomic_inc_not_zero(&hvc_needs_init)) {
+ int err = hvc_init();
+ if (err)
+ return ERR_PTR(err);
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 3f35e42cd120..446df6b39d30 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1222,9 +1222,9 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p)
+ *
+ * Locking: None
+ */
+-static void tty_line_name(struct tty_driver *driver, int index, char *p)
++static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
+ {
+- sprintf(p, "%s%d", driver->name, index + driver->name_base);
++ return sprintf(p, "%s%d", driver->name, index + driver->name_base);
+ }
+
+ /**
+@@ -3321,9 +3321,19 @@ static ssize_t show_cons_active(struct device *dev,
+ if (i >= ARRAY_SIZE(cs))
+ break;
+ }
+- while (i--)
+- count += sprintf(buf + count, "%s%d%c",
+- cs[i]->name, cs[i]->index, i ? ' ':'\n');
++ while (i--) {
++ int index = cs[i]->index;
++ struct tty_driver *drv = cs[i]->device(cs[i], &index);
++
++ /* don't resolve tty0 as some programs depend on it */
++ if (drv && (cs[i]->index > 0 || drv->major != TTY_MAJOR))
++ count += tty_line_name(drv, index, buf + count);
++ else
++ count += sprintf(buf + count, "%s%d",
++ cs[i]->name, cs[i]->index);
++
++ count += sprintf(buf + count, "%c", i ? ' ':'\n');
++ }
+ console_unlock();
+
+ return count;
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 4795c0c9a6eb..ae2b763650d8 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -716,15 +716,15 @@ struct dwc3_event_depevt {
+ * 12 - VndrDevTstRcved
+ * @reserved15_12: Reserved, not used
+ * @event_info: Information about this event
+- * @reserved31_24: Reserved, not used
++ * @reserved31_25: Reserved, not used
+ */
+ struct dwc3_event_devt {
+ u32 one_bit:1;
+ u32 device_event:7;
+ u32 type:4;
+ u32 reserved15_12:4;
+- u32 event_info:8;
+- u32 reserved31_24:8;
++ u32 event_info:9;
++ u32 reserved31_25:7;
+ } __packed;
+
+ /**
+diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
+index 271a9d873608..b299c328f51a 100644
+--- a/drivers/usb/gadget/atmel_usba_udc.c
++++ b/drivers/usb/gadget/atmel_usba_udc.c
+@@ -1875,12 +1875,13 @@ static int atmel_usba_stop(struct usb_gadget_driver *driver)
+
+ driver->unbind(&udc->gadget);
+ udc->gadget.dev.driver = NULL;
+- udc->driver = NULL;
+
+ clk_disable(udc->hclk);
+ clk_disable(udc->pclk);
+
+- DBG(DBG_GADGET, "unregistered driver `%s'\n", driver->driver.name);
++ DBG(DBG_GADGET, "unregistered driver `%s'\n", udc->driver->driver.name);
++
++ udc->driver = NULL;
+
+ return 0;
+ }
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 5c5812860ec9..7ef84c11453d 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -319,9 +319,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
+ r = -ENOBUFS;
+ goto err;
+ }
+- d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
++ r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
+ ARRAY_SIZE(vq->iov) - seg, &out,
+ &in, log, log_num);
++ if (unlikely(r < 0))
++ goto err;
++
++ d = r;
+ if (d == vq->num) {
+ r = 0;
+ goto err;
+@@ -346,6 +350,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
+ *iovcount = seg;
+ if (unlikely(log))
+ *log_num = nlogs;
++
++ /* Detect overrun */
++ if (unlikely(datalen > 0)) {
++ r = UIO_MAXIOV + 1;
++ goto err;
++ }
+ return headcount;
+ err:
+ vhost_discard_vq_desc(vq, headcount);
+@@ -400,6 +410,14 @@ static void handle_rx(struct vhost_net *net)
+ /* On error, stop handling until the next kick. */
+ if (unlikely(headcount < 0))
+ break;
++ /* On overrun, truncate and discard */
++ if (unlikely(headcount > UIO_MAXIOV)) {
++ msg.msg_iovlen = 1;
++ err = sock->ops->recvmsg(NULL, sock, &msg,
++ 1, MSG_DONTWAIT | MSG_TRUNC);
++ pr_debug("Discarded rx packet: len %zd\n", sock_len);
++ continue;
++ }
+ /* OK, now we need to know about added descriptors. */
+ if (!headcount) {
+ if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/aty/mach64_accel.c
+index e45833ce975b..182bd680141f 100644
+--- a/drivers/video/aty/mach64_accel.c
++++ b/drivers/video/aty/mach64_accel.c
+@@ -4,6 +4,7 @@
+ */
+
+ #include <linux/delay.h>
++#include <asm/unaligned.h>
+ #include <linux/fb.h>
+ #include <video/mach64.h>
+ #include "atyfb.h"
+@@ -419,7 +420,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
+ u32 *pbitmap, dwords = (src_bytes + 3) / 4;
+ for (pbitmap = (u32*)(image->data); dwords; dwords--, pbitmap++) {
+ wait_for_fifo(1, par);
+- aty_st_le32(HOST_DATA0, le32_to_cpup(pbitmap), par);
++ aty_st_le32(HOST_DATA0, get_unaligned_le32(pbitmap), par);
+ }
+ }
+
+diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
+index 46f72ed53510..4b87318dcf44 100644
+--- a/drivers/video/aty/mach64_cursor.c
++++ b/drivers/video/aty/mach64_cursor.c
+@@ -5,6 +5,7 @@
+ #include <linux/fb.h>
+ #include <linux/init.h>
+ #include <linux/string.h>
++#include "../fb_draw.h"
+
+ #include <asm/io.h>
+
+@@ -157,24 +158,33 @@ static int atyfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
+
+ for (i = 0; i < height; i++) {
+ for (j = 0; j < width; j++) {
++ u16 l = 0xaaaa;
+ b = *src++;
+ m = *msk++;
+ switch (cursor->rop) {
+ case ROP_XOR:
+ // Upper 4 bits of mask data
+- fb_writeb(cursor_bits_lookup[(b ^ m) >> 4], dst++);
++ l = cursor_bits_lookup[(b ^ m) >> 4] |
+ // Lower 4 bits of mask
+- fb_writeb(cursor_bits_lookup[(b ^ m) & 0x0f],
+- dst++);
++ (cursor_bits_lookup[(b ^ m) & 0x0f] << 8);
+ break;
+ case ROP_COPY:
+ // Upper 4 bits of mask data
+- fb_writeb(cursor_bits_lookup[(b & m) >> 4], dst++);
++ l = cursor_bits_lookup[(b & m) >> 4] |
+ // Lower 4 bits of mask
+- fb_writeb(cursor_bits_lookup[(b & m) & 0x0f],
+- dst++);
++ (cursor_bits_lookup[(b & m) & 0x0f] << 8);
+ break;
+ }
++ /*
++ * If cursor size is not a multiple of 8 characters
++ * we must pad it with transparent pattern (0xaaaa).
++ */
++ if ((j + 1) * 8 > cursor->image.width) {
++ l = comp(l, 0xaaaa,
++ (1 << ((cursor->image.width & 7) * 2)) - 1);
++ }
++ fb_writeb(l & 0xff, dst++);
++ fb_writeb(l >> 8, dst++);
+ }
+ dst += offset;
+ }
+diff --git a/drivers/video/cfbcopyarea.c b/drivers/video/cfbcopyarea.c
+index bb5a96b1645d..bcb57235fcc7 100644
+--- a/drivers/video/cfbcopyarea.c
++++ b/drivers/video/cfbcopyarea.c
+@@ -43,13 +43,22 @@
+ */
+
+ static void
+-bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+- const unsigned long __iomem *src, int src_idx, int bits,
++bitcpy(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
++ const unsigned long __iomem *src, unsigned src_idx, int bits,
+ unsigned n, u32 bswapmask)
+ {
+ unsigned long first, last;
+ int const shift = dst_idx-src_idx;
+- int left, right;
++
++#if 0
++ /*
++ * If you suspect bug in this function, compare it with this simple
++ * memmove implementation.
++ */
++ fb_memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
++ (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
++ return;
++#endif
+
+ first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
+ last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);
+@@ -98,9 +107,8 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ unsigned long d0, d1;
+ int m;
+
+- right = shift & (bits - 1);
+- left = -shift & (bits - 1);
+- bswapmask &= shift;
++ int const left = shift & (bits - 1);
++ int const right = -shift & (bits - 1);
+
+ if (dst_idx+n <= bits) {
+ // Single destination word
+@@ -110,15 +118,15 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ if (shift > 0) {
+ // Single source word
+- d0 >>= right;
++ d0 <<= left;
+ } else if (src_idx+n <= bits) {
+ // Single source word
+- d0 <<= left;
++ d0 >>= right;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src + 1);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+- d0 = d0<<left | d1>>right;
++ d0 = d0 >> right | d1 << left;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
+@@ -135,60 +143,59 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ if (shift > 0) {
+ // Single source word
+ d1 = d0;
+- d0 >>= right;
+- dst++;
++ d0 <<= left;
+ n -= bits - dst_idx;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src++);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+
+- d0 = d0<<left | d1>>right;
+- dst++;
++ d0 = d0 >> right | d1 << left;
+ n -= bits - dst_idx;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
+ d0 = d1;
++ dst++;
+
+ // Main chunk
+ m = n % bits;
+ n /= bits;
+ while ((n >= 4) && !bswapmask) {
+ d1 = FB_READL(src++);
+- FB_WRITEL(d0 << left | d1 >> right, dst++);
++ FB_WRITEL(d0 >> right | d1 << left, dst++);
+ d0 = d1;
+ d1 = FB_READL(src++);
+- FB_WRITEL(d0 << left | d1 >> right, dst++);
++ FB_WRITEL(d0 >> right | d1 << left, dst++);
+ d0 = d1;
+ d1 = FB_READL(src++);
+- FB_WRITEL(d0 << left | d1 >> right, dst++);
++ FB_WRITEL(d0 >> right | d1 << left, dst++);
+ d0 = d1;
+ d1 = FB_READL(src++);
+- FB_WRITEL(d0 << left | d1 >> right, dst++);
++ FB_WRITEL(d0 >> right | d1 << left, dst++);
+ d0 = d1;
+ n -= 4;
+ }
+ while (n--) {
+ d1 = FB_READL(src++);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+- d0 = d0 << left | d1 >> right;
++ d0 = d0 >> right | d1 << left;
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(d0, dst++);
+ d0 = d1;
+ }
+
+ // Trailing bits
+- if (last) {
+- if (m <= right) {
++ if (m) {
++ if (m <= bits - right) {
+ // Single source word
+- d0 <<= left;
++ d0 >>= right;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src);
+ d1 = fb_rev_pixels_in_long(d1,
+ bswapmask);
+- d0 = d0<<left | d1>>right;
++ d0 = d0 >> right | d1 << left;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(comp(d0, FB_READL(dst), last), dst);
+@@ -202,43 +209,46 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ */
+
+ static void
+-bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+- const unsigned long __iomem *src, int src_idx, int bits,
++bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
++ const unsigned long __iomem *src, unsigned src_idx, int bits,
+ unsigned n, u32 bswapmask)
+ {
+ unsigned long first, last;
+ int shift;
+
+- dst += (n-1)/bits;
+- src += (n-1)/bits;
+- if ((n-1) % bits) {
+- dst_idx += (n-1) % bits;
+- dst += dst_idx >> (ffs(bits) - 1);
+- dst_idx &= bits - 1;
+- src_idx += (n-1) % bits;
+- src += src_idx >> (ffs(bits) - 1);
+- src_idx &= bits - 1;
+- }
++#if 0
++ /*
++ * If you suspect bug in this function, compare it with this simple
++ * memmove implementation.
++ */
++ fb_memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
++ (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
++ return;
++#endif
++
++ dst += (dst_idx + n - 1) / bits;
++ src += (src_idx + n - 1) / bits;
++ dst_idx = (dst_idx + n - 1) % bits;
++ src_idx = (src_idx + n - 1) % bits;
+
+ shift = dst_idx-src_idx;
+
+- first = fb_shifted_pixels_mask_long(p, bits - 1 - dst_idx, bswapmask);
+- last = ~fb_shifted_pixels_mask_long(p, bits - 1 - ((dst_idx-n) % bits),
+- bswapmask);
++ first = ~fb_shifted_pixels_mask_long(p, (dst_idx + 1) % bits, bswapmask);
++ last = fb_shifted_pixels_mask_long(p, (bits + dst_idx + 1 - n) % bits, bswapmask);
+
+ if (!shift) {
+ // Same alignment for source and dest
+
+ if ((unsigned long)dst_idx+1 >= n) {
+ // Single word
+- if (last)
+- first &= last;
+- FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst);
++ if (first)
++ last &= first;
++ FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst);
+ } else {
+ // Multiple destination words
+
+ // Leading bits
+- if (first != ~0UL) {
++ if (first) {
+ FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst);
+ dst--;
+ src--;
+@@ -262,7 +272,7 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ FB_WRITEL(FB_READL(src--), dst--);
+
+ // Trailing bits
+- if (last)
++ if (last != -1UL)
+ FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst);
+ }
+ } else {
+@@ -270,29 +280,28 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ unsigned long d0, d1;
+ int m;
+
+- int const left = -shift & (bits-1);
+- int const right = shift & (bits-1);
+- bswapmask &= shift;
++ int const left = shift & (bits-1);
++ int const right = -shift & (bits-1);
+
+ if ((unsigned long)dst_idx+1 >= n) {
+ // Single destination word
+- if (last)
+- first &= last;
++ if (first)
++ last &= first;
+ d0 = FB_READL(src);
+ if (shift < 0) {
+ // Single source word
+- d0 <<= left;
++ d0 >>= right;
+ } else if (1+(unsigned long)src_idx >= n) {
+ // Single source word
+- d0 >>= right;
++ d0 <<= left;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src - 1);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+- d0 = d0>>right | d1<<left;
++ d0 = d0 << left | d1 >> right;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+- FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
++ FB_WRITEL(comp(d0, FB_READL(dst), last), dst);
+ } else {
+ // Multiple destination words
+ /** We must always remember the last value read, because in case
+@@ -307,12 +316,12 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ if (shift < 0) {
+ // Single source word
+ d1 = d0;
+- d0 <<= left;
++ d0 >>= right;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src--);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+- d0 = d0>>right | d1<<left;
++ d0 = d0 << left | d1 >> right;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
+@@ -325,39 +334,39 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+ n /= bits;
+ while ((n >= 4) && !bswapmask) {
+ d1 = FB_READL(src--);
+- FB_WRITEL(d0 >> right | d1 << left, dst--);
++ FB_WRITEL(d0 << left | d1 >> right, dst--);
+ d0 = d1;
+ d1 = FB_READL(src--);
+- FB_WRITEL(d0 >> right | d1 << left, dst--);
++ FB_WRITEL(d0 << left | d1 >> right, dst--);
+ d0 = d1;
+ d1 = FB_READL(src--);
+- FB_WRITEL(d0 >> right | d1 << left, dst--);
++ FB_WRITEL(d0 << left | d1 >> right, dst--);
+ d0 = d1;
+ d1 = FB_READL(src--);
+- FB_WRITEL(d0 >> right | d1 << left, dst--);
++ FB_WRITEL(d0 << left | d1 >> right, dst--);
+ d0 = d1;
+ n -= 4;
+ }
+ while (n--) {
+ d1 = FB_READL(src--);
+ d1 = fb_rev_pixels_in_long(d1, bswapmask);
+- d0 = d0 >> right | d1 << left;
++ d0 = d0 << left | d1 >> right;
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(d0, dst--);
+ d0 = d1;
+ }
+
+ // Trailing bits
+- if (last) {
+- if (m <= left) {
++ if (m) {
++ if (m <= bits - left) {
+ // Single source word
+- d0 >>= right;
++ d0 <<= left;
+ } else {
+ // 2 source words
+ d1 = FB_READL(src);
+ d1 = fb_rev_pixels_in_long(d1,
+ bswapmask);
+- d0 = d0>>right | d1<<left;
++ d0 = d0 << left | d1 >> right;
+ }
+ d0 = fb_rev_pixels_in_long(d0, bswapmask);
+ FB_WRITEL(comp(d0, FB_READL(dst), last), dst);
+@@ -371,9 +380,9 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
+ u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
+ u32 height = area->height, width = area->width;
+ unsigned long const bits_per_line = p->fix.line_length*8u;
+- unsigned long __iomem *dst = NULL, *src = NULL;
++ unsigned long __iomem *base = NULL;
+ int bits = BITS_PER_LONG, bytes = bits >> 3;
+- int dst_idx = 0, src_idx = 0, rev_copy = 0;
++ unsigned dst_idx = 0, src_idx = 0, rev_copy = 0;
+ u32 bswapmask = fb_compute_bswapmask(p);
+
+ if (p->state != FBINFO_STATE_RUNNING)
+@@ -389,7 +398,7 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
+
+ // split the base of the framebuffer into a long-aligned address and the
+ // index of the first bit
+- dst = src = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1));
++ base = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1));
+ dst_idx = src_idx = 8*((unsigned long)p->screen_base & (bytes-1));
+ // add offset of source and target area
+ dst_idx += dy*bits_per_line + dx*p->var.bits_per_pixel;
+@@ -402,20 +411,14 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
+ while (height--) {
+ dst_idx -= bits_per_line;
+ src_idx -= bits_per_line;
+- dst += dst_idx >> (ffs(bits) - 1);
+- dst_idx &= (bytes - 1);
+- src += src_idx >> (ffs(bits) - 1);
+- src_idx &= (bytes - 1);
+- bitcpy_rev(p, dst, dst_idx, src, src_idx, bits,
++ bitcpy_rev(p, base + (dst_idx / bits), dst_idx % bits,
++ base + (src_idx / bits), src_idx % bits, bits,
+ width*p->var.bits_per_pixel, bswapmask);
+ }
+ } else {
+ while (height--) {
+- dst += dst_idx >> (ffs(bits) - 1);
+- dst_idx &= (bytes - 1);
+- src += src_idx >> (ffs(bits) - 1);
+- src_idx &= (bytes - 1);
+- bitcpy(p, dst, dst_idx, src, src_idx, bits,
++ bitcpy(p, base + (dst_idx / bits), dst_idx % bits,
++ base + (src_idx / bits), src_idx % bits, bits,
+ width*p->var.bits_per_pixel, bswapmask);
+ dst_idx += bits_per_line;
+ src_idx += bits_per_line;
+diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c
+index 8335a6fe303e..0d5cb85d071a 100644
+--- a/drivers/video/matrox/matroxfb_accel.c
++++ b/drivers/video/matrox/matroxfb_accel.c
+@@ -192,10 +192,18 @@ void matrox_cfbX_init(struct matrox_fb_info *minfo)
+ minfo->accel.m_dwg_rect = M_DWG_TRAP | M_DWG_SOLID | M_DWG_ARZERO | M_DWG_SGNZERO | M_DWG_SHIFTZERO;
+ if (isMilleniumII(minfo)) minfo->accel.m_dwg_rect |= M_DWG_TRANSC;
+ minfo->accel.m_opmode = mopmode;
++ minfo->accel.m_access = maccess;
++ minfo->accel.m_pitch = mpitch;
+ }
+
+ EXPORT_SYMBOL(matrox_cfbX_init);
+
++static void matrox_accel_restore_maccess(struct matrox_fb_info *minfo)
++{
++ mga_outl(M_MACCESS, minfo->accel.m_access);
++ mga_outl(M_PITCH, minfo->accel.m_pitch);
++}
++
+ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy,
+ int sx, int dy, int dx, int height, int width)
+ {
+@@ -207,7 +215,8 @@ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy,
+ CRITBEGIN
+
+ if ((dy < sy) || ((dy == sy) && (dx <= sx))) {
+- mga_fifo(2);
++ mga_fifo(4);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_SGNZERO |
+ M_DWG_BFCOL | M_DWG_REPLACE);
+ mga_outl(M_AR5, vxres);
+@@ -215,7 +224,8 @@ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy,
+ start = sy*vxres+sx+curr_ydstorg(minfo);
+ end = start+width;
+ } else {
+- mga_fifo(3);
++ mga_fifo(5);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_BFCOL | M_DWG_REPLACE);
+ mga_outl(M_SGN, 5);
+ mga_outl(M_AR5, -vxres);
+@@ -224,7 +234,8 @@ static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy,
+ start = end+width;
+ dy += height-1;
+ }
+- mga_fifo(4);
++ mga_fifo(6);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_AR0, end);
+ mga_outl(M_AR3, start);
+ mga_outl(M_FXBNDRY, ((dx+width)<<16) | dx);
+@@ -246,7 +257,8 @@ static void matrox_accel_bmove_lin(struct matrox_fb_info *minfo, int vxres,
+ CRITBEGIN
+
+ if ((dy < sy) || ((dy == sy) && (dx <= sx))) {
+- mga_fifo(2);
++ mga_fifo(4);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_SGNZERO |
+ M_DWG_BFCOL | M_DWG_REPLACE);
+ mga_outl(M_AR5, vxres);
+@@ -254,7 +266,8 @@ static void matrox_accel_bmove_lin(struct matrox_fb_info *minfo, int vxres,
+ start = sy*vxres+sx+curr_ydstorg(minfo);
+ end = start+width;
+ } else {
+- mga_fifo(3);
++ mga_fifo(5);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_BFCOL | M_DWG_REPLACE);
+ mga_outl(M_SGN, 5);
+ mga_outl(M_AR5, -vxres);
+@@ -263,7 +276,8 @@ static void matrox_accel_bmove_lin(struct matrox_fb_info *minfo, int vxres,
+ start = end+width;
+ dy += height-1;
+ }
+- mga_fifo(5);
++ mga_fifo(7);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_AR0, end);
+ mga_outl(M_AR3, start);
+ mga_outl(M_FXBNDRY, ((dx+width)<<16) | dx);
+@@ -298,7 +312,8 @@ static void matroxfb_accel_clear(struct matrox_fb_info *minfo, u_int32_t color,
+
+ CRITBEGIN
+
+- mga_fifo(5);
++ mga_fifo(7);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, minfo->accel.m_dwg_rect | M_DWG_REPLACE);
+ mga_outl(M_FCOL, color);
+ mga_outl(M_FXBNDRY, ((sx + width) << 16) | sx);
+@@ -341,7 +356,8 @@ static void matroxfb_cfb4_clear(struct matrox_fb_info *minfo, u_int32_t bgx,
+ width >>= 1;
+ sx >>= 1;
+ if (width) {
+- mga_fifo(5);
++ mga_fifo(7);
++ matrox_accel_restore_maccess(minfo);
+ mga_outl(M_DWGCTL, minfo->accel.m_dwg_rect | M_DWG_REPLACE2);
+ mga_outl(M_FCOL, bgx);
+ mga_outl(M_FXBNDRY, ((sx + width) << 16) | sx);
+@@ -415,7 +431,8 @@ static void matroxfb_1bpp_imageblit(struct matrox_fb_info *minfo, u_int32_t fgx,
+
+ CRITBEGIN
+
+- mga_fifo(3);
++ mga_fifo(5);
++ matrox_accel_restore_maccess(minfo);
+ if (easy)
+ mga_outl(M_DWGCTL, M_DWG_ILOAD | M_DWG_SGNZERO | M_DWG_SHIFTZERO | M_DWG_BMONOWF | M_DWG_LINEAR | M_DWG_REPLACE);
+ else
+@@ -425,7 +442,8 @@ static void matroxfb_1bpp_imageblit(struct matrox_fb_info *minfo, u_int32_t fgx,
+ fxbndry = ((xx + width - 1) << 16) | xx;
+ mmio = minfo->mmio.vbase;
+
+- mga_fifo(6);
++ mga_fifo(8);
++ matrox_accel_restore_maccess(minfo);
+ mga_writel(mmio, M_FXBNDRY, fxbndry);
+ mga_writel(mmio, M_AR0, ar0);
+ mga_writel(mmio, M_AR3, 0);
+diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
+index 11ed57bb704e..556d96ce40bf 100644
+--- a/drivers/video/matrox/matroxfb_base.h
++++ b/drivers/video/matrox/matroxfb_base.h
+@@ -307,6 +307,8 @@ struct matrox_accel_data {
+ #endif
+ u_int32_t m_dwg_rect;
+ u_int32_t m_opmode;
++ u_int32_t m_access;
++ u_int32_t m_pitch;
+ };
+
+ struct v4l2_queryctrl;
+diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
+index aba7686b1a32..ac2cf6dcc598 100644
+--- a/drivers/video/tgafb.c
++++ b/drivers/video/tgafb.c
+@@ -1146,222 +1146,57 @@ copyarea_line_32bpp(struct fb_info *info, u32 dy, u32 sy,
+ __raw_writel(TGA_MODE_SBM_24BPP|TGA_MODE_SIMPLE, tga_regs+TGA_MODE_REG);
+ }
+
+-/* The general case of forward copy in 8bpp mode. */
++/* The (almost) general case of backward copy in 8bpp mode. */
+ static inline void
+-copyarea_foreward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
+- u32 height, u32 width, u32 line_length)
++copyarea_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
++ u32 height, u32 width, u32 line_length,
++ const struct fb_copyarea *area)
+ {
+ struct tga_par *par = (struct tga_par *) info->par;
+- unsigned long i, copied, left;
+- unsigned long dpos, spos, dalign, salign, yincr;
+- u32 smask_first, dmask_first, dmask_last;
+- int pixel_shift, need_prime, need_second;
+- unsigned long n64, n32, xincr_first;
++ unsigned i, yincr;
++ int depos, sepos, backward, last_step, step;
++ u32 mask_last;
++ unsigned n32;
+ void __iomem *tga_regs;
+ void __iomem *tga_fb;
+
+- yincr = line_length;
+- if (dy > sy) {
+- dy += height - 1;
+- sy += height - 1;
+- yincr = -yincr;
+- }
+-
+- /* Compute the offsets and alignments in the frame buffer.
+- More than anything else, these control how we do copies. */
+- dpos = dy * line_length + dx;
+- spos = sy * line_length + sx;
+- dalign = dpos & 7;
+- salign = spos & 7;
+- dpos &= -8;
+- spos &= -8;
+-
+- /* Compute the value for the PIXELSHIFT register. This controls
+- both non-co-aligned source and destination and copy direction. */
+- if (dalign >= salign)
+- pixel_shift = dalign - salign;
+- else
+- pixel_shift = 8 - (salign - dalign);
+-
+- /* Figure out if we need an additional priming step for the
+- residue register. */
+- need_prime = (salign > dalign);
+- if (need_prime)
+- dpos -= 8;
+-
+- /* Begin by copying the leading unaligned destination. Copy enough
+- to make the next destination address 32-byte aligned. */
+- copied = 32 - (dalign + (dpos & 31));
+- if (copied == 32)
+- copied = 0;
+- xincr_first = (copied + 7) & -8;
+- smask_first = dmask_first = (1ul << copied) - 1;
+- smask_first <<= salign;
+- dmask_first <<= dalign + need_prime*8;
+- if (need_prime && copied > 24)
+- copied -= 8;
+- left = width - copied;
+-
+- /* Care for small copies. */
+- if (copied > width) {
+- u32 t;
+- t = (1ul << width) - 1;
+- t <<= dalign + need_prime*8;
+- dmask_first &= t;
+- left = 0;
+- }
+-
+- /* Attempt to use 64-byte copies. This is only possible if the
+- source and destination are co-aligned at 64 bytes. */
+- n64 = need_second = 0;
+- if ((dpos & 63) == (spos & 63)
+- && (height == 1 || line_length % 64 == 0)) {
+- /* We may need a 32-byte copy to ensure 64 byte alignment. */
+- need_second = (dpos + xincr_first) & 63;
+- if ((need_second & 32) != need_second)
+- printk(KERN_ERR "tgafb: need_second wrong\n");
+- if (left >= need_second + 64) {
+- left -= need_second;
+- n64 = left / 64;
+- left %= 64;
+- } else
+- need_second = 0;
+- }
+-
+- /* Copy trailing full 32-byte sections. This will be the main
+- loop if the 64 byte loop can't be used. */
+- n32 = left / 32;
+- left %= 32;
+-
+- /* Copy the trailing unaligned destination. */
+- dmask_last = (1ul << left) - 1;
+-
+- tga_regs = par->tga_regs_base;
+- tga_fb = par->tga_fb_base;
+-
+- /* Set up the MODE and PIXELSHIFT registers. */
+- __raw_writel(TGA_MODE_SBM_8BPP|TGA_MODE_COPY, tga_regs+TGA_MODE_REG);
+- __raw_writel(pixel_shift, tga_regs+TGA_PIXELSHIFT_REG);
+- wmb();
+-
+- for (i = 0; i < height; ++i) {
+- unsigned long j;
+- void __iomem *sfb;
+- void __iomem *dfb;
+-
+- sfb = tga_fb + spos;
+- dfb = tga_fb + dpos;
+- if (dmask_first) {
+- __raw_writel(smask_first, sfb);
+- wmb();
+- __raw_writel(dmask_first, dfb);
+- wmb();
+- sfb += xincr_first;
+- dfb += xincr_first;
+- }
+-
+- if (need_second) {
+- __raw_writel(0xffffffff, sfb);
+- wmb();
+- __raw_writel(0xffffffff, dfb);
+- wmb();
+- sfb += 32;
+- dfb += 32;
+- }
+-
+- if (n64 && (((unsigned long)sfb | (unsigned long)dfb) & 63))
+- printk(KERN_ERR
+- "tgafb: misaligned copy64 (s:%p, d:%p)\n",
+- sfb, dfb);
+-
+- for (j = 0; j < n64; ++j) {
+- __raw_writel(sfb - tga_fb, tga_regs+TGA_COPY64_SRC);
+- wmb();
+- __raw_writel(dfb - tga_fb, tga_regs+TGA_COPY64_DST);
+- wmb();
+- sfb += 64;
+- dfb += 64;
+- }
+-
+- for (j = 0; j < n32; ++j) {
+- __raw_writel(0xffffffff, sfb);
+- wmb();
+- __raw_writel(0xffffffff, dfb);
+- wmb();
+- sfb += 32;
+- dfb += 32;
+- }
+-
+- if (dmask_last) {
+- __raw_writel(0xffffffff, sfb);
+- wmb();
+- __raw_writel(dmask_last, dfb);
+- wmb();
+- }
+-
+- spos += yincr;
+- dpos += yincr;
++ /* Do acceleration only if we are aligned on 8 pixels */
++ if ((dx | sx | width) & 7) {
++ cfb_copyarea(info, area);
++ return;
+ }
+
+- /* Reset the MODE register to normal. */
+- __raw_writel(TGA_MODE_SBM_8BPP|TGA_MODE_SIMPLE, tga_regs+TGA_MODE_REG);
+-}
+-
+-/* The (almost) general case of backward copy in 8bpp mode. */
+-static inline void
+-copyarea_backward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
+- u32 height, u32 width, u32 line_length,
+- const struct fb_copyarea *area)
+-{
+- struct tga_par *par = (struct tga_par *) info->par;
+- unsigned long i, left, yincr;
+- unsigned long depos, sepos, dealign, sealign;
+- u32 mask_first, mask_last;
+- unsigned long n32;
+- void __iomem *tga_regs;
+- void __iomem *tga_fb;
+-
+ yincr = line_length;
+ if (dy > sy) {
+ dy += height - 1;
+ sy += height - 1;
+ yincr = -yincr;
+ }
++ backward = dy == sy && dx > sx && dx < sx + width;
+
+ /* Compute the offsets and alignments in the frame buffer.
+ More than anything else, these control how we do copies. */
+- depos = dy * line_length + dx + width;
+- sepos = sy * line_length + sx + width;
+- dealign = depos & 7;
+- sealign = sepos & 7;
+-
+- /* ??? The documentation appears to be incorrect (or very
+- misleading) wrt how pixel shifting works in backward copy
+- mode, i.e. when PIXELSHIFT is negative. I give up for now.
+- Do handle the common case of co-aligned backward copies,
+- but frob everything else back on generic code. */
+- if (dealign != sealign) {
+- cfb_copyarea(info, area);
+- return;
+- }
+-
+- /* We begin the copy with the trailing pixels of the
+- unaligned destination. */
+- mask_first = (1ul << dealign) - 1;
+- left = width - dealign;
+-
+- /* Care for small copies. */
+- if (dealign > width) {
+- mask_first ^= (1ul << (dealign - width)) - 1;
+- left = 0;
+- }
++ depos = dy * line_length + dx;
++ sepos = sy * line_length + sx;
++ if (backward)
++ depos += width, sepos += width;
+
+ /* Next copy full words at a time. */
+- n32 = left / 32;
+- left %= 32;
++ n32 = width / 32;
++ last_step = width % 32;
+
+ /* Finally copy the unaligned head of the span. */
+- mask_last = -1 << (32 - left);
++ mask_last = (1ul << last_step) - 1;
++
++ if (!backward) {
++ step = 32;
++ last_step = 32;
++ } else {
++ step = -32;
++ last_step = -last_step;
++ sepos -= 32;
++ depos -= 32;
++ }
+
+ tga_regs = par->tga_regs_base;
+ tga_fb = par->tga_fb_base;
+@@ -1378,25 +1213,33 @@ copyarea_backward_8bpp(struct fb_info *info, u32 dx, u32 dy, u32 sx, u32 sy,
+
+ sfb = tga_fb + sepos;
+ dfb = tga_fb + depos;
+- if (mask_first) {
+- __raw_writel(mask_first, sfb);
+- wmb();
+- __raw_writel(mask_first, dfb);
+- wmb();
+- }
+
+- for (j = 0; j < n32; ++j) {
+- sfb -= 32;
+- dfb -= 32;
++ for (j = 0; j < n32; j++) {
++ if (j < 2 && j + 1 < n32 && !backward &&
++ !(((unsigned long)sfb | (unsigned long)dfb) & 63)) {
++ do {
++ __raw_writel(sfb - tga_fb, tga_regs+TGA_COPY64_SRC);
++ wmb();
++ __raw_writel(dfb - tga_fb, tga_regs+TGA_COPY64_DST);
++ wmb();
++ sfb += 64;
++ dfb += 64;
++ j += 2;
++ } while (j + 1 < n32);
++ j--;
++ continue;
++ }
+ __raw_writel(0xffffffff, sfb);
+ wmb();
+ __raw_writel(0xffffffff, dfb);
+ wmb();
++ sfb += step;
++ dfb += step;
+ }
+
+ if (mask_last) {
+- sfb -= 32;
+- dfb -= 32;
++ sfb += last_step - step;
++ dfb += last_step - step;
+ __raw_writel(mask_last, sfb);
+ wmb();
+ __raw_writel(mask_last, dfb);
+@@ -1457,14 +1300,9 @@ tgafb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+ else if (bpp == 32)
+ cfb_copyarea(info, area);
+
+- /* Detect overlapping source and destination that requires
+- a backward copy. */
+- else if (dy == sy && dx > sx && dx < sx + width)
+- copyarea_backward_8bpp(info, dx, dy, sx, sy, height,
+- width, line_length, area);
+ else
+- copyarea_foreward_8bpp(info, dx, dy, sx, sy, height,
+- width, line_length);
++ copyarea_8bpp(info, dx, dy, sx, sy, height,
++ width, line_length, area);
+ }
+
+
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 94fd738a7741..28153fb2225d 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -271,6 +271,12 @@ static int balloon(void *_vballoon)
+ else if (diff < 0)
+ leak_balloon(vb, -diff);
+ update_balloon_size(vb);
++
++ /*
++ * For large balloon changes, we could spend a lot of time
++ * and always have work to do. Be nice if preempt disabled.
++ */
++ cond_resched();
+ }
+ return 0;
+ }
+diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
+index 40788c925d1c..73705aff53cb 100644
+--- a/drivers/w1/w1_netlink.c
++++ b/drivers/w1/w1_netlink.c
+@@ -54,28 +54,29 @@ static void w1_send_slave(struct w1_master *dev, u64 rn)
+ struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1);
+ struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1);
+ int avail;
++ u64 *data;
+
+ /* update kernel slave list */
+ w1_slave_found(dev, rn);
+
+ avail = dev->priv_size - cmd->len;
+
+- if (avail > 8) {
+- u64 *data = (void *)(cmd + 1) + cmd->len;
++ if (avail < 8) {
++ msg->ack++;
++ cn_netlink_send(msg, 0, GFP_KERNEL);
+
+- *data = rn;
+- cmd->len += 8;
+- hdr->len += 8;
+- msg->len += 8;
+- return;
++ msg->len = sizeof(struct w1_netlink_msg) +
++ sizeof(struct w1_netlink_cmd);
++ hdr->len = sizeof(struct w1_netlink_cmd);
++ cmd->len = 0;
+ }
+
+- msg->ack++;
+- cn_netlink_send(msg, 0, GFP_KERNEL);
++ data = (void *)(cmd + 1) + cmd->len;
+
+- msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd);
+- hdr->len = sizeof(struct w1_netlink_cmd);
+- cmd->len = 0;
++ *data = rn;
++ cmd->len += 8;
++ hdr->len += 8;
++ msg->len += 8;
+ }
+
+ static int w1_process_search_command(struct w1_master *dev, struct cn_msg *msg,
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 6b2a7248ca8b..cfdf6feec104 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2731,6 +2731,8 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
+ /* send down all the barriers */
+ head = &info->fs_devices->devices;
+ list_for_each_entry_rcu(dev, head, dev_list) {
++ if (dev->missing)
++ continue;
+ if (!dev->bdev) {
+ errors++;
+ continue;
+@@ -2745,6 +2747,8 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
+
+ /* wait for all the barriers */
+ list_for_each_entry_rcu(dev, head, dev_list) {
++ if (dev->missing)
++ continue;
+ if (!dev->bdev) {
+ errors++;
+ continue;
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 81376d94cd3c..292e847f4193 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -460,7 +460,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *info = root->fs_info;
+ int count = 0;
+
+- if (--trans->use_count) {
++ if (trans->use_count > 1) {
++ trans->use_count--;
+ trans->block_rsv = trans->orig_rsv;
+ return 0;
+ }
+@@ -494,17 +495,10 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
+ }
+
+ if (lock && cur_trans->blocked && !cur_trans->in_commit) {
+- if (throttle) {
+- /*
+- * We may race with somebody else here so end up having
+- * to call end_transaction on ourselves again, so inc
+- * our use_count.
+- */
+- trans->use_count++;
++ if (throttle)
+ return btrfs_commit_transaction(trans, root);
+- } else {
++ else
+ wake_up_process(info->transaction_kthread);
+- }
+ }
+
+ WARN_ON(cur_trans != info->running_transaction);
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index bf35fe01a83f..834d9a19e379 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2372,6 +2372,27 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
+ ex_ee_block = le32_to_cpu(ex->ee_block);
+ ex_ee_len = ext4_ext_get_actual_len(ex);
+
++ /*
++ * If we're starting with an extent other than the last one in the
++ * node, we need to see if it shares a cluster with the extent to
++ * the right (towards the end of the file). If its leftmost cluster
++ * is this extent's rightmost cluster and it is not cluster aligned,
++ * we'll mark it as a partial that is not to be deallocated.
++ */
++
++ if (ex != EXT_LAST_EXTENT(eh)) {
++ ext4_fsblk_t current_pblk, right_pblk;
++ long long current_cluster, right_cluster;
++
++ current_pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
++ current_cluster = (long long)EXT4_B2C(sbi, current_pblk);
++ right_pblk = ext4_ext_pblock(ex + 1);
++ right_cluster = (long long)EXT4_B2C(sbi, right_pblk);
++ if (current_cluster == right_cluster &&
++ EXT4_PBLK_COFF(sbi, right_pblk))
++ *partial_cluster = -right_cluster;
++ }
++
+ trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
+
+ while (ex >= EXT_FIRST_EXTENT(eh) &&
+diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
+index 16a5047903a6..406d9cc84ba8 100644
+--- a/fs/jffs2/compr_rtime.c
++++ b/fs/jffs2/compr_rtime.c
+@@ -33,7 +33,7 @@ static int jffs2_rtime_compress(unsigned char *data_in,
+ unsigned char *cpage_out,
+ uint32_t *sourcelen, uint32_t *dstlen)
+ {
+- short positions[256];
++ unsigned short positions[256];
+ int outpos = 0;
+ int pos=0;
+
+@@ -74,7 +74,7 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
+ unsigned char *cpage_out,
+ uint32_t srclen, uint32_t destlen)
+ {
+- short positions[256];
++ unsigned short positions[256];
+ int outpos = 0;
+ int pos=0;
+
+diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
+index e4619b00f7c5..fa35ff79ab35 100644
+--- a/fs/jffs2/nodelist.h
++++ b/fs/jffs2/nodelist.h
+@@ -231,7 +231,7 @@ struct jffs2_tmp_dnode_info
+ uint32_t version;
+ uint32_t data_crc;
+ uint32_t partial_crc;
+- uint16_t csize;
++ uint32_t csize;
+ uint16_t overlapped;
+ };
+
+diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
+index 694aa5b03505..145ba39227ab 100644
+--- a/fs/jffs2/nodemgmt.c
++++ b/fs/jffs2/nodemgmt.c
+@@ -128,6 +128,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
+ spin_unlock(&c->erase_completion_lock);
+
+ schedule();
++ remove_wait_queue(&c->erase_wait, &wait);
+ } else
+ spin_unlock(&c->erase_completion_lock);
+ } else if (ret)
+@@ -158,19 +159,24 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
+ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
+ uint32_t *len, uint32_t sumsize)
+ {
+- int ret = -EAGAIN;
++ int ret;
+ minsize = PAD(minsize);
+
+ D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));
+
+- spin_lock(&c->erase_completion_lock);
+- while(ret == -EAGAIN) {
++ while (true) {
++ spin_lock(&c->erase_completion_lock);
+ ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
+ if (ret) {
+ D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
+ }
++ spin_unlock(&c->erase_completion_lock);
++
++ if (ret == -EAGAIN)
++ cond_resched();
++ else
++ break;
+ }
+- spin_unlock(&c->erase_completion_lock);
+ if (!ret)
+ ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
+
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index e065497c45ae..315a1ba47178 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1224,6 +1224,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
+ /* If op is non-idempotent */
+ if (opdesc->op_flags & OP_MODIFIES_SOMETHING) {
+ plen = opdesc->op_rsize_bop(rqstp, op);
++ /*
++ * If there's still another operation, make sure
++ * we'll have space to at least encode an error:
++ */
++ if (resp->opcnt < args->opcnt)
++ plen += COMPOUND_ERR_SLACK_SPACE;
+ op->status = nfsd4_check_resp_size(resp, plen);
+ }
+
+@@ -1381,7 +1387,8 @@ static inline u32 nfsd4_setattr_rsize(struct svc_rqst *rqstp, struct nfsd4_op *o
+
+ static inline u32 nfsd4_setclientid_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+ {
+- return (op_encode_hdr_size + 2 + 1024) * sizeof(__be32);
++ return (op_encode_hdr_size + 2 + XDR_QUADLEN(NFS4_VERIFIER_SIZE)) *
++ sizeof(__be32);
+ }
+
+ static inline u32 nfsd4_write_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index ade5316d0cf5..4835b90d4180 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2413,6 +2413,8 @@ out_acl:
+ WRITE64(stat.ino);
+ }
+ if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
++ if ((buflen -= 16) < 0)
++ goto out_resource;
+ WRITE32(3);
+ WRITE32(NFSD_SUPPATTR_EXCLCREAT_WORD0);
+ WRITE32(NFSD_SUPPATTR_EXCLCREAT_WORD1);
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 6a66fc08546d..11e1888e20e5 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -406,6 +406,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ int ftype = 0;
+ __be32 err;
+ int host_err;
++ bool get_write_count;
+ int size_change = 0;
+
+ if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
+@@ -413,10 +414,18 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ if (iap->ia_valid & ATTR_SIZE)
+ ftype = S_IFREG;
+
++ /* Callers that do fh_verify should do the fh_want_write: */
++ get_write_count = !fhp->fh_dentry;
++
+ /* Get inode */
+ err = fh_verify(rqstp, fhp, ftype, accmode);
+ if (err)
+ goto out;
++ if (get_write_count) {
++ host_err = fh_want_write(fhp);
++ if (host_err)
++ return nfserrno(host_err);
++ }
+
+ dentry = fhp->fh_dentry;
+ inode = dentry->d_inode;
+diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
+index 85d4d422458a..c7fe96233bd7 100644
+--- a/fs/nfsd/vfs.h
++++ b/fs/nfsd/vfs.h
+@@ -108,4 +108,14 @@ struct posix_acl *nfsd_get_posix_acl(struct svc_fh *, int);
+ int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *);
+ #endif
+
++static inline int fh_want_write(struct svc_fh *fh)
++{
++ return mnt_want_write(fh->fh_export->ex_path.mnt);
++}
++
++static inline void fh_drop_write(struct svc_fh *fh)
++{
++ mnt_drop_write(fh->fh_export->ex_path.mnt);
++}
++
+ #endif /* LINUX_NFSD_VFS_H */
+diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
+index 5d18ad10c27f..4f66e007dae1 100644
+--- a/fs/ocfs2/buffer_head_io.c
++++ b/fs/ocfs2/buffer_head_io.c
+@@ -90,7 +90,6 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
+ * information for this bh as it's not marked locally
+ * uptodate. */
+ ret = -EIO;
+- put_bh(bh);
+ mlog_errno(ret);
+ }
+
+@@ -420,7 +419,6 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
+
+ if (!buffer_uptodate(bh)) {
+ ret = -EIO;
+- put_bh(bh);
+ mlog_errno(ret);
+ }
+
+diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
+index 01ebfd0bdad7..d15b0714ee3e 100644
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -540,7 +540,10 @@ master_here:
+ /* success! see if any other nodes need recovery */
+ mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
+ dlm->name, dlm->reco.dead_node, dlm->node_num);
+- dlm_reset_recovery(dlm);
++ spin_lock(&dlm->spinlock);
++ __dlm_reset_recovery(dlm);
++ dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
++ spin_unlock(&dlm->spinlock);
+ }
+ dlm_end_recovery(dlm);
+
+@@ -698,6 +701,14 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
+ if (all_nodes_done) {
+ int ret;
+
++ /* Set this flag on recovery master to avoid
++ * a new recovery for another dead node start
++ * before the recovery is not done. That may
++ * cause recovery hung.*/
++ spin_lock(&dlm->spinlock);
++ dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
++ spin_unlock(&dlm->spinlock);
++
+ /* all nodes are now in DLM_RECO_NODE_DATA_DONE state
+ * just send a finalize message to everyone and
+ * clean up */
+@@ -1752,13 +1763,13 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
+ struct dlm_migratable_lockres *mres)
+ {
+ struct dlm_migratable_lock *ml;
+- struct list_head *queue;
++ struct list_head *queue, *iter;
+ struct list_head *tmpq = NULL;
+ struct dlm_lock *newlock = NULL;
+ struct dlm_lockstatus *lksb = NULL;
+ int ret = 0;
+ int i, j, bad;
+- struct dlm_lock *lock = NULL;
++ struct dlm_lock *lock;
+ u8 from = O2NM_MAX_NODES;
+ unsigned int added = 0;
+ __be64 c;
+@@ -1793,14 +1804,16 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
+ /* MIGRATION ONLY! */
+ BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
+
++ lock = NULL;
+ spin_lock(&res->spinlock);
+ for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
+ tmpq = dlm_list_idx_to_ptr(res, j);
+- list_for_each_entry(lock, tmpq, list) {
+- if (lock->ml.cookie != ml->cookie)
+- lock = NULL;
+- else
++ list_for_each(iter, tmpq) {
++ lock = list_entry(iter,
++ struct dlm_lock, list);
++ if (lock->ml.cookie == ml->cookie)
+ break;
++ lock = NULL;
+ }
+ if (lock)
+ break;
+@@ -2870,8 +2883,8 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
+ BUG();
+ }
+ dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
++ __dlm_reset_recovery(dlm);
+ spin_unlock(&dlm->spinlock);
+- dlm_reset_recovery(dlm);
+ dlm_kick_recovery_thread(dlm);
+ break;
+ default:
+diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
+index 133e9355dc6f..8048eea5955b 100644
+--- a/fs/reiserfs/dir.c
++++ b/fs/reiserfs/dir.c
+@@ -128,6 +128,7 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
+ char *d_name;
+ off_t d_off;
+ ino_t d_ino;
++ loff_t cur_pos = deh_offset(deh);
+
+ if (!de_visible(deh))
+ /* it is hidden entry */
+@@ -200,8 +201,9 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
+ if (local_buf != small_buf) {
+ kfree(local_buf);
+ }
+- // next entry should be looked for with such offset
+- next_pos = deh_offset(deh) + 1;
++
++ /* deh_offset(deh) may be invalid now. */
++ next_pos = cur_pos + 1;
+
+ if (item_moved(&tmp_ih, &path_to_entry)) {
+ goto research;
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index c17fdfbd323f..cb34ff42e4c3 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1690,6 +1690,24 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+ }
+
+
++static int pid_alive(const struct task_struct *p);
++static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
++{
++ pid_t pid = 0;
++
++ rcu_read_lock();
++ if (pid_alive(tsk))
++ pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
++ rcu_read_unlock();
++
++ return pid;
++}
++
++static inline pid_t task_ppid_nr(const struct task_struct *tsk)
++{
++ return task_ppid_nr_ns(tsk, &init_pid_ns);
++}
++
+ static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
+ struct pid_namespace *ns)
+ {
+@@ -1727,7 +1745,7 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+ * If pid_alive fails, then pointers within the task structure
+ * can be stale and must not be dereferenced.
+ */
+-static inline int pid_alive(struct task_struct *p)
++static inline int pid_alive(const struct task_struct *p)
+ {
+ return p->pids[PIDTYPE_PID].pid != NULL;
+ }
+diff --git a/include/trace/events/block.h b/include/trace/events/block.h
+index 05c5e61f0a7c..048e2658d895 100644
+--- a/include/trace/events/block.h
++++ b/include/trace/events/block.h
+@@ -81,6 +81,7 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
+ * block_rq_complete - block IO operation completed by device driver
+ * @q: queue containing the block operation request
+ * @rq: block operations request
++ * @nr_bytes: number of completed bytes
+ *
+ * The block_rq_complete tracepoint event indicates that some portion
+ * of operation request has been completed by the device driver. If
+@@ -88,11 +89,37 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
+ * do for the request. If @rq->bio is non-NULL then there is
+ * additional work required to complete the request.
+ */
+-DEFINE_EVENT(block_rq_with_error, block_rq_complete,
++TRACE_EVENT(block_rq_complete,
+
+- TP_PROTO(struct request_queue *q, struct request *rq),
++ TP_PROTO(struct request_queue *q, struct request *rq,
++ unsigned int nr_bytes),
+
+- TP_ARGS(q, rq)
++ TP_ARGS(q, rq, nr_bytes),
++
++ TP_STRUCT__entry(
++ __field( dev_t, dev )
++ __field( sector_t, sector )
++ __field( unsigned int, nr_sector )
++ __field( int, errors )
++ __array( char, rwbs, RWBS_LEN )
++ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
++ ),
++
++ TP_fast_assign(
++ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
++ __entry->sector = blk_rq_pos(rq);
++ __entry->nr_sector = nr_bytes >> 9;
++ __entry->errors = rq->errors;
++
++ blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
++ blk_dump_cmd(__get_str(cmd), rq);
++ ),
++
++ TP_printk("%d,%d %s (%s) %llu + %u [%d]",
++ MAJOR(__entry->dev), MINOR(__entry->dev),
++ __entry->rwbs, __get_str(cmd),
++ (unsigned long long)__entry->sector,
++ __entry->nr_sector, __entry->errors)
+ );
+
+ DECLARE_EVENT_CLASS(block_rq,
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index 47b7fc1ea893..aeac7cccd5e9 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -473,7 +473,7 @@ static int audit_filter_rules(struct task_struct *tsk,
+ case AUDIT_PPID:
+ if (ctx) {
+ if (!ctx->ppid)
+- ctx->ppid = sys_getppid();
++ ctx->ppid = task_ppid_nr(tsk);
+ result = audit_comparator(ctx->ppid, f->op, f->val);
+ }
+ break;
+@@ -1335,7 +1335,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
+ /* tsk == current */
+ context->pid = tsk->pid;
+ if (!context->ppid)
+- context->ppid = sys_getppid();
++ context->ppid = task_ppid_nr(tsk);
+ cred = current_cred();
+ context->uid = cred->uid;
+ context->gid = cred->gid;
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 234e152c609e..fde15f96b158 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -734,9 +734,6 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
+ struct list_head *dead)
+ {
+ list_move_tail(&p->sibling, &p->real_parent->children);
+-
+- if (p->exit_state == EXIT_DEAD)
+- return;
+ /*
+ * If this is a threaded reparent there is no need to
+ * notify anyone anything has happened.
+@@ -744,9 +741,19 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
+ if (same_thread_group(p->real_parent, father))
+ return;
+
+- /* We don't want people slaying init. */
++ /*
++ * We don't want people slaying init.
++ *
++ * Note: we do this even if it is EXIT_DEAD, wait_task_zombie()
++ * can change ->exit_state to EXIT_ZOMBIE. If this is the final
++ * state, do_notify_parent() was already called and ->exit_signal
++ * doesn't matter.
++ */
+ p->exit_signal = SIGCHLD;
+
++ if (p->exit_state == EXIT_DEAD)
++ return;
++
+ /* If it has exited notify the new parent about this child's death. */
+ if (!p->ptrace &&
+ p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index 16fc34a0806f..92cac05aa7d1 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -699,6 +699,7 @@ void blk_trace_shutdown(struct request_queue *q)
+ * blk_add_trace_rq - Add a trace for a request oriented action
+ * @q: queue the io is for
+ * @rq: the source request
++ * @nr_bytes: number of completed bytes
+ * @what: the action
+ *
+ * Description:
+@@ -706,7 +707,7 @@ void blk_trace_shutdown(struct request_queue *q)
+ *
+ **/
+ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+- u32 what)
++ unsigned int nr_bytes, u32 what)
+ {
+ struct blk_trace *bt = q->blk_trace;
+
+@@ -715,11 +716,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+
+ if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
+ what |= BLK_TC_ACT(BLK_TC_PC);
+- __blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
++ __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
+ what, rq->errors, rq->cmd_len, rq->cmd);
+ } else {
+ what |= BLK_TC_ACT(BLK_TC_FS);
+- __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
++ __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
+ rq->cmd_flags, what, rq->errors, 0, NULL);
+ }
+ }
+@@ -727,33 +728,34 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+ static void blk_add_trace_rq_abort(void *ignore,
+ struct request_queue *q, struct request *rq)
+ {
+- blk_add_trace_rq(q, rq, BLK_TA_ABORT);
++ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT);
+ }
+
+ static void blk_add_trace_rq_insert(void *ignore,
+ struct request_queue *q, struct request *rq)
+ {
+- blk_add_trace_rq(q, rq, BLK_TA_INSERT);
++ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT);
+ }
+
+ static void blk_add_trace_rq_issue(void *ignore,
+ struct request_queue *q, struct request *rq)
+ {
+- blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
++ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE);
+ }
+
+ static void blk_add_trace_rq_requeue(void *ignore,
+ struct request_queue *q,
+ struct request *rq)
+ {
+- blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
++ blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE);
+ }
+
+ static void blk_add_trace_rq_complete(void *ignore,
+ struct request_queue *q,
+- struct request *rq)
++ struct request *rq,
++ unsigned int nr_bytes)
+ {
+- blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
++ blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE);
+ }
+
+ /**
+diff --git a/lib/nlattr.c b/lib/nlattr.c
+index a8408b6cacdf..190ae10126b0 100644
+--- a/lib/nlattr.c
++++ b/lib/nlattr.c
+@@ -299,9 +299,15 @@ int nla_memcmp(const struct nlattr *nla, const void *data,
+ */
+ int nla_strcmp(const struct nlattr *nla, const char *str)
+ {
+- int len = strlen(str) + 1;
+- int d = nla_len(nla) - len;
++ int len = strlen(str);
++ char *buf = nla_data(nla);
++ int attrlen = nla_len(nla);
++ int d;
+
++ if (attrlen > 0 && buf[attrlen - 1] == '\0')
++ attrlen--;
++
++ d = attrlen - len;
+ if (d == 0)
+ d = memcmp(nla_data(nla), str, len);
+
+diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
+index f8a3f1a829b8..33459e0b8bea 100644
+--- a/lib/percpu_counter.c
++++ b/lib/percpu_counter.c
+@@ -166,7 +166,7 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
+ struct percpu_counter *fbc;
+
+ compute_batch_value();
+- if (action != CPU_DEAD)
++ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
+ return NOTIFY_OK;
+
+ cpu = (unsigned long)hcpu;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 3a5aae2d6ee7..d399f5ff5d9c 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1447,6 +1447,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
+ while (min_count < persistent_huge_pages(h)) {
+ if (!free_pool_huge_page(h, nodes_allowed, 0))
+ break;
++ cond_resched_lock(&hugetlb_lock);
+ }
+ while (count < persistent_huge_pages(h)) {
+ if (!adjust_pool_surplus(h, nodes_allowed, 1))
+diff --git a/mm/mlock.c b/mm/mlock.c
+index 4f4f53bdc65d..39b3a7d4bd84 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -78,6 +78,7 @@ void __clear_page_mlock(struct page *page)
+ */
+ void mlock_vma_page(struct page *page)
+ {
++ /* Serialize with page migration */
+ BUG_ON(!PageLocked(page));
+
+ if (!TestSetPageMlocked(page)) {
+@@ -105,6 +106,7 @@ void mlock_vma_page(struct page *page)
+ */
+ void munlock_vma_page(struct page *page)
+ {
++ /* For try_to_munlock() and to serialize with page migration */
+ BUG_ON(!PageLocked(page));
+
+ if (TestClearPageMlocked(page)) {
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 52a2f364f087..9ac405ba91df 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1385,9 +1385,19 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
+ BUG_ON(!page || PageAnon(page));
+
+ if (locked_vma) {
+- mlock_vma_page(page); /* no-op if already mlocked */
+- if (page == check_page)
++ if (page == check_page) {
++ /* we know we have check_page locked */
++ mlock_vma_page(page);
+ ret = SWAP_MLOCK;
++ } else if (trylock_page(page)) {
++ /*
++ * If we can lock the page, perform mlock.
++ * Otherwise leave the page alone, it will be
++ * eventually encountered again later.
++ */
++ mlock_vma_page(page);
++ unlock_page(page);
++ }
+ continue; /* don't unmap */
+ }
+
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index 48a62d89f8ae..c43a788b11df 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -529,6 +529,9 @@ static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev
+ {
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+
++ if (saddr == NULL)
++ saddr = dev->dev_addr;
++
+ return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+ }
+
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 2157984a9eb9..398a2973179d 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1138,6 +1138,12 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+
+ br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr));
+
++ /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
++ if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
++ err = -EINVAL;
++ goto out;
++ }
++
+ if (skb->len == sizeof(*mld)) {
+ if (!pskb_may_pull(skb, sizeof(*mld))) {
+ err = -EINVAL;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 5d41293a6005..b9edff0bbc2c 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -900,8 +900,11 @@ retry:
+ * Lifetime is greater than REGEN_ADVANCE time units. In particular,
+ * an implementation must not create a temporary address with a zero
+ * Preferred Lifetime.
++ * Use age calculation as in addrconf_verify to avoid unnecessary
++ * temporary addresses being generated.
+ */
+- if (tmp_prefered_lft <= regen_advance) {
++ age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
++ if (tmp_prefered_lft <= regen_advance + age) {
+ in6_ifa_put(ifp);
+ in6_dev_put(idev);
+ ret = -1;
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index d50545371b55..ceced6774354 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -499,7 +499,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+ np->tclass, NULL, &fl6, (struct rt6_info*)dst,
+ MSG_DONTWAIT, np->dontfrag);
+ if (err) {
+- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
++ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
+ ip6_flush_pending_frames(sk);
+ } else {
+ err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index cd4b529fbf98..7871cc65d99c 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1191,21 +1191,19 @@ static void ip6_append_data_mtu(unsigned int *mtu,
+ unsigned int fragheaderlen,
+ struct sk_buff *skb,
+ struct rt6_info *rt,
+- bool pmtuprobe)
++ unsigned int orig_mtu)
+ {
+ if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
+ if (skb == NULL) {
+ /* first fragment, reserve header_len */
+- *mtu = *mtu - rt->dst.header_len;
++ *mtu = orig_mtu - rt->dst.header_len;
+
+ } else {
+ /*
+ * this fragment is not first, the headers
+ * space is regarded as data space.
+ */
+- *mtu = min(*mtu, pmtuprobe ?
+- rt->dst.dev->mtu :
+- dst_mtu(rt->dst.path));
++ *mtu = orig_mtu;
+ }
+ *maxfraglen = ((*mtu - fragheaderlen) & ~7)
+ + fragheaderlen - sizeof(struct frag_hdr);
+@@ -1222,7 +1220,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct inet_cork *cork;
+ struct sk_buff *skb, *skb_prev = NULL;
+- unsigned int maxfraglen, fragheaderlen, mtu;
++ unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
+ int exthdrlen;
+ int dst_exthdrlen;
+ int hh_len;
+@@ -1307,6 +1305,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ dst_exthdrlen = 0;
+ mtu = cork->fragsize;
+ }
++ orig_mtu = mtu;
+
+ hh_len = LL_RESERVED_SPACE(rt->dst.dev);
+
+@@ -1389,8 +1388,7 @@ alloc_new_skb:
+ if (skb == NULL || skb_prev == NULL)
+ ip6_append_data_mtu(&mtu, &maxfraglen,
+ fragheaderlen, skb, rt,
+- np->pmtudisc ==
+- IPV6_PMTUDISC_PROBE);
++ orig_mtu);
+
+ skb_prev = skb;
+
+@@ -1660,8 +1658,8 @@ int ip6_push_pending_frames(struct sock *sk)
+ if (proto == IPPROTO_ICMPV6) {
+ struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+
+- ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
+- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
++ ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
++ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
+ }
+
+ err = ip6_local_out(skb);
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index d20a9be8334e..4f12b660045d 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1435,11 +1435,12 @@ static void mld_sendpack(struct sk_buff *skb)
+ dst_output);
+ out:
+ if (!err) {
+- ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
+- ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+- IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
+- } else
+- IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
++ ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
++ ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
++ IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
++ } else {
++ IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
++ }
+
+ rcu_read_unlock();
+ return;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 9a4f4377ce92..39e11f923a73 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1250,7 +1250,7 @@ int ip6_route_add(struct fib6_config *cfg)
+ goto out;
+ }
+
+- rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
++ rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
+
+ if (rt == NULL) {
+ err = -ENOMEM;
+diff --git a/net/rds/iw.c b/net/rds/iw.c
+index 7826d46baa70..589935661d66 100644
+--- a/net/rds/iw.c
++++ b/net/rds/iw.c
+@@ -239,7 +239,8 @@ static int rds_iw_laddr_check(__be32 addr)
+ ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
+ /* due to this, we will claim to support IB devices unless we
+ check node_type. */
+- if (ret || cm_id->device->node_type != RDMA_NODE_RNIC)
++ if (ret || !cm_id->device ||
++ cm_id->device->node_type != RDMA_NODE_RNIC)
+ ret = -EADDRNOTAVAIL;
+
+ rdsdebug("addr %pI4 ret %d node type %d\n",
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index 0121e0ab0351..c95a3f2b6309 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -1366,8 +1366,8 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk)
+ BUG_ON(!list_empty(&chunk->list));
+ list_del_init(&chunk->transmitted_list);
+
+- /* Free the chunk skb data and the SCTP_chunk stub itself. */
+- dev_kfree_skb(chunk->skb);
++ consume_skb(chunk->skb);
++ consume_skb(chunk->auth_chunk);
+
+ SCTP_DBG_OBJCNT_DEC(chunk);
+ kmem_cache_free(sctp_chunk_cachep, chunk);
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index f131caf06a19..5ac33b600538 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -749,7 +749,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
+
+ /* Make sure that we and the peer are AUTH capable */
+ if (!sctp_auth_enable || !new_asoc->peer.auth_capable) {
+- kfree_skb(chunk->auth_chunk);
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+ }
+@@ -764,10 +763,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
+ auth.transport = chunk->transport;
+
+ ret = sctp_sf_authenticate(ep, new_asoc, type, &auth);
+-
+- /* We can now safely free the auth_chunk clone */
+- kfree_skb(chunk->auth_chunk);
+-
+ if (ret != SCTP_IERROR_NO_ERROR) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+diff --git a/net/socket.c b/net/socket.c
+index d4faaded1a9d..3faa358d377f 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1884,6 +1884,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
+ {
+ if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+ return -EFAULT;
++
++ if (kmsg->msg_namelen < 0)
++ return -EINVAL;
++
+ if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+ kmsg->msg_namelen = sizeof(struct sockaddr_storage);
+ return 0;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 54fc90b2408e..8705ee3b11ef 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1771,8 +1771,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
+ goto out;
+
+ err = mutex_lock_interruptible(&u->readlock);
+- if (err) {
+- err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
++ if (unlikely(err)) {
++ /* recvmsg() in non blocking mode is supposed to return -EAGAIN
++ * sk_rcvtimeo is not honored by mutex_lock_interruptible()
++ */
++ err = noblock ? -EAGAIN : -ERESTARTSYS;
+ goto out;
+ }
+
+@@ -1887,6 +1890,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct unix_sock *u = unix_sk(sk);
+ struct sockaddr_un *sunaddr = msg->msg_name;
+ int copied = 0;
++ int noblock = flags & MSG_DONTWAIT;
+ int check_creds = 0;
+ int target;
+ int err = 0;
+@@ -1901,7 +1905,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ goto out;
+
+ target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
+- timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
++ timeo = sock_rcvtimeo(sk, noblock);
+
+ /* Lock the socket to prevent queue disordering
+ * while sleeps in memcpy_tomsg
+@@ -1913,8 +1917,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ }
+
+ err = mutex_lock_interruptible(&u->readlock);
+- if (err) {
+- err = sock_intr_errno(timeo);
++ if (unlikely(err)) {
++ /* recvmsg() in non blocking mode is supposed to return -EAGAIN
++ * sk_rcvtimeo is not honored by mutex_lock_interruptible()
++ */
++ err = noblock ? -EAGAIN : -ERESTARTSYS;
+ goto out;
+ }
+
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 4fa7939c2e9f..69477ffcce1d 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -1328,15 +1328,33 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
+ isec->sid = sbsec->sid;
+
+ if ((sbsec->flags & SE_SBPROC) && !S_ISLNK(inode->i_mode)) {
+- if (opt_dentry) {
+- isec->sclass = inode_mode_to_security_class(inode->i_mode);
+- rc = selinux_proc_get_sid(opt_dentry,
+- isec->sclass,
+- &sid);
+- if (rc)
+- goto out_unlock;
+- isec->sid = sid;
+- }
++ /* We must have a dentry to determine the label on
++ * procfs inodes */
++ if (opt_dentry)
++ /* Called from d_instantiate or
++ * d_splice_alias. */
++ dentry = dget(opt_dentry);
++ else
++ /* Called from selinux_complete_init, try to
++ * find a dentry. */
++ dentry = d_find_alias(inode);
++ /*
++ * This can be hit on boot when a file is accessed
++ * before the policy is loaded. When we load policy we
++ * may find inodes that have no dentry on the
++ * sbsec->isec_head list. No reason to complain as
++ * these will get fixed up the next time we go through
++ * inode_doinit() with a dentry, before these inodes
++ * could be used again by userspace.
++ */
++ if (!dentry)
++ goto out_unlock;
++ isec->sclass = inode_mode_to_security_class(inode->i_mode);
++ rc = selinux_proc_get_sid(dentry, isec->sclass, &sid);
++ dput(dentry);
++ if (rc)
++ goto out_unlock;
++ isec->sid = sid;
+ }
+ break;
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 36bce68756b0..d307adbb8137 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3855,6 +3855,7 @@ static void alc_auto_init_std(struct hda_codec *codec)
+
+ static const struct snd_pci_quirk beep_white_list[] = {
+ SND_PCI_QUIRK(0x1043, 0x103c, "ASUS", 1),
++ SND_PCI_QUIRK(0x1043, 0x115d, "ASUS", 1),
+ SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
+ SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
+ SND_PCI_QUIRK(0x1043, 0x831a, "EeePC", 1),
+diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
+index 44446f2222d9..7a4a1962ec35 100644
+--- a/sound/pci/ice1712/ice1712.c
++++ b/sound/pci/ice1712/ice1712.c
+@@ -686,9 +686,10 @@ static snd_pcm_uframes_t snd_ice1712_playback_pointer(struct snd_pcm_substream *
+ if (!(snd_ice1712_read(ice, ICE1712_IREG_PBK_CTRL) & 1))
+ return 0;
+ ptr = runtime->buffer_size - inw(ice->ddma_port + 4);
++ ptr = bytes_to_frames(substream->runtime, ptr);
+ if (ptr == runtime->buffer_size)
+ ptr = 0;
+- return bytes_to_frames(substream->runtime, ptr);
++ return ptr;
+ }
+
+ static snd_pcm_uframes_t snd_ice1712_playback_ds_pointer(struct snd_pcm_substream *substream)
+@@ -705,9 +706,10 @@ static snd_pcm_uframes_t snd_ice1712_playback_ds_pointer(struct snd_pcm_substrea
+ addr = ICE1712_DSC_ADDR0;
+ ptr = snd_ice1712_ds_read(ice, substream->number * 2, addr) -
+ ice->playback_con_virt_addr[substream->number];
++ ptr = bytes_to_frames(substream->runtime, ptr);
+ if (ptr == substream->runtime->buffer_size)
+ ptr = 0;
+- return bytes_to_frames(substream->runtime, ptr);
++ return ptr;
+ }
+
+ static snd_pcm_uframes_t snd_ice1712_capture_pointer(struct snd_pcm_substream *substream)
+@@ -718,9 +720,10 @@ static snd_pcm_uframes_t snd_ice1712_capture_pointer(struct snd_pcm_substream *s
+ if (!(snd_ice1712_read(ice, ICE1712_IREG_CAP_CTRL) & 1))
+ return 0;
+ ptr = inl(ICEREG(ice, CONCAP_ADDR)) - ice->capture_con_virt_addr;
++ ptr = bytes_to_frames(substream->runtime, ptr);
+ if (ptr == substream->runtime->buffer_size)
+ ptr = 0;
+- return bytes_to_frames(substream->runtime, ptr);
++ return ptr;
+ }
+
+ static const struct snd_pcm_hardware snd_ice1712_playback = {
+@@ -1114,9 +1117,10 @@ static snd_pcm_uframes_t snd_ice1712_playback_pro_pointer(struct snd_pcm_substre
+ if (!(inl(ICEMT(ice, PLAYBACK_CONTROL)) & ICE1712_PLAYBACK_START))
+ return 0;
+ ptr = ice->playback_pro_size - (inw(ICEMT(ice, PLAYBACK_SIZE)) << 2);
++ ptr = bytes_to_frames(substream->runtime, ptr);
+ if (ptr == substream->runtime->buffer_size)
+ ptr = 0;
+- return bytes_to_frames(substream->runtime, ptr);
++ return ptr;
+ }
+
+ static snd_pcm_uframes_t snd_ice1712_capture_pro_pointer(struct snd_pcm_substream *substream)
+@@ -1127,9 +1131,10 @@ static snd_pcm_uframes_t snd_ice1712_capture_pro_pointer(struct snd_pcm_substrea
+ if (!(inl(ICEMT(ice, PLAYBACK_CONTROL)) & ICE1712_CAPTURE_START_SHADOW))
+ return 0;
+ ptr = ice->capture_pro_size - (inw(ICEMT(ice, CAPTURE_SIZE)) << 2);
++ ptr = bytes_to_frames(substream->runtime, ptr);
+ if (ptr == substream->runtime->buffer_size)
+ ptr = 0;
+- return bytes_to_frames(substream->runtime, ptr);
++ return ptr;
+ }
+
+ static const struct snd_pcm_hardware snd_ice1712_playback_pro = {
diff --git a/1058_linux-3.2.59.patch b/1058_linux-3.2.59.patch
new file mode 100644
index 00000000..db1dfa16
--- /dev/null
+++ b/1058_linux-3.2.59.patch
@@ -0,0 +1,1213 @@
+diff --git a/Makefile b/Makefile
+index d59b394ba6cd..1be3414a141f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 58
++SUBLEVEL = 59
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
+index d3f0a9eee9f6..506e33baabd3 100644
+--- a/arch/arm/include/asm/div64.h
++++ b/arch/arm/include/asm/div64.h
+@@ -156,7 +156,7 @@
+ /* Select the best insn combination to perform the */ \
+ /* actual __m * __n / (__p << 64) operation. */ \
+ if (!__c) { \
+- asm ( "umull %Q0, %R0, %1, %Q2\n\t" \
++ asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" \
+ "mov %Q0, #0" \
+ : "=&r" (__res) \
+ : "r" (__m), "r" (__n) \
+diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
+index 3735abd7f8f6..4014d9064be1 100644
+--- a/arch/parisc/kernel/syscall_table.S
++++ b/arch/parisc/kernel/syscall_table.S
+@@ -395,7 +395,7 @@
+ ENTRY_COMP(vmsplice)
+ ENTRY_COMP(move_pages) /* 295 */
+ ENTRY_SAME(getcpu)
+- ENTRY_SAME(epoll_pwait)
++ ENTRY_COMP(epoll_pwait)
+ ENTRY_COMP(statfs64)
+ ENTRY_COMP(fstatfs64)
+ ENTRY_COMP(kexec_load) /* 300 */
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index b3f0f5ad8c41..2b662725fd90 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4718,21 +4718,26 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
+ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
+ {
+ struct ata_queued_cmd *qc = NULL;
+- unsigned int i;
++ unsigned int i, tag;
+
+ /* no command while frozen */
+ if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
+ return NULL;
+
+- /* the last tag is reserved for internal command. */
+- for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
+- if (!test_and_set_bit(i, &ap->qc_allocated)) {
+- qc = __ata_qc_from_tag(ap, i);
++ for (i = 0; i < ATA_MAX_QUEUE; i++) {
++ tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
++
++ /* the last tag is reserved for internal command. */
++ if (tag == ATA_TAG_INTERNAL)
++ continue;
++
++ if (!test_and_set_bit(tag, &ap->qc_allocated)) {
++ qc = __ata_qc_from_tag(ap, tag);
++ qc->tag = tag;
++ ap->last_tag = tag;
+ break;
+ }
+-
+- if (qc)
+- qc->tag = i;
++ }
+
+ return qc;
+ }
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index 7a90d4ae6e52..6d0f3e138221 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -3060,7 +3060,10 @@ static int raw_cmd_copyout(int cmd, void __user *param,
+ int ret;
+
+ while (ptr) {
+- ret = copy_to_user(param, ptr, sizeof(*ptr));
++ struct floppy_raw_cmd cmd = *ptr;
++ cmd.next = NULL;
++ cmd.kernel_data = NULL;
++ ret = copy_to_user(param, &cmd, sizeof(cmd));
+ if (ret)
+ return -EFAULT;
+ param += sizeof(struct floppy_raw_cmd);
+@@ -3114,10 +3117,11 @@ loop:
+ return -ENOMEM;
+ *rcmd = ptr;
+ ret = copy_from_user(ptr, param, sizeof(*ptr));
+- if (ret)
+- return -EFAULT;
+ ptr->next = NULL;
+ ptr->buffer_length = 0;
++ ptr->kernel_data = NULL;
++ if (ret)
++ return -EFAULT;
+ param += sizeof(struct floppy_raw_cmd);
+ if (ptr->cmd_count > 33)
+ /* the command may now also take up the space
+@@ -3133,7 +3137,6 @@ loop:
+ for (i = 0; i < 16; i++)
+ ptr->reply[i] = 0;
+ ptr->resultcode = 0;
+- ptr->kernel_data = NULL;
+
+ if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
+ if (ptr->length <= 0)
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 4cd392dbf115..2861ef4570fc 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -534,9 +534,15 @@ static bool dmi_matches(const struct dmi_system_id *dmi)
+ int s = dmi->matches[i].slot;
+ if (s == DMI_NONE)
+ break;
+- if (dmi_ident[s]
+- && strstr(dmi_ident[s], dmi->matches[i].substr))
+- continue;
++ if (dmi_ident[s]) {
++ if (!dmi->matches[i].exact_match &&
++ strstr(dmi_ident[s], dmi->matches[i].substr))
++ continue;
++ else if (dmi->matches[i].exact_match &&
++ !strcmp(dmi_ident[s], dmi->matches[i].substr))
++ continue;
++ }
++
+ /* No match */
+ return false;
+ }
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index bc35070daf78..886c1914146b 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -1394,6 +1394,14 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+ .driver_data = (int []){1232, 5710, 1156, 4696},
+ },
+ {
++ /* Lenovo ThinkPad T431s */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
++ },
++ .driver_data = (int []){1024, 5112, 2024, 4832},
++ },
++ {
+ /* Lenovo ThinkPad T440s */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+@@ -1402,6 +1410,14 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
+ {
++ /* Lenovo ThinkPad L440 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
++ },
++ .driver_data = (int []){1024, 5112, 2024, 4832},
++ },
++ {
+ /* Lenovo ThinkPad T540p */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+@@ -1409,6 +1425,32 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+ },
+ .driver_data = (int []){1024, 5056, 2058, 4832},
+ },
++ {
++ /* Lenovo ThinkPad L540 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
++ },
++ .driver_data = (int []){1024, 5112, 2024, 4832},
++ },
++ {
++ /* Lenovo Yoga S1 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
++ "ThinkPad S1 Yoga"),
++ },
++ .driver_data = (int []){1232, 5710, 1156, 4696},
++ },
++ {
++ /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION,
++ "ThinkPad X1 Carbon 2nd"),
++ },
++ .driver_data = (int []){1024, 5112, 2024, 4832},
++ },
+ #endif
+ { }
+ };
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+index 5829e0b47e7e..ba7c861e2b93 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+@@ -58,7 +58,6 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
+
+ cq->ring = ring;
+ cq->is_tx = mode;
+- spin_lock_init(&cq->lock);
+
+ err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
+ cq->buf_size, 2 * PAGE_SIZE);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index 78d776bc355c..308349a43d60 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -355,15 +355,11 @@ static void mlx4_en_netpoll(struct net_device *dev)
+ {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_cq *cq;
+- unsigned long flags;
+ int i;
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ cq = &priv->rx_cq[i];
+- spin_lock_irqsave(&cq->lock, flags);
+- napi_synchronize(&cq->napi);
+- mlx4_en_process_rx_cq(dev, cq, 0);
+- spin_unlock_irqrestore(&cq->lock, flags);
++ napi_schedule(&cq->napi);
+ }
+ }
+ #endif
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index 207b5add3ca8..492055885109 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -300,7 +300,6 @@ struct mlx4_en_cq {
+ struct mlx4_cq mcq;
+ struct mlx4_hwq_resources wqres;
+ int ring;
+- spinlock_t lock;
+ struct net_device *dev;
+ struct napi_struct napi;
+ /* Per-core Tx cq processing support */
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+index a004ad75d568..ba7ef2fd4997 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+@@ -1228,11 +1228,14 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
+ if (rtlhal->interface == INTF_PCI) {
+ rcu_read_lock();
+ sta = ieee80211_find_sta(mac->vif, mac->bssid);
++ if (!sta)
++ goto out_unlock;
+ }
+ rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
+ p_ra->ratr_state);
+
+ p_ra->pre_ratr_state = p_ra->ratr_state;
++ out_unlock:
+ if (rtlhal->interface == INTF_PCI)
+ rcu_read_unlock();
+ }
+diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
+index 7069f06d9b5d..4cc68d1b5069 100644
+--- a/drivers/staging/rtl8712/rtl871x_recv.c
++++ b/drivers/staging/rtl8712/rtl871x_recv.c
+@@ -254,7 +254,7 @@ union recv_frame *r8712_portctrl(struct _adapter *adapter,
+ struct sta_info *psta;
+ struct sta_priv *pstapriv;
+ union recv_frame *prtnframe;
+- u16 ether_type = 0;
++ u16 ether_type;
+
+ pstapriv = &adapter->stapriv;
+ ptr = get_recvframe_data(precv_frame);
+@@ -263,15 +263,14 @@ union recv_frame *r8712_portctrl(struct _adapter *adapter,
+ psta = r8712_get_stainfo(pstapriv, psta_addr);
+ auth_alg = adapter->securitypriv.AuthAlgrthm;
+ if (auth_alg == 2) {
++ /* get ether_type */
++ ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE;
++ memcpy(&ether_type, ptr, 2);
++ ether_type = ntohs((unsigned short)ether_type);
++
+ if ((psta != NULL) && (psta->ieee8021x_blocked)) {
+ /* blocked
+ * only accept EAPOL frame */
+- prtnframe = precv_frame;
+- /*get ether_type */
+- ptr = ptr + pfhdr->attrib.hdrlen +
+- pfhdr->attrib.iv_len + LLC_HEADER_SIZE;
+- memcpy(&ether_type, ptr, 2);
+- ether_type = ntohs((unsigned short)ether_type);
+ if (ether_type == 0x888e)
+ prtnframe = precv_frame;
+ else {
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 0f8a785a5f60..bac83d82c117 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1997,7 +1997,9 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
+ tty->ops->flush_chars(tty);
+ } else {
+ while (nr > 0) {
++ mutex_lock(&tty->output_lock);
+ c = tty->ops->write(tty, b, nr);
++ mutex_unlock(&tty->output_lock);
+ if (c < 0) {
+ retval = c;
+ goto break_out;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 636ee9e5ff39..320db2a585ad 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1493,13 +1493,27 @@ static const struct usb_device_id acm_ids[] = {
+ },
+ /* Motorola H24 HSPA module: */
+ { USB_DEVICE(0x22b8, 0x2d91) }, /* modem */
+- { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */
+- { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */
+- { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */
+- { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */
+- { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */
+- { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */
+- { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
++ { USB_DEVICE(0x22b8, 0x2d92), /* modem + diagnostics */
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
++ },
++ { USB_DEVICE(0x22b8, 0x2d93), /* modem + AT port */
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
++ },
++ { USB_DEVICE(0x22b8, 0x2d95), /* modem + AT port + diagnostics */
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
++ },
++ { USB_DEVICE(0x22b8, 0x2d96), /* modem + NMEA */
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
++ },
++ { USB_DEVICE(0x22b8, 0x2d97), /* modem + diagnostics + NMEA */
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
++ },
++ { USB_DEVICE(0x22b8, 0x2d99), /* modem + AT port + NMEA */
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
++ },
++ { USB_DEVICE(0x22b8, 0x2d9a), /* modem + AT port + diagnostics + NMEA */
++ .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
++ },
+
+ { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
+ .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 2b4f42bac98f..188654458c2e 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -570,10 +570,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+ struct xhci_dequeue_state *state)
+ {
+ struct xhci_virt_device *dev = xhci->devs[slot_id];
++ struct xhci_virt_ep *ep = &dev->eps[ep_index];
+ struct xhci_ring *ep_ring;
+ struct xhci_generic_trb *trb;
+- struct xhci_ep_ctx *ep_ctx;
+ dma_addr_t addr;
++ u64 hw_dequeue;
+
+ ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
+ ep_index, stream_id);
+@@ -583,52 +584,62 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+ stream_id);
+ return;
+ }
+- state->new_cycle_state = 0;
+- xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
+- state->new_deq_seg = find_trb_seg(cur_td->start_seg,
+- dev->eps[ep_index].stopped_trb,
+- &state->new_cycle_state);
+- if (!state->new_deq_seg) {
+- WARN_ON(1);
+- return;
+- }
+
+ /* Dig out the cycle state saved by the xHC during the stop ep cmd */
+ xhci_dbg(xhci, "Finding endpoint context\n");
+- ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+- state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
++ /* 4.6.9 the css flag is written to the stream context for streams */
++ if (ep->ep_state & EP_HAS_STREAMS) {
++ struct xhci_stream_ctx *ctx =
++ &ep->stream_info->stream_ctx_array[stream_id];
++ hw_dequeue = le64_to_cpu(ctx->stream_ring);
++ } else {
++ struct xhci_ep_ctx *ep_ctx
++ = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
++ hw_dequeue = le64_to_cpu(ep_ctx->deq);
++ }
++
++ /* Find virtual address and segment of hardware dequeue pointer */
++ state->new_deq_seg = ep_ring->deq_seg;
++ state->new_deq_ptr = ep_ring->dequeue;
++ while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
++ != (dma_addr_t)(hw_dequeue & ~0xf)) {
++ next_trb(xhci, ep_ring, &state->new_deq_seg,
++ &state->new_deq_ptr);
++ if (state->new_deq_ptr == ep_ring->dequeue) {
++ WARN_ON(1);
++ return;
++ }
++ }
++ /*
++ * Find cycle state for last_trb, starting at old cycle state of
++ * hw_dequeue. If there is only one segment ring, find_trb_seg() will
++ * return immediately and cannot toggle the cycle state if this search
++ * wraps around, so add one more toggle manually in that case.
++ */
++ state->new_cycle_state = hw_dequeue & 0x1;
++ if (ep_ring->first_seg == ep_ring->first_seg->next &&
++ cur_td->last_trb < state->new_deq_ptr)
++ state->new_cycle_state ^= 0x1;
+
+ state->new_deq_ptr = cur_td->last_trb;
+ xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
+ state->new_deq_seg = find_trb_seg(state->new_deq_seg,
+- state->new_deq_ptr,
+- &state->new_cycle_state);
++ state->new_deq_ptr, &state->new_cycle_state);
+ if (!state->new_deq_seg) {
+ WARN_ON(1);
+ return;
+ }
+
++ /* Increment to find next TRB after last_trb. Cycle if appropriate. */
+ trb = &state->new_deq_ptr->generic;
+ if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
+ (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
+ state->new_cycle_state ^= 0x1;
+ next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
+
+- /*
+- * If there is only one segment in a ring, find_trb_seg()'s while loop
+- * will not run, and it will return before it has a chance to see if it
+- * needs to toggle the cycle bit. It can't tell if the stalled transfer
+- * ended just before the link TRB on a one-segment ring, or if the TD
+- * wrapped around the top of the ring, because it doesn't have the TD in
+- * question. Look for the one-segment case where stalled TRB's address
+- * is greater than the new dequeue pointer address.
+- */
+- if (ep_ring->first_seg == ep_ring->first_seg->next &&
+- state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
+- state->new_cycle_state ^= 0x1;
++ /* Don't update the ring cycle state for the producer (us). */
+ xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
+
+- /* Don't update the ring cycle state for the producer (us). */
+ xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
+ state->new_deq_seg);
+ addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
+@@ -813,7 +824,6 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
+ if (list_empty(&ep->cancelled_td_list)) {
+ xhci_stop_watchdog_timer_in_irq(xhci, ep);
+ ep->stopped_td = NULL;
+- ep->stopped_trb = NULL;
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ return;
+ }
+@@ -880,11 +890,9 @@ remove_finished_td:
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ }
+
+- /* Clear stopped_td and stopped_trb if endpoint is not halted */
+- if (!(ep->ep_state & EP_HALTED)) {
++ /* Clear stopped_td if endpoint is not halted */
++ if (!(ep->ep_state & EP_HALTED))
+ ep->stopped_td = NULL;
+- ep->stopped_trb = NULL;
+- }
+
+ /*
+ * Drop the lock and complete the URBs in the cancelled TD list.
+@@ -1744,14 +1752,12 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
+ struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+ ep->ep_state |= EP_HALTED;
+ ep->stopped_td = td;
+- ep->stopped_trb = event_trb;
+ ep->stopped_stream = stream_id;
+
+ xhci_queue_reset_ep(xhci, slot_id, ep_index);
+ xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
+
+ ep->stopped_td = NULL;
+- ep->stopped_trb = NULL;
+ ep->stopped_stream = 0;
+
+ xhci_ring_cmd_db(xhci);
+@@ -1833,7 +1839,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ * the ring dequeue pointer or take this TD off any lists yet.
+ */
+ ep->stopped_td = td;
+- ep->stopped_trb = event_trb;
+ return 0;
+ } else {
+ if (trb_comp_code == COMP_STALL) {
+@@ -1845,7 +1850,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ * USB class driver clear the stall later.
+ */
+ ep->stopped_td = td;
+- ep->stopped_trb = event_trb;
+ ep->stopped_stream = ep_ring->stream_id;
+ } else if (xhci_requires_manual_halt_cleanup(xhci,
+ ep_ctx, trb_comp_code)) {
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 03c35da16e48..b2eac8db55e8 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -378,16 +378,16 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
+
+ #else
+
+-static int xhci_try_enable_msi(struct usb_hcd *hcd)
++static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
+ {
+ return 0;
+ }
+
+-static void xhci_cleanup_msix(struct xhci_hcd *xhci)
++static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
+ {
+ }
+
+-static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
++static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+ {
+ }
+
+@@ -2884,7 +2884,6 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
+ xhci_ring_cmd_db(xhci);
+ }
+ virt_ep->stopped_td = NULL;
+- virt_ep->stopped_trb = NULL;
+ virt_ep->stopped_stream = 0;
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index cf4fd241fc77..1bc91c8927aa 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -835,8 +835,6 @@ struct xhci_virt_ep {
+ #define EP_GETTING_NO_STREAMS (1 << 5)
+ /* ---- Related to URB cancellation ---- */
+ struct list_head cancelled_td_list;
+- /* The TRB that was last reported in a stopped endpoint ring */
+- union xhci_trb *stopped_trb;
+ struct xhci_td *stopped_td;
+ unsigned int stopped_stream;
+ /* Watchdog timer for stop endpoint command to cancel URBs */
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index c408ff73e99d..01fd64a0a9ee 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -110,6 +110,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
+ { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */
+ { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
++ { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
+ { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+ { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
+ { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 5c97d9fcc613..332f04d76bd9 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -920,6 +920,39 @@ static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
+ /* Cressi Devices */
+ { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) },
++ /* Brainboxes Devices */
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_001_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_012_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_4_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_5_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_357_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_1_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_2_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_3_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_1_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_2_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_1_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
++ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
+ { }, /* Optional parameter entry */
+ { } /* Terminating entry */
+ };
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 71fe2de79bab..83a440fd28d6 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1326,3 +1326,40 @@
+ * Manufacturer: Cressi
+ */
+ #define FTDI_CRESSI_PID 0x87d0
++
++/*
++ * Brainboxes devices
++ */
++#define BRAINBOXES_VID 0x05d1
++#define BRAINBOXES_VX_001_PID 0x1001 /* VX-001 ExpressCard 1 Port RS232 */
++#define BRAINBOXES_VX_012_PID 0x1002 /* VX-012 ExpressCard 2 Port RS232 */
++#define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */
++#define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */
++#define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */
++#define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */
++#define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */
++#define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */
++#define BRAINBOXES_US_606_3_PID 0x2003 /* US-606 6 Port RS232 Serial Port 4 and 6 */
++#define BRAINBOXES_US_701_1_PID 0x2011 /* US-701 4xRS232 1Mbaud Port 1 and 2 */
++#define BRAINBOXES_US_701_2_PID 0x2012 /* US-701 4xRS422 1Mbaud Port 3 and 4 */
++#define BRAINBOXES_US_279_1_PID 0x2021 /* US-279 8xRS422 1Mbaud Port 1 and 2 */
++#define BRAINBOXES_US_279_2_PID 0x2022 /* US-279 8xRS422 1Mbaud Port 3 and 4 */
++#define BRAINBOXES_US_279_3_PID 0x2023 /* US-279 8xRS422 1Mbaud Port 5 and 6 */
++#define BRAINBOXES_US_279_4_PID 0x2024 /* US-279 8xRS422 1Mbaud Port 7 and 8 */
++#define BRAINBOXES_US_346_1_PID 0x3011 /* US-346 4xRS422/485 1Mbaud Port 1 and 2 */
++#define BRAINBOXES_US_346_2_PID 0x3012 /* US-346 4xRS422/485 1Mbaud Port 3 and 4 */
++#define BRAINBOXES_US_257_PID 0x5001 /* US-257 2xRS232 1Mbaud */
++#define BRAINBOXES_US_313_PID 0x6001 /* US-313 2xRS422/485 1Mbaud */
++#define BRAINBOXES_US_357_PID 0x7001 /* US_357 1xRS232/422/485 */
++#define BRAINBOXES_US_842_1_PID 0x8001 /* US-842 8xRS422/485 1Mbaud Port 1 and 2 */
++#define BRAINBOXES_US_842_2_PID 0x8002 /* US-842 8xRS422/485 1Mbaud Port 3 and 4 */
++#define BRAINBOXES_US_842_3_PID 0x8003 /* US-842 8xRS422/485 1Mbaud Port 5 and 6 */
++#define BRAINBOXES_US_842_4_PID 0x8004 /* US-842 8xRS422/485 1Mbaud Port 7 and 8 */
++#define BRAINBOXES_US_160_1_PID 0x9001 /* US-160 16xRS232 1Mbaud Port 1 and 2 */
++#define BRAINBOXES_US_160_2_PID 0x9002 /* US-160 16xRS232 1Mbaud Port 3 and 4 */
++#define BRAINBOXES_US_160_3_PID 0x9003 /* US-160 16xRS232 1Mbaud Port 5 and 6 */
++#define BRAINBOXES_US_160_4_PID 0x9004 /* US-160 16xRS232 1Mbaud Port 7 and 8 */
++#define BRAINBOXES_US_160_5_PID 0x9005 /* US-160 16xRS232 1Mbaud Port 9 and 10 */
++#define BRAINBOXES_US_160_6_PID 0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */
++#define BRAINBOXES_US_160_7_PID 0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */
++#define BRAINBOXES_US_160_8_PID 0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index f42119d22467..c575e0a17270 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -36,6 +36,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/mutex.h>
+ #include <linux/serial.h>
++#include <linux/swab.h>
+ #include <linux/kfifo.h>
+ #include <linux/ioctl.h>
+ #include <linux/firmware.h>
+@@ -306,7 +307,7 @@ static int read_download_mem(struct usb_device *dev, int start_address,
+ {
+ int status = 0;
+ __u8 read_length;
+- __be16 be_start_address;
++ u16 be_start_address;
+
+ dbg("%s - @ %x for %d", __func__, start_address, length);
+
+@@ -323,10 +324,14 @@ static int read_download_mem(struct usb_device *dev, int start_address,
+ dbg("%s - @ %x for %d", __func__,
+ start_address, read_length);
+ }
+- be_start_address = cpu_to_be16(start_address);
++ /*
++ * NOTE: Must use swab as wIndex is sent in little-endian
++ * byte order regardless of host byte order.
++ */
++ be_start_address = swab16((u16)start_address);
+ status = ti_vread_sync(dev, UMPC_MEMORY_READ,
+ (__u16)address_type,
+- (__force __u16)be_start_address,
++ be_start_address,
+ buffer, read_length);
+
+ if (status) {
+@@ -426,7 +431,7 @@ static int write_i2c_mem(struct edgeport_serial *serial,
+ {
+ int status = 0;
+ int write_length;
+- __be16 be_start_address;
++ u16 be_start_address;
+
+ /* We can only send a maximum of 1 aligned byte page at a time */
+
+@@ -442,11 +447,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
+ usb_serial_debug_data(debug, &serial->serial->dev->dev,
+ __func__, write_length, buffer);
+
+- /* Write first page */
+- be_start_address = cpu_to_be16(start_address);
++ /*
++ * Write first page.
++ *
++ * NOTE: Must use swab as wIndex is sent in little-endian byte order
++ * regardless of host byte order.
++ */
++ be_start_address = swab16((u16)start_address);
+ status = ti_vsend_sync(serial->serial->dev,
+ UMPC_MEMORY_WRITE, (__u16)address_type,
+- (__force __u16)be_start_address,
++ be_start_address,
+ buffer, write_length);
+ if (status) {
+ dbg("%s - ERROR %d", __func__, status);
+@@ -470,11 +480,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
+ usb_serial_debug_data(debug, &serial->serial->dev->dev,
+ __func__, write_length, buffer);
+
+- /* Write next page */
+- be_start_address = cpu_to_be16(start_address);
++ /*
++ * Write next page.
++ *
++ * NOTE: Must use swab as wIndex is sent in little-endian byte
++ * order regardless of host byte order.
++ */
++ be_start_address = swab16((u16)start_address);
+ status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
+ (__u16)address_type,
+- (__force __u16)be_start_address,
++ be_start_address,
+ buffer, write_length);
+ if (status) {
+ dev_err(&serial->serial->dev->dev, "%s - ERROR %d\n",
+@@ -681,8 +696,8 @@ static int get_descriptor_addr(struct edgeport_serial *serial,
+ if (rom_desc->Type == desc_type)
+ return start_address;
+
+- start_address = start_address + sizeof(struct ti_i2c_desc)
+- + rom_desc->Size;
++ start_address = start_address + sizeof(struct ti_i2c_desc) +
++ le16_to_cpu(rom_desc->Size);
+
+ } while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type);
+
+@@ -695,7 +710,7 @@ static int valid_csum(struct ti_i2c_desc *rom_desc, __u8 *buffer)
+ __u16 i;
+ __u8 cs = 0;
+
+- for (i = 0; i < rom_desc->Size; i++)
++ for (i = 0; i < le16_to_cpu(rom_desc->Size); i++)
+ cs = (__u8)(cs + buffer[i]);
+
+ if (cs != rom_desc->CheckSum) {
+@@ -749,7 +764,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
+ break;
+
+ if ((start_address + sizeof(struct ti_i2c_desc) +
+- rom_desc->Size) > TI_MAX_I2C_SIZE) {
++ le16_to_cpu(rom_desc->Size)) > TI_MAX_I2C_SIZE) {
+ status = -ENODEV;
+ dbg("%s - structure too big, erroring out.", __func__);
+ break;
+@@ -764,7 +779,8 @@ static int check_i2c_image(struct edgeport_serial *serial)
+ /* Read the descriptor data */
+ status = read_rom(serial, start_address +
+ sizeof(struct ti_i2c_desc),
+- rom_desc->Size, buffer);
++ le16_to_cpu(rom_desc->Size),
++ buffer);
+ if (status)
+ break;
+
+@@ -773,7 +789,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
+ break;
+ }
+ start_address = start_address + sizeof(struct ti_i2c_desc) +
+- rom_desc->Size;
++ le16_to_cpu(rom_desc->Size);
+
+ } while ((rom_desc->Type != I2C_DESC_TYPE_ION) &&
+ (start_address < TI_MAX_I2C_SIZE));
+@@ -812,7 +828,7 @@ static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer)
+
+ /* Read the descriptor data */
+ status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc),
+- rom_desc->Size, buffer);
++ le16_to_cpu(rom_desc->Size), buffer);
+ if (status)
+ goto exit;
+
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 5f5047fcfecf..9823e79e9f56 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -234,8 +234,31 @@ static void option_instat_callback(struct urb *urb);
+ #define QUALCOMM_VENDOR_ID 0x05C6
+
+ #define CMOTECH_VENDOR_ID 0x16d8
+-#define CMOTECH_PRODUCT_6008 0x6008
+-#define CMOTECH_PRODUCT_6280 0x6280
++#define CMOTECH_PRODUCT_6001 0x6001
++#define CMOTECH_PRODUCT_CMU_300 0x6002
++#define CMOTECH_PRODUCT_6003 0x6003
++#define CMOTECH_PRODUCT_6004 0x6004
++#define CMOTECH_PRODUCT_6005 0x6005
++#define CMOTECH_PRODUCT_CGU_628A 0x6006
++#define CMOTECH_PRODUCT_CHE_628S 0x6007
++#define CMOTECH_PRODUCT_CMU_301 0x6008
++#define CMOTECH_PRODUCT_CHU_628 0x6280
++#define CMOTECH_PRODUCT_CHU_628S 0x6281
++#define CMOTECH_PRODUCT_CDU_680 0x6803
++#define CMOTECH_PRODUCT_CDU_685A 0x6804
++#define CMOTECH_PRODUCT_CHU_720S 0x7001
++#define CMOTECH_PRODUCT_7002 0x7002
++#define CMOTECH_PRODUCT_CHU_629K 0x7003
++#define CMOTECH_PRODUCT_7004 0x7004
++#define CMOTECH_PRODUCT_7005 0x7005
++#define CMOTECH_PRODUCT_CGU_629 0x7006
++#define CMOTECH_PRODUCT_CHU_629S 0x700a
++#define CMOTECH_PRODUCT_CHU_720I 0x7211
++#define CMOTECH_PRODUCT_7212 0x7212
++#define CMOTECH_PRODUCT_7213 0x7213
++#define CMOTECH_PRODUCT_7251 0x7251
++#define CMOTECH_PRODUCT_7252 0x7252
++#define CMOTECH_PRODUCT_7253 0x7253
+
+ #define TELIT_VENDOR_ID 0x1bc7
+ #define TELIT_PRODUCT_UC864E 0x1003
+@@ -243,6 +266,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_CC864_DUAL 0x1005
+ #define TELIT_PRODUCT_CC864_SINGLE 0x1006
+ #define TELIT_PRODUCT_DE910_DUAL 0x1010
++#define TELIT_PRODUCT_UE910_V2 0x1012
+ #define TELIT_PRODUCT_LE920 0x1200
+
+ /* ZTE PRODUCTS */
+@@ -291,6 +315,7 @@ static void option_instat_callback(struct urb *urb);
+ #define ALCATEL_PRODUCT_X060S_X200 0x0000
+ #define ALCATEL_PRODUCT_X220_X500D 0x0017
+ #define ALCATEL_PRODUCT_L100V 0x011e
++#define ALCATEL_PRODUCT_L800MA 0x0203
+
+ #define PIRELLI_VENDOR_ID 0x1266
+ #define PIRELLI_PRODUCT_C100_1 0x1002
+@@ -353,6 +378,7 @@ static void option_instat_callback(struct urb *urb);
+ #define OLIVETTI_PRODUCT_OLICARD100 0xc000
+ #define OLIVETTI_PRODUCT_OLICARD145 0xc003
+ #define OLIVETTI_PRODUCT_OLICARD200 0xc005
++#define OLIVETTI_PRODUCT_OLICARD500 0xc00b
+
+ /* Celot products */
+ #define CELOT_VENDOR_ID 0x211f
+@@ -514,6 +540,10 @@ static const struct option_blacklist_info huawei_cdc12_blacklist = {
+ .reserved = BIT(1) | BIT(2),
+ };
+
++static const struct option_blacklist_info net_intf0_blacklist = {
++ .reserved = BIT(0),
++};
++
+ static const struct option_blacklist_info net_intf1_blacklist = {
+ .reserved = BIT(1),
+ };
+@@ -1048,13 +1078,53 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+- { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
+- { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
++ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
++ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
++ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
++ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
++ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
++ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+ .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+@@ -1518,6 +1588,8 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
++ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
+ { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+@@ -1563,6 +1635,9 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
+ .driver_info = (kernel_ulong_t)&net_intf6_blacklist
+ },
++ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist
++ },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 7b293178e8a6..45a288cc423c 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -86,6 +86,9 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
+ { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
++ { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
++ { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
++ { USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
+ { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
+ { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
+ { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index c38b8c00c06f..42bc082896ac 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -121,8 +121,11 @@
+ #define SUPERIAL_VENDOR_ID 0x5372
+ #define SUPERIAL_PRODUCT_ID 0x2303
+
+-/* Hewlett-Packard LD220-HP POS Pole Display */
++/* Hewlett-Packard POS Pole Displays */
+ #define HP_VENDOR_ID 0x03f0
++#define HP_LD960_PRODUCT_ID 0x0b39
++#define HP_LCM220_PRODUCT_ID 0x3139
++#define HP_LCM960_PRODUCT_ID 0x3239
+ #define HP_LD220_PRODUCT_ID 0x3524
+
+ /* Cressi Edy (diving computer) PC interface */
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index a7fa673466ce..dbdfeb4647ee 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -305,7 +305,6 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ },
+- { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
+
+ { }
+ };
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index 850faa447000..8f76a2b1d721 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -1400,10 +1400,12 @@ void usb_serial_deregister(struct usb_serial_driver *device)
+ /* must be called with BKL held */
+ printk(KERN_INFO "USB Serial deregistering driver %s\n",
+ device->description);
++
+ mutex_lock(&table_lock);
+ list_del(&device->driver_list);
+- usb_serial_bus_deregister(device);
+ mutex_unlock(&table_lock);
++
++ usb_serial_bus_deregister(device);
+ }
+ EXPORT_SYMBOL_GPL(usb_serial_deregister);
+
+diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
+index f8962a957d65..a1fee6f1546a 100644
+--- a/fs/btrfs/inode-map.c
++++ b/fs/btrfs/inode-map.c
+@@ -207,24 +207,14 @@ again:
+
+ void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
+ {
+- struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
+
+ if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ return;
+-
+ again:
+ if (root->cached == BTRFS_CACHE_FINISHED) {
+- __btrfs_add_free_space(ctl, objectid, 1);
++ __btrfs_add_free_space(pinned, objectid, 1);
+ } else {
+- /*
+- * If we are in the process of caching free ino chunks,
+- * to avoid adding the same inode number to the free_ino
+- * tree twice due to cross transaction, we'll leave it
+- * in the pinned tree until a transaction is committed
+- * or the caching work is done.
+- */
+-
+ mutex_lock(&root->fs_commit_mutex);
+ spin_lock(&root->cache_lock);
+ if (root->cached == BTRFS_CACHE_FINISHED) {
+@@ -236,11 +226,7 @@ again:
+
+ start_caching(root);
+
+- if (objectid <= root->cache_progress ||
+- objectid > root->highest_objectid)
+- __btrfs_add_free_space(ctl, objectid, 1);
+- else
+- __btrfs_add_free_space(pinned, objectid, 1);
++ __btrfs_add_free_space(pinned, objectid, 1);
+
+ mutex_unlock(&root->fs_commit_mutex);
+ }
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index cb70f1812a70..61999221fbfa 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -80,7 +80,7 @@ ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
+ size_t count = iov_length(iov, nr_segs);
+ loff_t final_size = pos + count;
+
+- if (pos >= inode->i_size)
++ if (pos >= i_size_read(inode))
+ return 0;
+
+ if ((pos & blockmask) || (final_size & blockmask))
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index dc9f0ecd8f4a..55d4f461992c 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -481,6 +481,11 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
+ ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
+ "logical block %lu\n", inode->i_ino, flags, map->m_len,
+ (unsigned long) map->m_lblk);
++
++ /* We can handle the block number less than EXT_MAX_BLOCKS */
++ if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
++ return -EIO;
++
+ /*
+ * Try to see if we can get the block without requesting a new
+ * file system block.
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index 54f566d97176..b46a675e7704 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -241,13 +241,14 @@ static void ext4_end_bio(struct bio *bio, int error)
+
+ if (error) {
+ io_end->flag |= EXT4_IO_END_ERROR;
+- ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
++ ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
+ "(offset %llu size %ld starting block %llu)",
+- inode->i_ino,
++ error, inode->i_ino,
+ (unsigned long long) io_end->offset,
+ (long) io_end->size,
+ (unsigned long long)
+ bi_sector >> (inode->i_blkbits - 9));
++ mapping_set_error(inode->i_mapping, error);
+ }
+
+ if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+diff --git a/fs/locks.c b/fs/locks.c
+index fcc50ab71cc6..d4f1d89d9bc6 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1253,11 +1253,10 @@ int __break_lease(struct inode *inode, unsigned int mode)
+
+ restart:
+ break_time = flock->fl_break_time;
+- if (break_time != 0) {
++ if (break_time != 0)
+ break_time -= jiffies;
+- if (break_time == 0)
+- break_time++;
+- }
++ if (break_time == 0)
++ break_time++;
+ locks_insert_block(flock, new_fl);
+ unlock_flocks();
+ error = wait_event_interruptible_timeout(new_fl->fl_wait,
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 7748d6a18d97..809a38ad96aa 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -633,9 +633,11 @@ static int max_cb_time(void)
+
+ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
+ {
++ int maxtime = max_cb_time();
+ struct rpc_timeout timeparms = {
+- .to_initval = max_cb_time(),
++ .to_initval = maxtime,
+ .to_retries = 0,
++ .to_maxval = maxtime,
+ };
+ struct rpc_create_args args = {
+ .net = &init_net,
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index b1fcdba2cfbe..375dfdf0f397 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -763,6 +763,7 @@ struct ata_port {
+ unsigned long qc_allocated;
+ unsigned int qc_active;
+ int nr_active_links; /* #links with active qcs */
++ unsigned int last_tag; /* track next tag hw expects */
+
+ struct ata_link link; /* host default link */
+ struct ata_link *slave_link; /* see ata_slave_link_init() */
+diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
+index 468819cdde87..226e0ff90e61 100644
+--- a/include/linux/mod_devicetable.h
++++ b/include/linux/mod_devicetable.h
+@@ -461,7 +461,8 @@ enum dmi_field {
+ };
+
+ struct dmi_strmatch {
+- unsigned char slot;
++ unsigned char slot:7;
++ unsigned char exact_match:1;
+ char substr[79];
+ };
+
+@@ -489,7 +490,8 @@ struct dmi_system_id {
+ #define dmi_device_id dmi_system_id
+ #endif
+
+-#define DMI_MATCH(a, b) { a, b }
++#define DMI_MATCH(a, b) { .slot = a, .substr = b }
++#define DMI_EXACT_MATCH(a, b) { .slot = a, .substr = b, .exact_match = 1 }
+
+ #define PLATFORM_NAME_SIZE 20
+ #define PLATFORM_MODULE_PREFIX "platform:"
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index d399f5ff5d9c..cac2441956dc 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1078,6 +1078,7 @@ static void return_unused_surplus_pages(struct hstate *h,
+ while (nr_pages--) {
+ if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
+ break;
++ cond_resched_lock(&hugetlb_lock);
+ }
+ }
+
+diff --git a/mm/memory.c b/mm/memory.c
+index d5f913b9fae4..483e6650593e 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1852,12 +1852,17 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long address, unsigned int fault_flags)
+ {
+ struct vm_area_struct *vma;
++ vm_flags_t vm_flags;
+ int ret;
+
+ vma = find_extend_vma(mm, address);
+ if (!vma || address < vma->vm_start)
+ return -EFAULT;
+
++ vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
++ if (!(vm_flags & vma->vm_flags))
++ return -EFAULT;
++
+ ret = handle_mm_fault(mm, vma, address, fault_flags);
+ if (ret & VM_FAULT_ERROR) {
+ if (ret & VM_FAULT_OOM)
diff --git a/1059_linux-3.2.60.patch b/1059_linux-3.2.60.patch
new file mode 100644
index 00000000..6bb18592
--- /dev/null
+++ b/1059_linux-3.2.60.patch
@@ -0,0 +1,2964 @@
+diff --git a/Documentation/input/elantech.txt b/Documentation/input/elantech.txt
+index 5602eb71ad5d..e1ae127ed099 100644
+--- a/Documentation/input/elantech.txt
++++ b/Documentation/input/elantech.txt
+@@ -504,9 +504,12 @@ byte 5:
+ * reg_10
+
+ bit 7 6 5 4 3 2 1 0
+- 0 0 0 0 0 0 0 A
++ 0 0 0 0 R F T A
+
+ A: 1 = enable absolute tracking
++ T: 1 = enable two finger mode auto correct
++ F: 1 = disable ABS Position Filter
++ R: 1 = enable real hardware resolution
+
+ 6.2 Native absolute mode 6 byte packet format
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+diff --git a/Makefile b/Makefile
+index 1be3414a141f..317d5eab3c66 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 2
+-SUBLEVEL = 59
++SUBLEVEL = 60
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
+index 1c893f05d224..21ecdf5e55f9 100644
+--- a/arch/powerpc/lib/crtsavres.S
++++ b/arch/powerpc/lib/crtsavres.S
+@@ -230,6 +230,87 @@ _GLOBAL(_rest32gpr_31_x)
+ mr 1,11
+ blr
+
++#ifdef CONFIG_ALTIVEC
++/* Called with r0 pointing just beyond the end of the vector save area. */
++
++_GLOBAL(_savevr_20)
++ li r11,-192
++ stvx vr20,r11,r0
++_GLOBAL(_savevr_21)
++ li r11,-176
++ stvx vr21,r11,r0
++_GLOBAL(_savevr_22)
++ li r11,-160
++ stvx vr22,r11,r0
++_GLOBAL(_savevr_23)
++ li r11,-144
++ stvx vr23,r11,r0
++_GLOBAL(_savevr_24)
++ li r11,-128
++ stvx vr24,r11,r0
++_GLOBAL(_savevr_25)
++ li r11,-112
++ stvx vr25,r11,r0
++_GLOBAL(_savevr_26)
++ li r11,-96
++ stvx vr26,r11,r0
++_GLOBAL(_savevr_27)
++ li r11,-80
++ stvx vr27,r11,r0
++_GLOBAL(_savevr_28)
++ li r11,-64
++ stvx vr28,r11,r0
++_GLOBAL(_savevr_29)
++ li r11,-48
++ stvx vr29,r11,r0
++_GLOBAL(_savevr_30)
++ li r11,-32
++ stvx vr30,r11,r0
++_GLOBAL(_savevr_31)
++ li r11,-16
++ stvx vr31,r11,r0
++ blr
++
++_GLOBAL(_restvr_20)
++ li r11,-192
++ lvx vr20,r11,r0
++_GLOBAL(_restvr_21)
++ li r11,-176
++ lvx vr21,r11,r0
++_GLOBAL(_restvr_22)
++ li r11,-160
++ lvx vr22,r11,r0
++_GLOBAL(_restvr_23)
++ li r11,-144
++ lvx vr23,r11,r0
++_GLOBAL(_restvr_24)
++ li r11,-128
++ lvx vr24,r11,r0
++_GLOBAL(_restvr_25)
++ li r11,-112
++ lvx vr25,r11,r0
++_GLOBAL(_restvr_26)
++ li r11,-96
++ lvx vr26,r11,r0
++_GLOBAL(_restvr_27)
++ li r11,-80
++ lvx vr27,r11,r0
++_GLOBAL(_restvr_28)
++ li r11,-64
++ lvx vr28,r11,r0
++_GLOBAL(_restvr_29)
++ li r11,-48
++ lvx vr29,r11,r0
++_GLOBAL(_restvr_30)
++ li r11,-32
++ lvx vr30,r11,r0
++_GLOBAL(_restvr_31)
++ li r11,-16
++ lvx vr31,r11,r0
++ blr
++
++#endif /* CONFIG_ALTIVEC */
++
+ #else /* CONFIG_PPC64 */
+
+ .globl _savegpr0_14
+@@ -353,6 +434,111 @@ _restgpr0_31:
+ mtlr r0
+ blr
+
++#ifdef CONFIG_ALTIVEC
++/* Called with r0 pointing just beyond the end of the vector save area. */
++
++.globl _savevr_20
++_savevr_20:
++ li r12,-192
++ stvx vr20,r12,r0
++.globl _savevr_21
++_savevr_21:
++ li r12,-176
++ stvx vr21,r12,r0
++.globl _savevr_22
++_savevr_22:
++ li r12,-160
++ stvx vr22,r12,r0
++.globl _savevr_23
++_savevr_23:
++ li r12,-144
++ stvx vr23,r12,r0
++.globl _savevr_24
++_savevr_24:
++ li r12,-128
++ stvx vr24,r12,r0
++.globl _savevr_25
++_savevr_25:
++ li r12,-112
++ stvx vr25,r12,r0
++.globl _savevr_26
++_savevr_26:
++ li r12,-96
++ stvx vr26,r12,r0
++.globl _savevr_27
++_savevr_27:
++ li r12,-80
++ stvx vr27,r12,r0
++.globl _savevr_28
++_savevr_28:
++ li r12,-64
++ stvx vr28,r12,r0
++.globl _savevr_29
++_savevr_29:
++ li r12,-48
++ stvx vr29,r12,r0
++.globl _savevr_30
++_savevr_30:
++ li r12,-32
++ stvx vr30,r12,r0
++.globl _savevr_31
++_savevr_31:
++ li r12,-16
++ stvx vr31,r12,r0
++ blr
++
++.globl _restvr_20
++_restvr_20:
++ li r12,-192
++ lvx vr20,r12,r0
++.globl _restvr_21
++_restvr_21:
++ li r12,-176
++ lvx vr21,r12,r0
++.globl _restvr_22
++_restvr_22:
++ li r12,-160
++ lvx vr22,r12,r0
++.globl _restvr_23
++_restvr_23:
++ li r12,-144
++ lvx vr23,r12,r0
++.globl _restvr_24
++_restvr_24:
++ li r12,-128
++ lvx vr24,r12,r0
++.globl _restvr_25
++_restvr_25:
++ li r12,-112
++ lvx vr25,r12,r0
++.globl _restvr_26
++_restvr_26:
++ li r12,-96
++ lvx vr26,r12,r0
++.globl _restvr_27
++_restvr_27:
++ li r12,-80
++ lvx vr27,r12,r0
++.globl _restvr_28
++_restvr_28:
++ li r12,-64
++ lvx vr28,r12,r0
++.globl _restvr_29
++_restvr_29:
++ li r12,-48
++ lvx vr29,r12,r0
++.globl _restvr_30
++_restvr_30:
++ li r12,-32
++ lvx vr30,r12,r0
++.globl _restvr_31
++_restvr_31:
++ li r12,-16
++ lvx vr31,r12,r0
++ blr
++
++#endif /* CONFIG_ALTIVEC */
++
+ #endif /* CONFIG_PPC64 */
+
+ #endif
+diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
+index 439a9acc132d..48fa3915fd02 100644
+--- a/arch/x86/include/asm/hugetlb.h
++++ b/arch/x86/include/asm/hugetlb.h
+@@ -51,6 +51,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+ {
++ ptep_clear_flush(vma, addr, ptep);
+ }
+
+ static inline int huge_pte_none(pte_t pte)
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index 4ac453114d7e..3e0ccbffff6b 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -21,6 +21,8 @@
+ #include <asm/mmu_context.h>
+ #include <asm/syscalls.h>
+
++int sysctl_ldt16 = 0;
++
+ #ifdef CONFIG_SMP
+ static void flush_ldt(void *current_mm)
+ {
+@@ -235,7 +237,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ * IRET leaking the high bits of the kernel stack address.
+ */
+ #ifdef CONFIG_X86_64
+- if (!ldt_info.seg_32bit) {
++ if (!ldt_info.seg_32bit && !sysctl_ldt16) {
+ error = -EINVAL;
+ goto out_unlock;
+ }
+diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
+index 468d591dde31..51bdc0546733 100644
+--- a/arch/x86/vdso/vdso32-setup.c
++++ b/arch/x86/vdso/vdso32-setup.c
+@@ -41,6 +41,7 @@ enum {
+ #ifdef CONFIG_X86_64
+ #define vdso_enabled sysctl_vsyscall32
+ #define arch_setup_additional_pages syscall32_setup_pages
++extern int sysctl_ldt16;
+ #endif
+
+ /*
+@@ -388,6 +389,13 @@ static ctl_table abi_table2[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
++ {
++ .procname = "ldt16",
++ .data = &sysctl_ldt16,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec
++ },
+ {}
+ };
+
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 8176b82b2e4c..392306472e5e 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -70,6 +70,8 @@ enum ec_command {
+ #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
+ #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
+ #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
++#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
++ * when trying to clear the EC */
+
+ enum {
+ EC_FLAGS_QUERY_PENDING, /* Query is pending */
+@@ -123,6 +125,7 @@ EXPORT_SYMBOL(first_ec);
+ static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
+ static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
+ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
++static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
+
+ /* --------------------------------------------------------------------------
+ Transaction Management
+@@ -203,13 +206,13 @@ unlock:
+ spin_unlock_irqrestore(&ec->curr_lock, flags);
+ }
+
+-static int acpi_ec_sync_query(struct acpi_ec *ec);
++static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
+
+ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
+ {
+ if (state & ACPI_EC_FLAG_SCI) {
+ if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
+- return acpi_ec_sync_query(ec);
++ return acpi_ec_sync_query(ec, NULL);
+ }
+ return 0;
+ }
+@@ -449,6 +452,27 @@ int ec_transaction(u8 command,
+
+ EXPORT_SYMBOL(ec_transaction);
+
++/*
++ * Process _Q events that might have accumulated in the EC.
++ * Run with locked ec mutex.
++ */
++static void acpi_ec_clear(struct acpi_ec *ec)
++{
++ int i, status;
++ u8 value = 0;
++
++ for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
++ status = acpi_ec_sync_query(ec, &value);
++ if (status || !value)
++ break;
++ }
++
++ if (unlikely(i == ACPI_EC_CLEAR_MAX))
++ pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
++ else
++ pr_info("%d stale EC events cleared\n", i);
++}
++
+ void acpi_ec_block_transactions(void)
+ {
+ struct acpi_ec *ec = first_ec;
+@@ -472,6 +496,10 @@ void acpi_ec_unblock_transactions(void)
+ mutex_lock(&ec->lock);
+ /* Allow transactions to be carried out again */
+ clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
++
++ if (EC_FLAGS_CLEAR_ON_RESUME)
++ acpi_ec_clear(ec);
++
+ mutex_unlock(&ec->lock);
+ }
+
+@@ -561,13 +589,18 @@ static void acpi_ec_run(void *cxt)
+ kfree(handler);
+ }
+
+-static int acpi_ec_sync_query(struct acpi_ec *ec)
++static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
+ {
+ u8 value = 0;
+ int status;
+ struct acpi_ec_query_handler *handler, *copy;
+- if ((status = acpi_ec_query_unlocked(ec, &value)))
++
++ status = acpi_ec_query_unlocked(ec, &value);
++ if (data)
++ *data = value;
++ if (status)
+ return status;
++
+ list_for_each_entry(handler, &ec->list, node) {
+ if (value == handler->query_bit) {
+ /* have custom handler for this bit */
+@@ -590,7 +623,7 @@ static void acpi_ec_gpe_query(void *ec_cxt)
+ if (!ec)
+ return;
+ mutex_lock(&ec->lock);
+- acpi_ec_sync_query(ec);
++ acpi_ec_sync_query(ec, NULL);
+ mutex_unlock(&ec->lock);
+ }
+
+@@ -828,6 +861,13 @@ static int acpi_ec_add(struct acpi_device *device)
+
+ /* EC is fully operational, allow queries */
+ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
++
++ /* Clear stale _Q events if hardware might require that */
++ if (EC_FLAGS_CLEAR_ON_RESUME) {
++ mutex_lock(&ec->lock);
++ acpi_ec_clear(ec);
++ mutex_unlock(&ec->lock);
++ }
+ return ret;
+ }
+
+@@ -929,6 +969,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
+ return 0;
+ }
+
++/*
++ * On some hardware it is necessary to clear events accumulated by the EC during
++ * sleep. These ECs stop reporting GPEs until they are manually polled, if too
++ * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
++ *
++ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
++ *
++ * Ideally, the EC should also be instructed NOT to accumulate events during
++ * sleep (which Windows seems to do somehow), but the interface to control this
++ * behaviour is not known at this time.
++ *
++ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
++ * however it is very likely that other Samsung models are affected.
++ *
++ * On systems which don't accumulate _Q events during sleep, this extra check
++ * should be harmless.
++ */
++static int ec_clear_on_resume(const struct dmi_system_id *id)
++{
++ pr_debug("Detected system needing EC poll on resume.\n");
++ EC_FLAGS_CLEAR_ON_RESUME = 1;
++ return 0;
++}
++
+ static struct dmi_system_id __initdata ec_dmi_table[] = {
+ {
+ ec_skip_dsdt_scan, "Compal JFL92", {
+@@ -968,6 +1032,9 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
+ ec_validate_ecdt, "ASUS hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
++ {
++ ec_clear_on_resume, "Samsung hardware", {
++ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
+ {},
+ };
+
+diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
+index f8f41e0e8a8c..89b30f32ba68 100644
+--- a/drivers/atm/ambassador.c
++++ b/drivers/atm/ambassador.c
+@@ -802,7 +802,7 @@ static void fill_rx_pool (amb_dev * dev, unsigned char pool,
+ }
+ // cast needed as there is no %? for pointer differences
+ PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
+- skb, skb->head, (long) (skb_end_pointer(skb) - skb->head));
++ skb, skb->head, (long) skb_end_offset(skb));
+ rx.handle = virt_to_bus (skb);
+ rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
+ if (rx_give (dev, &rx, pool))
+diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
+index b0e75ce3c8fc..81845fa2a9cd 100644
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -1258,7 +1258,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
+ tail = readl(SAR_REG_RAWCT);
+
+ pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
+- skb_end_pointer(queue) - queue->head - 16,
++ skb_end_offset(queue) - 16,
+ PCI_DMA_FROMDEVICE);
+
+ while (head != tail) {
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 3539f9b1e002..6fe003a2fca8 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -81,6 +81,7 @@ static struct usb_device_id ath3k_table[] = {
+ { USB_DEVICE(0x04CA, 0x3004) },
+ { USB_DEVICE(0x04CA, 0x3005) },
+ { USB_DEVICE(0x04CA, 0x3006) },
++ { USB_DEVICE(0x04CA, 0x3007) },
+ { USB_DEVICE(0x04CA, 0x3008) },
+ { USB_DEVICE(0x13d3, 0x3362) },
+ { USB_DEVICE(0x0CF3, 0xE004) },
+@@ -123,6 +124,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
+ { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index f18b5a28f10c..dddcb1dc6065 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -152,6 +152,7 @@ static struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
++ { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
+index 7e2d54bffad6..9b8d231b1da6 100644
+--- a/drivers/crypto/caam/error.c
++++ b/drivers/crypto/caam/error.c
+@@ -16,9 +16,13 @@
+ char *tmp; \
+ \
+ tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \
+- sprintf(tmp, format, param); \
+- strcat(str, tmp); \
+- kfree(tmp); \
++ if (likely(tmp)) { \
++ sprintf(tmp, format, param); \
++ strcat(str, tmp); \
++ kfree(tmp); \
++ } else { \
++ strcat(str, "kmalloc failure in SPRINTFCAT"); \
++ } \
+ }
+
+ static void report_jump_idx(u32 status, char *outstr)
+diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
+index 9a353c2216d0..9b01145e6723 100644
+--- a/drivers/dma/mv_xor.c
++++ b/drivers/dma/mv_xor.c
+@@ -218,12 +218,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
+
+ static void mv_chan_activate(struct mv_xor_chan *chan)
+ {
+- u32 activation;
+-
+ dev_dbg(chan->device->common.dev, " activate chan.\n");
+- activation = __raw_readl(XOR_ACTIVATION(chan));
+- activation |= 0x1;
+- __raw_writel(activation, XOR_ACTIVATION(chan));
++
++ /* writel ensures all descriptors are flushed before activation */
++ writel(BIT(0), XOR_ACTIVATION(chan));
+ }
+
+ static char mv_chan_is_busy(struct mv_xor_chan *chan)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+index 3df56c7c692f..5ee8cca9ee6c 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+@@ -332,9 +332,6 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
+ acpi_status status;
+ acpi_handle dhandle, rom_handle;
+
+- if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
+- return false;
+-
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
+index d306cc8fdeaa..ccf324b451c4 100644
+--- a/drivers/gpu/drm/radeon/radeon_bios.c
++++ b/drivers/gpu/drm/radeon/radeon_bios.c
+@@ -173,6 +173,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+ }
+ }
+
++ if (!found) {
++ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
++ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
++ if (!dhandle)
++ continue;
++
++ status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
++ if (!ACPI_FAILURE(status)) {
++ found = true;
++ break;
++ }
++ }
++ }
++
+ if (!found)
+ return false;
+
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index f3ae60759c5f..3e35bbe96e3a 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -513,22 +513,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+ rbo = container_of(bo, struct radeon_bo, tbo);
+ radeon_bo_check_tiling(rbo, 0, 0);
+ rdev = rbo->rdev;
+- if (bo->mem.mem_type == TTM_PL_VRAM) {
+- size = bo->mem.num_pages << PAGE_SHIFT;
+- offset = bo->mem.start << PAGE_SHIFT;
+- if ((offset + size) > rdev->mc.visible_vram_size) {
+- /* hurrah the memory is not visible ! */
+- radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+- rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+- r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+- if (unlikely(r != 0))
+- return r;
+- offset = bo->mem.start << PAGE_SHIFT;
+- /* this should not happen */
+- if ((offset + size) > rdev->mc.visible_vram_size)
+- return -EINVAL;
+- }
++ if (bo->mem.mem_type != TTM_PL_VRAM)
++ return 0;
++
++ size = bo->mem.num_pages << PAGE_SHIFT;
++ offset = bo->mem.start << PAGE_SHIFT;
++ if ((offset + size) <= rdev->mc.visible_vram_size)
++ return 0;
++
++ /* hurrah the memory is not visible ! */
++ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
++ rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
++ r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
++ if (unlikely(r == -ENOMEM)) {
++ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
++ return ttm_bo_validate(bo, &rbo->placement, false, true, false);
++ } else if (unlikely(r != 0)) {
++ return r;
+ }
++
++ offset = bo->mem.start << PAGE_SHIFT;
++ /* this should never happen */
++ if ((offset + size) > rdev->mc.visible_vram_size)
++ return -EINVAL;
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 40932fbdac0f..84ba033dd68a 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -558,14 +558,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
+ } *cmd;
+ int ret;
+ struct vmw_resource *res;
++ SVGA3dCmdSurfaceDMASuffix *suffix;
++ uint32_t bo_size;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
++ suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
++ header->size - sizeof(*suffix));
++
++ /* Make sure device and verifier stays in sync. */
++ if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
++ DRM_ERROR("Invalid DMA suffix size.\n");
++ return -EINVAL;
++ }
++
+ ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+ &cmd->dma.guest.ptr,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
++ /* Make sure DMA doesn't cross BO boundaries. */
++ bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
++ if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
++ DRM_ERROR("Invalid DMA offset.\n");
++ return -EINVAL;
++ }
++
++ bo_size -= cmd->dma.guest.ptr.offset;
++ if (unlikely(suffix->maximumOffset > bo_size))
++ suffix->maximumOffset = bo_size;
++
+ bo = &vmw_bo->base;
+ ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
+ cmd->dma.host.sid, &srf);
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index ca2b3e6c4e9f..ccc89b0e14d9 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -678,6 +678,13 @@
+ #define USB_DEVICE_ID_SYMBOL_SCANNER_1 0x0800
+ #define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300
+
++#define USB_VENDOR_ID_SYNAPTICS 0x06cb
++#define USB_DEVICE_ID_SYNAPTICS_LTS1 0x0af8
++#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
++#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
++#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
++#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
++
+ #define USB_VENDOR_ID_THRUSTMASTER 0x044f
+
+ #define USB_VENDOR_ID_TOPSEED 0x0766
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index f98fbad966e7..71c258295045 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -100,6 +100,11 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS },
+
+ { 0, 0 }
+ };
+diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
+index cd2a6e437aec..7da08accab8b 100644
+--- a/drivers/hwmon/emc1403.c
++++ b/drivers/hwmon/emc1403.c
+@@ -159,7 +159,7 @@ static ssize_t store_hyst(struct device *dev,
+ if (retval < 0)
+ goto fail;
+
+- hyst = val - retval * 1000;
++ hyst = retval * 1000 - val;
+ hyst = DIV_ROUND_CLOSEST(hyst, 1000);
+ if (hyst < 0 || hyst > 255) {
+ retval = -ERANGE;
+@@ -290,7 +290,7 @@ static int emc1403_detect(struct i2c_client *client,
+ }
+
+ id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
+- if (id != 0x01)
++ if (id < 0x01 || id > 0x04)
+ return -ENODEV;
+
+ return 0;
+diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
+index 3c2812f13d96..aadb3984f0d3 100644
+--- a/drivers/i2c/busses/i2c-designware-core.c
++++ b/drivers/i2c/busses/i2c-designware-core.c
+@@ -346,6 +346,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
+ ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
+ dw_writel(dev, ic_con, DW_IC_CON);
+
++ /* enforce disabled interrupts (due to HW issues) */
++ i2c_dw_disable_int(dev);
++
+ /* Enable the adapter */
+ dw_writel(dev, 1, DW_IC_ENABLE);
+
+diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
+index 4c1718081685..7d6d2b76b92e 100644
+--- a/drivers/i2c/busses/i2c-s3c2410.c
++++ b/drivers/i2c/busses/i2c-s3c2410.c
+@@ -1082,10 +1082,10 @@ static int s3c24xx_i2c_resume(struct device *dev)
+ struct platform_device *pdev = to_platform_device(dev);
+ struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+
+- i2c->suspended = 0;
+ clk_enable(i2c->clk);
+ s3c24xx_i2c_init(i2c);
+ clk_disable(i2c->clk);
++ i2c->suspended = 0;
+
+ return 0;
+ }
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index e2a9867c19d5..342a059184a3 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -11,6 +11,7 @@
+ */
+
+ #include <linux/delay.h>
++#include <linux/dmi.h>
+ #include <linux/slab.h>
+ #include <linux/module.h>
+ #include <linux/input.h>
+@@ -783,7 +784,11 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
+ break;
+
+ case 3:
+- etd->reg_10 = 0x0b;
++ if (etd->set_hw_resolution)
++ etd->reg_10 = 0x0b;
++ else
++ etd->reg_10 = 0x03;
++
+ if (elantech_write_reg(psmouse, 0x10, etd->reg_10))
+ rc = -1;
+
+@@ -1206,6 +1211,22 @@ static int elantech_reconnect(struct psmouse *psmouse)
+ }
+
+ /*
++ * Some hw_version 3 models go into error state when we try to set bit 3 of r10
++ */
++static const struct dmi_system_id no_hw_res_dmi_table[] = {
++#if defined(CONFIG_DMI) && defined(CONFIG_X86)
++ {
++ /* Gigabyte U2442 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "U2442"),
++ },
++ },
++#endif
++ { }
++};
++
++/*
+ * determine hardware version and set some properties according to it.
+ */
+ static int elantech_set_properties(struct elantech_data *etd)
+@@ -1254,6 +1275,9 @@ static int elantech_set_properties(struct elantech_data *etd)
+ etd->reports_pressure = true;
+ }
+
++ /* Enable real hardware resolution on hw_version 3 ? */
++ etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table);
++
+ return 0;
+ }
+
+diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
+index 9e5f1aabea7e..3569bed80a28 100644
+--- a/drivers/input/mouse/elantech.h
++++ b/drivers/input/mouse/elantech.h
+@@ -128,6 +128,7 @@ struct elantech_data {
+ bool paritycheck;
+ bool jumpy_cursor;
+ bool reports_pressure;
++ bool set_hw_resolution;
+ unsigned char hw_version;
+ unsigned int fw_version;
+ unsigned int single_finger_reports;
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 886c1914146b..8a398076d1e5 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -1394,6 +1394,14 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+ .driver_data = (int []){1232, 5710, 1156, 4696},
+ },
+ {
++ /* Lenovo ThinkPad Edge E431 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"),
++ },
++ .driver_data = (int []){1024, 5022, 2508, 4832},
++ },
++ {
+ /* Lenovo ThinkPad T431s */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 2d0544c92737..db4b4a82843a 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8122,7 +8122,8 @@ static int md_notify_reboot(struct notifier_block *this,
+ if (mddev_trylock(mddev)) {
+ if (mddev->pers)
+ __md_stop_writes(mddev);
+- mddev->safemode = 2;
++ if (mddev->persistent)
++ mddev->safemode = 2;
+ mddev_unlock(mddev);
+ }
+ need_delay = 1;
+diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
+index 6edc9ba81203..298703fca3b5 100644
+--- a/drivers/media/media-device.c
++++ b/drivers/media/media-device.c
+@@ -90,6 +90,7 @@ static long media_device_enum_entities(struct media_device *mdev,
+ struct media_entity *ent;
+ struct media_entity_desc u_ent;
+
++ memset(&u_ent, 0, sizeof(u_ent));
+ if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
+ return -EFAULT;
+
+diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
+index 8aa058531280..17125d96297b 100644
+--- a/drivers/media/video/ov7670.c
++++ b/drivers/media/video/ov7670.c
+@@ -937,7 +937,7 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
+ * windows that fall outside that.
+ */
+ for (i = 0; i < N_WIN_SIZES; i++) {
+- struct ov7670_win_size *win = &ov7670_win_sizes[index];
++ struct ov7670_win_size *win = &ov7670_win_sizes[i];
+ if (info->min_width && win->width < info->min_width)
+ continue;
+ if (info->min_height && win->height < info->min_height)
+diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
+index c68531b88279..2671959f01bb 100644
+--- a/drivers/media/video/v4l2-compat-ioctl32.c
++++ b/drivers/media/video/v4l2-compat-ioctl32.c
+@@ -178,6 +178,9 @@ struct v4l2_create_buffers32 {
+
+ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+ {
++ if (get_user(kp->type, &up->type))
++ return -EFAULT;
++
+ switch (kp->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+@@ -208,17 +211,16 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
+
+ static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+ {
+- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
+- get_user(kp->type, &up->type))
+- return -EFAULT;
++ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
++ return -EFAULT;
+ return __get_v4l2_format32(kp, up);
+ }
+
+ static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+ {
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
+- copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt)))
+- return -EFAULT;
++ copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
++ return -EFAULT;
+ return __get_v4l2_format32(&kp->format, &up->format);
+ }
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 1bf36ac44a10..5af2a8f29bc0 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -4914,6 +4914,7 @@ static int __init bonding_init(void)
+ out:
+ return res;
+ err:
++ bond_destroy_debugfs();
+ rtnl_link_unregister(&bond_link_ops);
+ err_link:
+ unregister_pernet_subsys(&bond_net_ops);
+diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
+index 2c7f5036f570..5192f8615258 100644
+--- a/drivers/net/can/sja1000/peak_pci.c
++++ b/drivers/net/can/sja1000/peak_pci.c
+@@ -39,9 +39,9 @@ MODULE_LICENSE("GPL v2");
+ #define DRV_NAME "peak_pci"
+
+ struct peak_pci_chan {
+- void __iomem *cfg_base; /* Common for all channels */
+- struct net_device *next_dev; /* Chain of network devices */
+- u16 icr_mask; /* Interrupt mask for fast ack */
++ void __iomem *cfg_base; /* Common for all channels */
++ struct net_device *prev_dev; /* Chain of network devices */
++ u16 icr_mask; /* Interrupt mask for fast ack */
+ };
+
+ #define PEAK_PCI_CAN_CLOCK (16000000 / 2)
+@@ -98,7 +98,7 @@ static int __devinit peak_pci_probe(struct pci_dev *pdev,
+ {
+ struct sja1000_priv *priv;
+ struct peak_pci_chan *chan;
+- struct net_device *dev, *dev0 = NULL;
++ struct net_device *dev, *prev_dev;
+ void __iomem *cfg_base, *reg_base;
+ u16 sub_sys_id, icr;
+ int i, err, channels;
+@@ -196,18 +196,14 @@ static int __devinit peak_pci_probe(struct pci_dev *pdev,
+ }
+
+ /* Create chain of SJA1000 devices */
+- if (i == 0)
+- dev0 = dev;
+- else
+- chan->next_dev = dev;
++ chan->prev_dev = pci_get_drvdata(pdev);
++ pci_set_drvdata(pdev, dev);
+
+ dev_info(&pdev->dev,
+ "%s at reg_base=0x%p cfg_base=0x%p irq=%d\n",
+ dev->name, priv->reg_base, chan->cfg_base, dev->irq);
+ }
+
+- pci_set_drvdata(pdev, dev0);
+-
+ /* Enable interrupts */
+ writew(icr, cfg_base + PITA_ICR + 2);
+
+@@ -217,12 +213,13 @@ failure_remove_channels:
+ /* Disable interrupts */
+ writew(0x0, cfg_base + PITA_ICR + 2);
+
+- for (dev = dev0; dev; dev = chan->next_dev) {
+- unregister_sja1000dev(dev);
+- free_sja1000dev(dev);
++ for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
+ priv = netdev_priv(dev);
+ chan = priv->priv;
+- dev = chan->next_dev;
++ prev_dev = chan->prev_dev;
++
++ unregister_sja1000dev(dev);
++ free_sja1000dev(dev);
+ }
+
+ pci_iounmap(pdev, reg_base);
+@@ -241,7 +238,7 @@ failure_disable_pci:
+
+ static void __devexit peak_pci_remove(struct pci_dev *pdev)
+ {
+- struct net_device *dev = pci_get_drvdata(pdev); /* First device */
++ struct net_device *dev = pci_get_drvdata(pdev); /* Last device */
+ struct sja1000_priv *priv = netdev_priv(dev);
+ struct peak_pci_chan *chan = priv->priv;
+ void __iomem *cfg_base = chan->cfg_base;
+@@ -252,10 +249,12 @@ static void __devexit peak_pci_remove(struct pci_dev *pdev)
+
+ /* Loop over all registered devices */
+ while (1) {
++ struct net_device *prev_dev = chan->prev_dev;
++
+ dev_info(&pdev->dev, "removing device %s\n", dev->name);
+ unregister_sja1000dev(dev);
+ free_sja1000dev(dev);
+- dev = chan->next_dev;
++ dev = prev_dev;
+ if (!dev)
+ break;
+ priv = netdev_priv(dev);
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index c77c462bcee5..2615433b279f 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -10656,7 +10656,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
+ if (tg3_flag(tp, MAX_RXPEND_64) &&
+ tp->rx_pending > 63)
+ tp->rx_pending = 63;
+- tp->rx_jumbo_pending = ering->rx_jumbo_pending;
++
++ if (tg3_flag(tp, JUMBO_RING_ENABLE))
++ tp->rx_jumbo_pending = ering->rx_jumbo_pending;
+
+ for (i = 0; i < tp->irq_max; i++)
+ tp->napi[i].tx_pending = ering->tx_pending;
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 301b39ed96ce..b74cdf6a0d7d 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -236,11 +236,9 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
+ const struct macvlan_dev *vlan = netdev_priv(dev);
+ const struct macvlan_port *port = vlan->port;
+ const struct macvlan_dev *dest;
+- __u8 ip_summed = skb->ip_summed;
+
+ if (vlan->mode == MACVLAN_MODE_BRIDGE) {
+ const struct ethhdr *eth = (void *)skb->data;
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* send to other bridge ports directly */
+ if (is_multicast_ether_addr(eth->h_dest)) {
+@@ -258,7 +256,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
+ }
+
+ xmit_world:
+- skb->ip_summed = ip_summed;
+ skb->dev = vlan->lowerdev;
+ return dev_queue_xmit(skb);
+ }
+@@ -394,8 +391,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct net_device *lowerdev = vlan->lowerdev;
+
+- if (change & IFF_ALLMULTI)
+- dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
++ if (dev->flags & IFF_UP) {
++ if (change & IFF_ALLMULTI)
++ dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
++ }
+ }
+
+ static void macvlan_set_multicast_list(struct net_device *dev)
+diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c
+index e3257681e360..b78ee676e102 100644
+--- a/drivers/net/wimax/i2400m/usb-rx.c
++++ b/drivers/net/wimax/i2400m/usb-rx.c
+@@ -277,7 +277,7 @@ retry:
+ d_printf(1, dev, "RX: size changed to %d, received %d, "
+ "copied %d, capacity %ld\n",
+ rx_size, read_size, rx_skb->len,
+- (long) (skb_end_pointer(new_skb) - new_skb->head));
++ (long) skb_end_offset(new_skb));
+ goto retry;
+ }
+ /* In most cases, it happens due to the hardware scheduling a
+diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
+index 5c38281955d4..1d4c579178f3 100644
+--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
++++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
+@@ -651,20 +651,18 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
+ bss_conf->bssid);
+
+ /*
+- * Update the beacon. This is only required on USB devices. PCI
+- * devices fetch beacons periodically.
+- */
+- if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
+- rt2x00queue_update_beacon(rt2x00dev, vif);
+-
+- /*
+ * Start/stop beaconing.
+ */
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ if (!bss_conf->enable_beacon && intf->enable_beacon) {
+- rt2x00queue_clear_beacon(rt2x00dev, vif);
+ rt2x00dev->intf_beaconing--;
+ intf->enable_beacon = false;
++ /*
++ * Clear beacon in the H/W for this vif. This is needed
++ * to disable beaconing on this particular interface
++ * and keep it running on other interfaces.
++ */
++ rt2x00queue_clear_beacon(rt2x00dev, vif);
+
+ if (rt2x00dev->intf_beaconing == 0) {
+ /*
+@@ -675,11 +673,15 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
+ rt2x00queue_stop_queue(rt2x00dev->bcn);
+ mutex_unlock(&intf->beacon_skb_mutex);
+ }
+-
+-
+ } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
+ rt2x00dev->intf_beaconing++;
+ intf->enable_beacon = true;
++ /*
++ * Upload beacon to the H/W. This is only required on
++ * USB devices. PCI devices fetch beacons periodically.
++ */
++ if (rt2x00_is_usb(rt2x00dev))
++ rt2x00queue_update_beacon(rt2x00dev, vif);
+
+ if (rt2x00dev->intf_beaconing == 1) {
+ /*
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+index d3920daa3c32..79fc4b770500 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+@@ -1158,12 +1158,23 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ int err = 0;
+ static bool iqk_initialized;
++ unsigned long flags;
++
++ /* As this function can take a very long time (up to 350 ms)
++ * and can be called with irqs disabled, reenable the irqs
++ * to let the other devices continue being serviced.
++ *
++ * It is safe doing so since our own interrupts will only be enabled
++ * in a subsequent step.
++ */
++ local_save_flags(flags);
++ local_irq_enable();
+
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
+ err = _rtl92cu_init_mac(hw);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("init mac failed!\n"));
+- return err;
++ goto exit;
+ }
+ err = rtl92c_download_fw(hw);
+ if (err) {
+@@ -1171,7 +1182,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
+ ("Failed to download FW. Init HW without FW now..\n"));
+ err = 1;
+ rtlhal->fw_ready = false;
+- return err;
++ goto exit;
+ } else {
+ rtlhal->fw_ready = true;
+ }
+@@ -1212,6 +1223,8 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
+ _update_mac_setting(hw);
+ rtl92c_dm_init(hw);
+ _dump_registers(hw);
++exit:
++ local_irq_restore(flags);
+ return err;
+ }
+
+diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
+index 3ffc1b2b3932..b888675a228a 100644
+--- a/drivers/pci/hotplug/shpchp_ctrl.c
++++ b/drivers/pci/hotplug/shpchp_ctrl.c
+@@ -285,8 +285,8 @@ static int board_added(struct slot *p_slot)
+ return WRONG_BUS_FREQUENCY;
+ }
+
+- bsp = ctrl->pci_dev->bus->cur_bus_speed;
+- msp = ctrl->pci_dev->bus->max_bus_speed;
++ bsp = ctrl->pci_dev->subordinate->cur_bus_speed;
++ msp = ctrl->pci_dev->subordinate->max_bus_speed;
+
+ /* Check if there are other slots or devices on the same bus */
+ if (!list_empty(&ctrl->pci_dev->subordinate->devices))
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 8e6c4fafb3eb..2a8d6aa441c3 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -3405,7 +3405,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
+ /* Do not issue duplicate brightness change events to
+ * userspace. tpacpi_detect_brightness_capabilities() must have
+ * been called before this point */
+- if (tp_features.bright_acpimode && acpi_video_backlight_support()) {
++ if (acpi_video_backlight_support()) {
+ pr_info("This ThinkPad has standard ACPI backlight "
+ "brightness control, supported by the ACPI "
+ "video driver\n");
+diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+index 987c6d6adb76..01780a963181 100644
+--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
++++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+@@ -8166,7 +8166,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
+
+ mpt2sas_base_free_resources(ioc);
+ pci_save_state(pdev);
+- pci_disable_device(pdev);
+ pci_set_power_state(pdev, device_state);
+ return 0;
+ }
+diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
+index 2542c3743904..c5da0d24b4aa 100644
+--- a/drivers/staging/octeon/ethernet-tx.c
++++ b/drivers/staging/octeon/ethernet-tx.c
+@@ -344,7 +344,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
+ }
+ if (unlikely
+ (skb->truesize !=
+- sizeof(*skb) + skb_end_pointer(skb) - skb->head)) {
++ sizeof(*skb) + skb_end_offset(skb))) {
+ /*
+ printk("TX buffer truesize has been changed\n");
+ */
+diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
+index 7b97e7e3ede4..443547bf5407 100644
+--- a/drivers/tty/hvc/hvc_console.c
++++ b/drivers/tty/hvc/hvc_console.c
+@@ -190,7 +190,7 @@ static struct tty_driver *hvc_console_device(struct console *c, int *index)
+ return hvc_driver;
+ }
+
+-static int __init hvc_console_setup(struct console *co, char *options)
++static int hvc_console_setup(struct console *co, char *options)
+ {
+ if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES)
+ return -ENODEV;
+diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
+index 0b00091d2ae9..ff8aeeebe5fa 100644
+--- a/drivers/usb/storage/shuttle_usbat.c
++++ b/drivers/usb/storage/shuttle_usbat.c
+@@ -1846,7 +1846,7 @@ static int usbat_probe(struct usb_interface *intf,
+ us->transport_name = "Shuttle USBAT";
+ us->transport = usbat_flash_transport;
+ us->transport_reset = usb_stor_CB_reset;
+- us->max_lun = 1;
++ us->max_lun = 0;
+
+ result = usb_stor_probe2(us);
+ return result;
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 08711bc24124..49d222d965da 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -226,6 +226,20 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
++/* Reported by Daniele Forsi <dforsi@gmail.com> */
++UNUSUAL_DEV( 0x0421, 0x04b9, 0x0350, 0x0350,
++ "Nokia",
++ "5300",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_MAX_SECTORS_64 ),
++
++/* Patch submitted by Victor A. Santos <victoraur.santos@gmail.com> */
++UNUSUAL_DEV( 0x0421, 0x05af, 0x0742, 0x0742,
++ "Nokia",
++ "305",
++ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++ US_FL_MAX_SECTORS_64),
++
+ /* Patch submitted by Mikhail Zolotaryov <lebon@lebon.org.ua> */
+ UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110,
+ "Nokia",
+diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
+index ac2cf6dcc598..3b15bcac3766 100644
+--- a/drivers/video/tgafb.c
++++ b/drivers/video/tgafb.c
+@@ -192,6 +192,8 @@ tgafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+
+ if (var->xres_virtual != var->xres || var->yres_virtual != var->yres)
+ return -EINVAL;
++ if (var->xres * var->yres * (var->bits_per_pixel >> 3) > info->fix.smem_len)
++ return -EINVAL;
+ if (var->nonstd)
+ return -EINVAL;
+ if (1000000000 / var->pixclock > TGA_PLL_MAX_FREQ)
+@@ -272,6 +274,7 @@ tgafb_set_par(struct fb_info *info)
+ par->yres = info->var.yres;
+ par->pll_freq = pll_freq = 1000000000 / info->var.pixclock;
+ par->bits_per_pixel = info->var.bits_per_pixel;
++ info->fix.line_length = par->xres * (par->bits_per_pixel >> 3);
+
+ tga_type = par->tga_type;
+
+@@ -1318,6 +1321,7 @@ tgafb_init_fix(struct fb_info *info)
+ int tga_bus_tc = TGA_BUS_TC(par->dev);
+ u8 tga_type = par->tga_type;
+ const char *tga_type_name = NULL;
++ unsigned memory_size;
+
+ switch (tga_type) {
+ case TGA_TYPE_8PLANE:
+@@ -1325,21 +1329,25 @@ tgafb_init_fix(struct fb_info *info)
+ tga_type_name = "Digital ZLXp-E1";
+ if (tga_bus_tc)
+ tga_type_name = "Digital ZLX-E1";
++ memory_size = 2097152;
+ break;
+ case TGA_TYPE_24PLANE:
+ if (tga_bus_pci)
+ tga_type_name = "Digital ZLXp-E2";
+ if (tga_bus_tc)
+ tga_type_name = "Digital ZLX-E2";
++ memory_size = 8388608;
+ break;
+ case TGA_TYPE_24PLUSZ:
+ if (tga_bus_pci)
+ tga_type_name = "Digital ZLXp-E3";
+ if (tga_bus_tc)
+ tga_type_name = "Digital ZLX-E3";
++ memory_size = 16777216;
+ break;
+ default:
+ tga_type_name = "Unknown";
++ memory_size = 16777216;
+ break;
+ }
+
+@@ -1351,9 +1359,8 @@ tgafb_init_fix(struct fb_info *info)
+ ? FB_VISUAL_PSEUDOCOLOR
+ : FB_VISUAL_DIRECTCOLOR);
+
+- info->fix.line_length = par->xres * (par->bits_per_pixel >> 3);
+ info->fix.smem_start = (size_t) par->tga_fb_base;
+- info->fix.smem_len = info->fix.line_length * par->yres;
++ info->fix.smem_len = memory_size;
+ info->fix.mmio_start = (size_t) par->tga_regs_base;
+ info->fix.mmio_len = 512;
+
+@@ -1478,6 +1485,9 @@ tgafb_register(struct device *dev)
+ modedb_tga = &modedb_tc;
+ modedbsize_tga = 1;
+ }
++
++ tgafb_init_fix(info);
++
+ ret = fb_find_mode(&info->var, info,
+ mode_option ? mode_option : mode_option_tga,
+ modedb_tga, modedbsize_tga, NULL,
+@@ -1495,7 +1505,6 @@ tgafb_register(struct device *dev)
+ }
+
+ tgafb_set_par(info);
+- tgafb_init_fix(info);
+
+ if (register_framebuffer(info) < 0) {
+ printk(KERN_ERR "tgafb: Could not register framebuffer\n");
+diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
+index 9c51aff02ae2..435a9be1265e 100644
+--- a/fs/nfsd/nfs4acl.c
++++ b/fs/nfsd/nfs4acl.c
+@@ -373,8 +373,10 @@ sort_pacl(struct posix_acl *pacl)
+ * by uid/gid. */
+ int i, j;
+
+- if (pacl->a_count <= 4)
+- return; /* no users or groups */
++ /* no users or groups */
++ if (!pacl || pacl->a_count <= 4)
++ return;
++
+ i = 1;
+ while (pacl->a_entries[i].e_tag == ACL_USER)
+ i++;
+@@ -498,13 +500,12 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
+
+ /*
+ * ACLs with no ACEs are treated differently in the inheritable
+- * and effective cases: when there are no inheritable ACEs, we
+- * set a zero-length default posix acl:
++ * and effective cases: when there are no inheritable ACEs,
++ * calls ->set_acl with a NULL ACL structure.
+ */
+- if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) {
+- pacl = posix_acl_alloc(0, GFP_KERNEL);
+- return pacl ? pacl : ERR_PTR(-ENOMEM);
+- }
++ if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT))
++ return NULL;
++
+ /*
+ * When there are no effective ACEs, the following will end
+ * up setting a 3-element effective posix ACL with all
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 4cef99ff2408..b2e0a5585aed 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -986,6 +986,18 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
+ }
+ memcpy(clp->cl_name.data, name.data, name.len);
+ clp->cl_name.len = name.len;
++ INIT_LIST_HEAD(&clp->cl_sessions);
++ idr_init(&clp->cl_stateids);
++ atomic_set(&clp->cl_refcount, 0);
++ clp->cl_cb_state = NFSD4_CB_UNKNOWN;
++ INIT_LIST_HEAD(&clp->cl_idhash);
++ INIT_LIST_HEAD(&clp->cl_strhash);
++ INIT_LIST_HEAD(&clp->cl_openowners);
++ INIT_LIST_HEAD(&clp->cl_delegations);
++ INIT_LIST_HEAD(&clp->cl_lru);
++ INIT_LIST_HEAD(&clp->cl_callbacks);
++ spin_lock_init(&clp->cl_lock);
++ rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
+ return clp;
+ }
+
+@@ -999,6 +1011,7 @@ free_client(struct nfs4_client *clp)
+ list_del(&ses->se_perclnt);
+ nfsd4_put_session(ses);
+ }
++ rpc_destroy_wait_queue(&clp->cl_cb_waitq);
+ if (clp->cl_cred.cr_group_info)
+ put_group_info(clp->cl_cred.cr_group_info);
+ kfree(clp->cl_principal);
+@@ -1163,7 +1176,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
+ if (clp == NULL)
+ return NULL;
+
+- INIT_LIST_HEAD(&clp->cl_sessions);
+
+ princ = svc_gss_principal(rqstp);
+ if (princ) {
+@@ -1174,21 +1186,10 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
+ }
+ }
+
+- idr_init(&clp->cl_stateids);
+ memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
+- atomic_set(&clp->cl_refcount, 0);
+- clp->cl_cb_state = NFSD4_CB_UNKNOWN;
+- INIT_LIST_HEAD(&clp->cl_idhash);
+- INIT_LIST_HEAD(&clp->cl_strhash);
+- INIT_LIST_HEAD(&clp->cl_openowners);
+- INIT_LIST_HEAD(&clp->cl_delegations);
+- INIT_LIST_HEAD(&clp->cl_lru);
+- INIT_LIST_HEAD(&clp->cl_callbacks);
+- spin_lock_init(&clp->cl_lock);
+ INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
+ clp->cl_time = get_seconds();
+ clear_bit(0, &clp->cl_cb_slot_busy);
+- rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
+ copy_verf(clp, verf);
+ rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
+ clp->cl_flavor = rqstp->rq_flavor;
+@@ -3375,9 +3376,16 @@ out:
+ static __be32
+ nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
+ {
+- if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
++ struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
++
++ if (check_for_locks(stp->st_file, lo))
+ return nfserr_locks_held;
+- release_lock_stateid(stp);
++ /*
++ * Currently there's a 1-1 lock stateid<->lockowner
++ * correspondance, and we have to delete the lockowner when we
++ * delete the lock stateid:
++ */
++ unhash_lockowner(lo);
+ return nfs_ok;
+ }
+
+@@ -3812,6 +3820,10 @@ static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, c
+
+ if (!same_owner_str(&lo->lo_owner, owner, clid))
+ return false;
++ if (list_empty(&lo->lo_owner.so_stateids)) {
++ WARN_ON_ONCE(1);
++ return false;
++ }
+ lst = list_first_entry(&lo->lo_owner.so_stateids,
+ struct nfs4_ol_stateid, st_perstateowner);
+ return lst->st_file->fi_inode == inode;
+diff --git a/fs/posix_acl.c b/fs/posix_acl.c
+index cea4623f1ed6..6c70ab22a3e3 100644
+--- a/fs/posix_acl.c
++++ b/fs/posix_acl.c
+@@ -155,6 +155,12 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
+ umode_t mode = 0;
+ int not_equiv = 0;
+
++ /*
++ * A null ACL can always be presented as mode bits.
++ */
++ if (!acl)
++ return 0;
++
+ FOREACH_ACL_ENTRY(pa, acl, pe) {
+ switch (pa->e_tag) {
+ case ACL_USER_OBJ:
+diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
+index 26eafcef75be..a3ebb09d4283 100644
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -260,6 +260,7 @@ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
+ extern int ftrace_arch_read_dyn_info(char *buf, int size);
+
+ extern int skip_trace(unsigned long ip);
++extern void ftrace_module_init(struct module *mod);
+
+ extern void ftrace_disable_daemon(void);
+ extern void ftrace_enable_daemon(void);
+@@ -272,6 +273,7 @@ static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
+ static inline void ftrace_disable_daemon(void) { }
+ static inline void ftrace_enable_daemon(void) { }
+ static inline void ftrace_release_mod(struct module *mod) {}
++static inline void ftrace_module_init(struct module *mod) {}
+ static inline int register_ftrace_command(struct ftrace_func_command *cmd)
+ {
+ return -EINVAL;
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index e6796c19dc62..f93d8c11e085 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -95,7 +95,6 @@ struct kvm_async_pf {
+ unsigned long addr;
+ struct kvm_arch_async_pf arch;
+ struct page *page;
+- bool done;
+ };
+
+ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 13bd6d0e43c5..c445e5221d49 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -617,11 +617,21 @@ static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
+ {
+ return skb->head + skb->end;
+ }
++
++static inline unsigned int skb_end_offset(const struct sk_buff *skb)
++{
++ return skb->end;
++}
+ #else
+ static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
+ {
+ return skb->end;
+ }
++
++static inline unsigned int skb_end_offset(const struct sk_buff *skb)
++{
++ return skb->end - skb->head;
++}
+ #endif
+
+ /* Internal */
+@@ -2549,7 +2559,7 @@ static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
+ return false;
+
+ skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
+- if (skb_end_pointer(skb) - skb->head < skb_size)
++ if (skb_end_offset(skb) < skb_size)
+ return false;
+
+ if (skb_shared(skb) || skb_cloned(skb))
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
+index 5e91b72fc718..4913dacd1333 100644
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -34,6 +34,11 @@ struct route_info {
+ #define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010
+ #define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
+
++/* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
++ * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
++ */
++#define IP6_MAX_MTU (0xFFFF + sizeof(struct ipv6hdr))
++
+ /*
+ * rt6_srcprefs2flags() and rt6_flags2srcprefs() translate
+ * between IPV6_ADDR_PREFERENCES socket option values
+diff --git a/include/trace/events/module.h b/include/trace/events/module.h
+index 161932737416..ca298c7157ae 100644
+--- a/include/trace/events/module.h
++++ b/include/trace/events/module.h
+@@ -78,7 +78,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
+
+ TP_fast_assign(
+ __entry->ip = ip;
+- __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
++ __entry->refcnt = __this_cpu_read(mod->refptr->incs) - __this_cpu_read(mod->refptr->decs);
+ __assign_str(name, mod->name);
+ ),
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index b15b4f75131d..1d1edcb2d9a2 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4899,6 +4899,9 @@ struct swevent_htable {
+
+ /* Recursion avoidance in each contexts */
+ int recursion[PERF_NR_CONTEXTS];
++
++ /* Keeps track of cpu being initialized/exited */
++ bool online;
+ };
+
+ static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
+@@ -5141,8 +5144,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
+ hwc->state = !(flags & PERF_EF_START);
+
+ head = find_swevent_head(swhash, event);
+- if (WARN_ON_ONCE(!head))
++ if (!head) {
++ /*
++ * We can race with cpu hotplug code. Do not
++ * WARN if the cpu just got unplugged.
++ */
++ WARN_ON_ONCE(swhash->online);
+ return -EINVAL;
++ }
+
+ hlist_add_head_rcu(&event->hlist_entry, head);
+
+@@ -6301,6 +6310,9 @@ SYSCALL_DEFINE5(perf_event_open,
+ if (attr.freq) {
+ if (attr.sample_freq > sysctl_perf_event_sample_rate)
+ return -EINVAL;
++ } else {
++ if (attr.sample_period & (1ULL << 63))
++ return -EINVAL;
+ }
+
+ /*
+@@ -7078,6 +7090,7 @@ static void __cpuinit perf_event_init_cpu(int cpu)
+ struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
+
+ mutex_lock(&swhash->hlist_mutex);
++ swhash->online = true;
+ if (swhash->hlist_refcount > 0) {
+ struct swevent_hlist *hlist;
+
+@@ -7135,6 +7148,7 @@ static void perf_event_exit_cpu(int cpu)
+ perf_event_exit_cpu_context(cpu);
+
+ mutex_lock(&swhash->hlist_mutex);
++ swhash->online = false;
+ swevent_hlist_release(swhash);
+ mutex_unlock(&swhash->hlist_mutex);
+ }
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 8888815c578b..1bb37d0f3d7a 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -588,6 +588,55 @@ void exit_pi_state_list(struct task_struct *curr)
+ raw_spin_unlock_irq(&curr->pi_lock);
+ }
+
++/*
++ * We need to check the following states:
++ *
++ * Waiter | pi_state | pi->owner | uTID | uODIED | ?
++ *
++ * [1] NULL | --- | --- | 0 | 0/1 | Valid
++ * [2] NULL | --- | --- | >0 | 0/1 | Valid
++ *
++ * [3] Found | NULL | -- | Any | 0/1 | Invalid
++ *
++ * [4] Found | Found | NULL | 0 | 1 | Valid
++ * [5] Found | Found | NULL | >0 | 1 | Invalid
++ *
++ * [6] Found | Found | task | 0 | 1 | Valid
++ *
++ * [7] Found | Found | NULL | Any | 0 | Invalid
++ *
++ * [8] Found | Found | task | ==taskTID | 0/1 | Valid
++ * [9] Found | Found | task | 0 | 0 | Invalid
++ * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
++ *
++ * [1] Indicates that the kernel can acquire the futex atomically. We
++ * came came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
++ *
++ * [2] Valid, if TID does not belong to a kernel thread. If no matching
++ * thread is found then it indicates that the owner TID has died.
++ *
++ * [3] Invalid. The waiter is queued on a non PI futex
++ *
++ * [4] Valid state after exit_robust_list(), which sets the user space
++ * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
++ *
++ * [5] The user space value got manipulated between exit_robust_list()
++ * and exit_pi_state_list()
++ *
++ * [6] Valid state after exit_pi_state_list() which sets the new owner in
++ * the pi_state but cannot access the user space value.
++ *
++ * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
++ *
++ * [8] Owner and user space value match
++ *
++ * [9] There is no transient state which sets the user space TID to 0
++ * except exit_robust_list(), but this is indicated by the
++ * FUTEX_OWNER_DIED bit. See [4]
++ *
++ * [10] There is no transient state which leaves owner and user space
++ * TID out of sync.
++ */
+ static int
+ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ union futex_key *key, struct futex_pi_state **ps)
+@@ -603,12 +652,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ plist_for_each_entry_safe(this, next, head, list) {
+ if (match_futex(&this->key, key)) {
+ /*
+- * Another waiter already exists - bump up
+- * the refcount and return its pi_state:
++ * Sanity check the waiter before increasing
++ * the refcount and attaching to it.
+ */
+ pi_state = this->pi_state;
+ /*
+- * Userspace might have messed up non-PI and PI futexes
++ * Userspace might have messed up non-PI and
++ * PI futexes [3]
+ */
+ if (unlikely(!pi_state))
+ return -EINVAL;
+@@ -616,34 +666,70 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ WARN_ON(!atomic_read(&pi_state->refcount));
+
+ /*
+- * When pi_state->owner is NULL then the owner died
+- * and another waiter is on the fly. pi_state->owner
+- * is fixed up by the task which acquires
+- * pi_state->rt_mutex.
+- *
+- * We do not check for pid == 0 which can happen when
+- * the owner died and robust_list_exit() cleared the
+- * TID.
++ * Handle the owner died case:
+ */
+- if (pid && pi_state->owner) {
++ if (uval & FUTEX_OWNER_DIED) {
++ /*
++ * exit_pi_state_list sets owner to NULL and
++ * wakes the topmost waiter. The task which
++ * acquires the pi_state->rt_mutex will fixup
++ * owner.
++ */
++ if (!pi_state->owner) {
++ /*
++ * No pi state owner, but the user
++ * space TID is not 0. Inconsistent
++ * state. [5]
++ */
++ if (pid)
++ return -EINVAL;
++ /*
++ * Take a ref on the state and
++ * return. [4]
++ */
++ goto out_state;
++ }
++
+ /*
+- * Bail out if user space manipulated the
+- * futex value.
++ * If TID is 0, then either the dying owner
++ * has not yet executed exit_pi_state_list()
++ * or some waiter acquired the rtmutex in the
++ * pi state, but did not yet fixup the TID in
++ * user space.
++ *
++ * Take a ref on the state and return. [6]
+ */
+- if (pid != task_pid_vnr(pi_state->owner))
++ if (!pid)
++ goto out_state;
++ } else {
++ /*
++ * If the owner died bit is not set,
++ * then the pi_state must have an
++ * owner. [7]
++ */
++ if (!pi_state->owner)
+ return -EINVAL;
+ }
+
++ /*
++ * Bail out if user space manipulated the
++ * futex value. If pi state exists then the
++ * owner TID must be the same as the user
++ * space TID. [9/10]
++ */
++ if (pid != task_pid_vnr(pi_state->owner))
++ return -EINVAL;
++
++ out_state:
+ atomic_inc(&pi_state->refcount);
+ *ps = pi_state;
+-
+ return 0;
+ }
+ }
+
+ /*
+ * We are the first waiter - try to look up the real owner and attach
+- * the new pi_state to it, but bail out when TID = 0
++ * the new pi_state to it, but bail out when TID = 0 [1]
+ */
+ if (!pid)
+ return -ESRCH;
+@@ -651,6 +737,11 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ if (!p)
+ return -ESRCH;
+
++ if (!p->mm) {
++ put_task_struct(p);
++ return -EPERM;
++ }
++
+ /*
+ * We need to look at the task state flags to figure out,
+ * whether the task is exiting. To protect against the do_exit
+@@ -671,6 +762,9 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ return ret;
+ }
+
++ /*
++ * No existing pi state. First waiter. [2]
++ */
+ pi_state = alloc_pi_state();
+
+ /*
+@@ -742,10 +836,18 @@ retry:
+ return -EDEADLK;
+
+ /*
+- * Surprise - we got the lock. Just return to userspace:
++ * Surprise - we got the lock, but we do not trust user space at all.
+ */
+- if (unlikely(!curval))
+- return 1;
++ if (unlikely(!curval)) {
++ /*
++ * We verify whether there is kernel state for this
++ * futex. If not, we can safely assume, that the 0 ->
++ * TID transition is correct. If state exists, we do
++ * not bother to fixup the user space state as it was
++ * corrupted already.
++ */
++ return futex_top_waiter(hb, key) ? -EINVAL : 1;
++ }
+
+ uval = curval;
+
+@@ -875,6 +977,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+ struct task_struct *new_owner;
+ struct futex_pi_state *pi_state = this->pi_state;
+ u32 uninitialized_var(curval), newval;
++ int ret = 0;
+
+ if (!pi_state)
+ return -EINVAL;
+@@ -898,23 +1001,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+ new_owner = this->task;
+
+ /*
+- * We pass it to the next owner. (The WAITERS bit is always
+- * kept enabled while there is PI state around. We must also
+- * preserve the owner died bit.)
++ * We pass it to the next owner. The WAITERS bit is always
++ * kept enabled while there is PI state around. We cleanup the
++ * owner died bit, because we are the owner.
+ */
+- if (!(uval & FUTEX_OWNER_DIED)) {
+- int ret = 0;
++ newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+
+- newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+-
+- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+- ret = -EFAULT;
+- else if (curval != uval)
+- ret = -EINVAL;
+- if (ret) {
+- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+- return ret;
+- }
++ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
++ ret = -EFAULT;
++ else if (curval != uval)
++ ret = -EINVAL;
++ if (ret) {
++ raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
++ return ret;
+ }
+
+ raw_spin_lock_irq(&pi_state->owner->pi_lock);
+@@ -1193,7 +1292,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
+ *
+ * Returns:
+ * 0 - failed to acquire the lock atomicly
+- * 1 - acquired the lock
++ * >0 - acquired the lock, return value is vpid of the top_waiter
+ * <0 - error
+ */
+ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
+@@ -1204,7 +1303,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
+ {
+ struct futex_q *top_waiter = NULL;
+ u32 curval;
+- int ret;
++ int ret, vpid;
+
+ if (get_futex_value_locked(&curval, pifutex))
+ return -EFAULT;
+@@ -1232,11 +1331,13 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
+ * the contended case or if set_waiters is 1. The pi_state is returned
+ * in ps in contended cases.
+ */
++ vpid = task_pid_vnr(top_waiter->task);
+ ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
+ set_waiters);
+- if (ret == 1)
++ if (ret == 1) {
+ requeue_pi_wake_futex(top_waiter, key2, hb2);
+-
++ return vpid;
++ }
+ return ret;
+ }
+
+@@ -1268,10 +1369,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ struct futex_hash_bucket *hb1, *hb2;
+ struct plist_head *head1;
+ struct futex_q *this, *next;
+- u32 curval2;
+
+ if (requeue_pi) {
+ /*
++ * Requeue PI only works on two distinct uaddrs. This
++ * check is only valid for private futexes. See below.
++ */
++ if (uaddr1 == uaddr2)
++ return -EINVAL;
++
++ /*
+ * requeue_pi requires a pi_state, try to allocate it now
+ * without any locks in case it fails.
+ */
+@@ -1309,6 +1416,15 @@ retry:
+ if (unlikely(ret != 0))
+ goto out_put_key1;
+
++ /*
++ * The check above which compares uaddrs is not sufficient for
++ * shared futexes. We need to compare the keys:
++ */
++ if (requeue_pi && match_futex(&key1, &key2)) {
++ ret = -EINVAL;
++ goto out_put_keys;
++ }
++
+ hb1 = hash_futex(&key1);
+ hb2 = hash_futex(&key2);
+
+@@ -1354,16 +1470,25 @@ retry_private:
+ * At this point the top_waiter has either taken uaddr2 or is
+ * waiting on it. If the former, then the pi_state will not
+ * exist yet, look it up one more time to ensure we have a
+- * reference to it.
++ * reference to it. If the lock was taken, ret contains the
++ * vpid of the top waiter task.
+ */
+- if (ret == 1) {
++ if (ret > 0) {
+ WARN_ON(pi_state);
+ drop_count++;
+ task_count++;
+- ret = get_futex_value_locked(&curval2, uaddr2);
+- if (!ret)
+- ret = lookup_pi_state(curval2, hb2, &key2,
+- &pi_state);
++ /*
++ * If we acquired the lock, then the user
++ * space value of uaddr2 should be vpid. It
++ * cannot be changed by the top waiter as it
++ * is blocked on hb2 lock if it tries to do
++ * so. If something fiddled with it behind our
++ * back the pi state lookup might unearth
++ * it. So we rather use the known value than
++ * rereading and handing potential crap to
++ * lookup_pi_state.
++ */
++ ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
+ }
+
+ switch (ret) {
+@@ -2133,9 +2258,10 @@ retry:
+ /*
+ * To avoid races, try to do the TID -> 0 atomic transition
+ * again. If it succeeds then we can return without waking
+- * anyone else up:
++ * anyone else up. We only try this if neither the waiters nor
++ * the owner died bit are set.
+ */
+- if (!(uval & FUTEX_OWNER_DIED) &&
++ if (!(uval & ~FUTEX_TID_MASK) &&
+ cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
+ goto pi_faulted;
+ /*
+@@ -2167,11 +2293,9 @@ retry:
+ /*
+ * No waiters - kernel unlocks the futex:
+ */
+- if (!(uval & FUTEX_OWNER_DIED)) {
+- ret = unlock_futex_pi(uaddr, uval);
+- if (ret == -EFAULT)
+- goto pi_faulted;
+- }
++ ret = unlock_futex_pi(uaddr, uval);
++ if (ret == -EFAULT)
++ goto pi_faulted;
+
+ out_unlock:
+ spin_unlock(&hb->lock);
+@@ -2331,6 +2455,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ if (ret)
+ goto out_key2;
+
++ /*
++ * The check above which compares uaddrs is not sufficient for
++ * shared futexes. We need to compare the keys:
++ */
++ if (match_futex(&q.key, &key2)) {
++ ret = -EINVAL;
++ goto out_put_keys;
++ }
++
+ /* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ futex_wait_queue_me(hb, &q, to);
+
+diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
+index 60f7e326b6c8..20e88afba69b 100644
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -232,6 +232,11 @@ again:
+ goto again;
+ }
+ timer->base = new_base;
++ } else {
++ if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
++ cpu = this_cpu;
++ goto again;
++ }
+ }
+ return new_base;
+ }
+@@ -567,6 +572,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
+
+ cpu_base->expires_next.tv64 = expires_next.tv64;
+
++ /*
++ * If a hang was detected in the last timer interrupt then we
++ * leave the hang delay active in the hardware. We want the
++ * system to make progress. That also prevents the following
++ * scenario:
++ * T1 expires 50ms from now
++ * T2 expires 5s from now
++ *
++ * T1 is removed, so this code is called and would reprogram
++ * the hardware to 5s from now. Any hrtimer_start after that
++ * will not reprogram the hardware due to hang_detected being
++ * set. So we'd effectivly block all timers until the T2 event
++ * fires.
++ */
++ if (cpu_base->hang_detected)
++ return;
++
+ if (cpu_base->expires_next.tv64 != KTIME_MAX)
+ tick_program_event(cpu_base->expires_next, 1);
+ }
+@@ -958,11 +980,8 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ /* Remove an active timer from the queue: */
+ ret = remove_hrtimer(timer, base);
+
+- /* Switch the timer base, if necessary: */
+- new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+-
+ if (mode & HRTIMER_MODE_REL) {
+- tim = ktime_add_safe(tim, new_base->get_time());
++ tim = ktime_add_safe(tim, base->get_time());
+ /*
+ * CONFIG_TIME_LOW_RES is a temporary way for architectures
+ * to signal that they simply return xtime in
+@@ -977,6 +996,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+
+ hrtimer_set_expires_range_ns(timer, tim, delta_ns);
+
++ /* Switch the timer base, if necessary: */
++ new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
++
+ timer_stats_hrtimer_set_start_info(timer);
+
+ leftmost = enqueue_hrtimer(timer, new_base);
+diff --git a/kernel/module.c b/kernel/module.c
+index 65362d9ab783..95ecd9ffc91f 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2888,6 +2888,9 @@ static struct module *load_module(void __user *umod,
+ /* This has to be done once we're sure module name is unique. */
+ dynamic_debug_setup(info.debug, info.num_debug);
+
++ /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
++ ftrace_module_init(mod);
++
+ /* Find duplicate symbols */
+ err = verify_export_symbols(mod);
+ if (err < 0)
+diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
+index a86cf9d9eb11..1f4afddf158a 100644
+--- a/kernel/sched_cpupri.c
++++ b/kernel/sched_cpupri.c
+@@ -68,8 +68,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
+ int idx = 0;
+ int task_pri = convert_prio(p->prio);
+
+- if (task_pri >= MAX_RT_PRIO)
+- return 0;
++ BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
+
+ for (idx = 0; idx < task_pri; idx++) {
+ struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
+diff --git a/kernel/timer.c b/kernel/timer.c
+index f8b05a467883..349953ee9027 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -769,7 +769,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
+
+ bit = find_last_bit(&mask, BITS_PER_LONG);
+
+- mask = (1 << bit) - 1;
++ mask = (1UL << bit) - 1;
+
+ expires_limit = expires_limit & ~(mask);
+
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index a65fa363c024..dcbafedc38ae 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3542,16 +3542,11 @@ static void ftrace_init_module(struct module *mod,
+ ftrace_process_locs(mod, start, end);
+ }
+
+-static int ftrace_module_notify_enter(struct notifier_block *self,
+- unsigned long val, void *data)
++void ftrace_module_init(struct module *mod)
+ {
+- struct module *mod = data;
+-
+- if (val == MODULE_STATE_COMING)
+- ftrace_init_module(mod, mod->ftrace_callsites,
+- mod->ftrace_callsites +
+- mod->num_ftrace_callsites);
+- return 0;
++ ftrace_init_module(mod, mod->ftrace_callsites,
++ mod->ftrace_callsites +
++ mod->num_ftrace_callsites);
+ }
+
+ static int ftrace_module_notify_exit(struct notifier_block *self,
+@@ -3565,11 +3560,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
+ return 0;
+ }
+ #else
+-static int ftrace_module_notify_enter(struct notifier_block *self,
+- unsigned long val, void *data)
+-{
+- return 0;
+-}
+ static int ftrace_module_notify_exit(struct notifier_block *self,
+ unsigned long val, void *data)
+ {
+@@ -3577,11 +3567,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
+ }
+ #endif /* CONFIG_MODULES */
+
+-struct notifier_block ftrace_module_enter_nb = {
+- .notifier_call = ftrace_module_notify_enter,
+- .priority = INT_MAX, /* Run before anything that can use kprobes */
+-};
+-
+ struct notifier_block ftrace_module_exit_nb = {
+ .notifier_call = ftrace_module_notify_exit,
+ .priority = INT_MIN, /* Run after anything that can remove kprobes */
+@@ -3618,10 +3603,6 @@ void __init ftrace_init(void)
+ __start_mcount_loc,
+ __stop_mcount_loc);
+
+- ret = register_module_notifier(&ftrace_module_enter_nb);
+- if (ret)
+- pr_warning("Failed to register trace ftrace module enter notifier\n");
+-
+ ret = register_module_notifier(&ftrace_module_exit_nb);
+ if (ret)
+ pr_warning("Failed to register trace ftrace module exit notifier\n");
+diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
+index 41b25a0ed616..088fbc525ef4 100644
+--- a/kernel/tracepoint.c
++++ b/kernel/tracepoint.c
+@@ -638,6 +638,9 @@ static int tracepoint_module_coming(struct module *mod)
+ struct tp_module *tp_mod, *iter;
+ int ret = 0;
+
++ if (!mod->num_tracepoints)
++ return 0;
++
+ /*
+ * We skip modules that taint the kernel, especially those with different
+ * module headers (for forced load), to make sure we don't cause a crash.
+@@ -681,6 +684,9 @@ static int tracepoint_module_going(struct module *mod)
+ {
+ struct tp_module *pos;
+
++ if (!mod->num_tracepoints)
++ return 0;
++
+ mutex_lock(&tracepoints_mutex);
+ tracepoint_update_probe_range(mod->tracepoints_ptrs,
+ mod->tracepoints_ptrs + mod->num_tracepoints);
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 96c4bcf188b1..51901b184872 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1033,15 +1033,16 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
+ return 0;
+ } else if (PageHuge(hpage)) {
+ /*
+- * Check "just unpoisoned", "filter hit", and
+- * "race with other subpage."
++ * Check "filter hit" and "race with other subpage."
+ */
+ lock_page(hpage);
+- if (!PageHWPoison(hpage)
+- || (hwpoison_filter(p) && TestClearPageHWPoison(p))
+- || (p != hpage && TestSetPageHWPoison(hpage))) {
+- atomic_long_sub(nr_pages, &mce_bad_pages);
+- return 0;
++ if (PageHWPoison(hpage)) {
++ if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
++ || (p != hpage && TestSetPageHWPoison(hpage))) {
++ atomic_long_sub(nr_pages, &mce_bad_pages);
++ unlock_page(hpage);
++ return 0;
++ }
+ }
+ set_page_hwpoison_huge_page(hpage);
+ res = dequeue_hwpoisoned_huge_page(hpage);
+@@ -1093,6 +1094,8 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
+ */
+ if (!PageHWPoison(p)) {
+ printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
++ atomic_long_sub(nr_pages, &mce_bad_pages);
++ put_page(hpage);
+ res = 0;
+ goto out;
+ }
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index b5cd796fcd6c..d2ac057008f2 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -559,7 +559,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
+ * => fast response on large errors; small oscillation near setpoint
+ */
+ setpoint = (freerun + limit) / 2;
+- x = div_s64((setpoint - dirty) << RATELIMIT_CALC_SHIFT,
++ x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
+ limit - setpoint + 1);
+ pos_ratio = x;
+ pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
+@@ -625,7 +625,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
+ x_intercept = bdi_setpoint + span;
+
+ if (bdi_dirty < x_intercept - span / 4) {
+- pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
++ pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
+ x_intercept - bdi_setpoint + 1);
+ } else
+ pos_ratio /= 4;
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index aa1264910c4e..4d99d42f30ff 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -610,14 +610,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
+ if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+ struct hci_cp_auth_requested cp;
+
+- /* encrypt must be pending if auth is also pending */
+- set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+-
+ cp.handle = cpu_to_le16(conn->handle);
+ hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
+ sizeof(cp), &cp);
++
++ /* If we're already encrypted set the REAUTH_PEND flag,
++ * otherwise set the ENCRYPT_PEND.
++ */
+ if (conn->key_type != 0xff)
+ set_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
++ else
++ set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+ }
+
+ return 0;
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index cbf9ccd45784..99a48a39c009 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -211,11 +211,26 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
+ return 0;
+ }
+
++static int br_dev_newlink(struct net *src_net, struct net_device *dev,
++ struct nlattr *tb[], struct nlattr *data[])
++{
++ struct net_bridge *br = netdev_priv(dev);
++
++ if (tb[IFLA_ADDRESS]) {
++ spin_lock_bh(&br->lock);
++ br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
++ spin_unlock_bh(&br->lock);
++ }
++
++ return register_netdevice(dev);
++}
++
+ struct rtnl_link_ops br_link_ops __read_mostly = {
+ .kind = "bridge",
+ .priv_size = sizeof(struct net_bridge),
+ .setup = br_dev_setup,
+ .validate = br_validate,
++ .newlink = br_dev_newlink,
+ .dellink = br_dev_delete,
+ };
+
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 5864cc491369..45f93f8a10c8 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1044,10 +1044,9 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
+ if (repl->num_counters &&
+ copy_to_user(repl->counters, counterstmp,
+ repl->num_counters * sizeof(struct ebt_counter))) {
+- ret = -EFAULT;
++ /* Silent error, can't fail, new table is already in place */
++ net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
+ }
+- else
+- ret = 0;
+
+ /* decrease module count and free resources */
+ EBT_ENTRY_ITERATE(table->entries, table->entries_size,
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index ad5b70801f37..20ba2d59dc23 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -284,6 +284,37 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
+ return r;
+ }
+
++static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
++ int offset, size_t size, bool more)
++{
++ int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
++ int ret;
++
++ ret = kernel_sendpage(sock, page, offset, size, flags);
++ if (ret == -EAGAIN)
++ ret = 0;
++
++ return ret;
++}
++
++static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
++ int offset, size_t size, bool more)
++{
++ int ret;
++ struct kvec iov;
++
++ /* sendpage cannot properly handle pages with page_count == 0,
++ * we need to fallback to sendmsg if that's the case */
++ if (page_count(page) >= 1)
++ return __ceph_tcp_sendpage(sock, page, offset, size, more);
++
++ iov.iov_base = kmap(page) + offset;
++ iov.iov_len = size;
++ ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
++ kunmap(page);
++
++ return ret;
++}
+
+ /*
+ * Shutdown/close the socket for the given connection.
+@@ -851,18 +882,14 @@ static int write_partial_msg_pages(struct ceph_connection *con)
+ cpu_to_le32(crc32c(tmpcrc, base, len));
+ con->out_msg_pos.did_page_crc = 1;
+ }
+- ret = kernel_sendpage(con->sock, page,
++ ret = ceph_tcp_sendpage(con->sock, page,
+ con->out_msg_pos.page_pos + page_shift,
+- len,
+- MSG_DONTWAIT | MSG_NOSIGNAL |
+- MSG_MORE);
++ len, 1);
+
+ if (crc &&
+ (msg->pages || msg->pagelist || msg->bio || in_trail))
+ kunmap(page);
+
+- if (ret == -EAGAIN)
+- ret = 0;
+ if (ret <= 0)
+ goto out;
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 7bcf37df0ce9..854da1571997 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3648,6 +3648,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+ skb->vlan_tci = 0;
+ skb->dev = napi->dev;
+ skb->skb_iif = 0;
++ skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
+
+ napi->skb = skb;
+ }
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 5dea45279215..9c88080e4049 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -320,6 +320,8 @@ load_b:
+
+ if (skb_is_nonlinear(skb))
+ return 0;
++ if (skb->len < sizeof(struct nlattr))
++ return 0;
+ if (A > skb->len - sizeof(struct nlattr))
+ return 0;
+
+@@ -336,11 +338,13 @@ load_b:
+
+ if (skb_is_nonlinear(skb))
+ return 0;
++ if (skb->len < sizeof(struct nlattr))
++ return 0;
+ if (A > skb->len - sizeof(struct nlattr))
+ return 0;
+
+ nla = (struct nlattr *)&skb->data[A];
+- if (nla->nla_len > A - skb->len)
++ if (nla->nla_len > skb->len - A)
+ return 0;
+
+ nla = nla_find_nested(nla, X);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 5b7d5f25d43e..7beaf10cc837 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -744,7 +744,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
+ return 0;
+ }
+
+-static size_t rtnl_port_size(const struct net_device *dev)
++static size_t rtnl_port_size(const struct net_device *dev,
++ u32 ext_filter_mask)
+ {
+ size_t port_size = nla_total_size(4) /* PORT_VF */
+ + nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
+@@ -760,7 +761,8 @@ static size_t rtnl_port_size(const struct net_device *dev)
+ size_t port_self_size = nla_total_size(sizeof(struct nlattr))
+ + port_size;
+
+- if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
++ if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
++ !(ext_filter_mask & RTEXT_FILTER_VF))
+ return 0;
+ if (dev_num_vf(dev->dev.parent))
+ return port_self_size + vf_ports_size +
+@@ -791,7 +793,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ + nla_total_size(ext_filter_mask
+ & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+ + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
+- + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
++ + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+ + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
+ }
+@@ -851,11 +853,13 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
+ return 0;
+ }
+
+-static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
++static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
++ u32 ext_filter_mask)
+ {
+ int err;
+
+- if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
++ if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
++ !(ext_filter_mask & RTEXT_FILTER_VF))
+ return 0;
+
+ err = rtnl_port_self_fill(skb, dev);
+@@ -1002,7 +1006,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
+ nla_nest_end(skb, vfinfo);
+ }
+
+- if (rtnl_port_fill(skb, dev))
++ if (rtnl_port_fill(skb, dev, ext_filter_mask))
+ goto nla_put_failure;
+
+ if (dev->rtnl_link_ops) {
+@@ -1057,6 +1061,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ struct hlist_node *node;
+ struct nlattr *tb[IFLA_MAX+1];
+ u32 ext_filter_mask = 0;
++ int err;
+
+ s_h = cb->args[0];
+ s_idx = cb->args[1];
+@@ -1077,11 +1082,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+- if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+- NETLINK_CB(cb->skb).pid,
+- cb->nlh->nlmsg_seq, 0,
+- NLM_F_MULTI,
+- ext_filter_mask) <= 0)
++ err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
++ NETLINK_CB(cb->skb).pid,
++ cb->nlh->nlmsg_seq, 0,
++ NLM_F_MULTI,
++ ext_filter_mask);
++ /* If we ran out of room on the first message,
++ * we're in trouble
++ */
++ WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
++
++ if (err <= 0)
+ goto out;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 8ac4a0f3e869..9204d9b747f2 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -743,7 +743,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
+ {
+ int headerlen = skb_headroom(skb);
+- unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
++ unsigned int size = skb_end_offset(skb) + skb->data_len;
+ struct sk_buff *n = alloc_skb(size, gfp_mask);
+
+ if (!n)
+@@ -843,7 +843,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
+ {
+ int i;
+ u8 *data;
+- int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
++ int size = nhead + skb_end_offset(skb) + ntail;
+ long off;
+ bool fastpath;
+
+@@ -2642,14 +2642,13 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
+ if (unlikely(!nskb))
+ goto err;
+
+- hsize = skb_end_pointer(nskb) - nskb->head;
++ hsize = skb_end_offset(nskb);
+ if (skb_cow_head(nskb, doffset + headroom)) {
+ kfree_skb(nskb);
+ goto err;
+ }
+
+- nskb->truesize += skb_end_pointer(nskb) - nskb->head -
+- hsize;
++ nskb->truesize += skb_end_offset(nskb) - hsize;
+ skb_release_head_state(nskb);
+ __skb_push(nskb, doffset);
+ } else {
+@@ -3197,12 +3196,14 @@ EXPORT_SYMBOL(__skb_warn_lro_forwarding);
+ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+ {
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+- unsigned int hdr_len;
+
+ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+- hdr_len = tcp_hdrlen(skb);
+- else
+- hdr_len = sizeof(struct udphdr);
+- return hdr_len + shinfo->gso_size;
++ return tcp_hdrlen(skb) + shinfo->gso_size;
++
++ /* UFO sets gso_size to the size of the fragmentation
++ * payload, i.e. the size of the L4 (UDP) header is already
++ * accounted for.
++ */
++ return shinfo->gso_size;
+ }
+ EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index d01f9c658dec..76da979baed0 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -752,13 +752,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
+ fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
+ if (fi == NULL)
+ goto failure;
++ fib_info_cnt++;
+ if (cfg->fc_mx) {
+ fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+ if (!fi->fib_metrics)
+ goto failure;
+ } else
+ fi->fib_metrics = (u32 *) dst_default_metrics;
+- fib_info_cnt++;
+
+ fi->fib_net = hold_net(net);
+ fi->fib_protocol = cfg->fc_protocol;
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
+index e0d9f02fec11..7593f3a46035 100644
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -42,12 +42,12 @@
+ static bool ip_may_fragment(const struct sk_buff *skb)
+ {
+ return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
+- !skb->local_df;
++ skb->local_df;
+ }
+
+ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+ {
+- if (skb->len <= mtu || skb->local_df)
++ if (skb->len <= mtu)
+ return false;
+
+ if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index fd7a3f68917f..bcb6e6197595 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -1039,8 +1039,10 @@ static int __do_replace(struct net *net, const char *name,
+
+ xt_free_table_info(oldinfo);
+ if (copy_to_user(counters_ptr, counters,
+- sizeof(struct xt_counters) * num_counters) != 0)
+- ret = -EFAULT;
++ sizeof(struct xt_counters) * num_counters) != 0) {
++ /* Silent error, can't fail, new table is already in place */
++ net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n");
++ }
+ vfree(counters);
+ xt_table_unlock(t);
+ return ret;
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index 24e556e83a3b..f98a1cf54c5b 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -1227,8 +1227,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+
+ xt_free_table_info(oldinfo);
+ if (copy_to_user(counters_ptr, counters,
+- sizeof(struct xt_counters) * num_counters) != 0)
+- ret = -EFAULT;
++ sizeof(struct xt_counters) * num_counters) != 0) {
++ /* Silent error, can't fail, new table is already in place */
++ net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
++ }
+ vfree(counters);
+ xt_table_unlock(t);
+ return ret;
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 00975b6db7e9..d495d4bcd745 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -203,26 +203,33 @@ static int ping_init_sock(struct sock *sk)
+ struct net *net = sock_net(sk);
+ gid_t group = current_egid();
+ gid_t range[2];
+- struct group_info *group_info = get_current_groups();
+- int i, j, count = group_info->ngroups;
++ struct group_info *group_info;
++ int i, j, count;
++ int ret = 0;
+
+ inet_get_ping_group_range_net(net, range, range+1);
+ if (range[0] <= group && group <= range[1])
+ return 0;
+
++ group_info = get_current_groups();
++ count = group_info->ngroups;
+ for (i = 0; i < group_info->nblocks; i++) {
+ int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
+
+ for (j = 0; j < cp_count; j++) {
+ group = group_info->blocks[i][j];
+ if (range[0] <= group && group <= range[1])
+- return 0;
++ goto out_release_group;
+ }
+
+ count -= cp_count;
+ }
+
+- return -EACCES;
++ ret = -EACCES;
++
++out_release_group:
++ put_group_info(group_info);
++ return ret;
+ }
+
+ static void ping_close(struct sock *sk, long timeout)
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 6768ce2f50b1..6526110ba784 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2142,7 +2142,7 @@ static int __mkroute_input(struct sk_buff *skb,
+ struct in_device *out_dev;
+ unsigned int flags = 0;
+ __be32 spec_dst;
+- u32 itag;
++ u32 itag = 0;
+
+ /* get a working reference to the output device */
+ out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
+diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
+index b78eac2b7079..ed3d6d4a6d65 100644
+--- a/net/ipv4/tcp_cubic.c
++++ b/net/ipv4/tcp_cubic.c
+@@ -406,7 +406,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
+ ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
+ ratio += cnt;
+
+- ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT);
++ ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT);
+ }
+
+ /* Some calls are for duplicates without timetamps */
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 94874b0bdcdc..2e752b2f5b30 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -1249,8 +1249,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+
+ xt_free_table_info(oldinfo);
+ if (copy_to_user(counters_ptr, counters,
+- sizeof(struct xt_counters) * num_counters) != 0)
+- ret = -EFAULT;
++ sizeof(struct xt_counters) * num_counters) != 0) {
++ /* Silent error, can't fail, new table is already in place */
++ net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
++ }
+ vfree(counters);
+ xt_table_unlock(t);
+ return ret;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 39e11f923a73..782f67a4f568 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1056,7 +1056,7 @@ static unsigned int ip6_mtu(const struct dst_entry *dst)
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ if (mtu)
+- return mtu;
++ goto out;
+
+ mtu = IPV6_MIN_MTU;
+
+@@ -1066,7 +1066,8 @@ static unsigned int ip6_mtu(const struct dst_entry *dst)
+ mtu = idev->cnf.mtu6;
+ rcu_read_unlock();
+
+- return mtu;
++out:
++ return min_t(unsigned int, mtu, IP6_MAX_MTU);
+ }
+
+ static struct dst_entry *icmp6_dst_gc_list;
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 969cd3eacd2b..e0f0934d6c39 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -772,9 +772,9 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ session->deref = pppol2tp_session_sock_put;
+
+ /* If PMTU discovery was enabled, use the MTU that was discovered */
+- dst = sk_dst_get(sk);
++ dst = sk_dst_get(tunnel->sock);
+ if (dst != NULL) {
+- u32 pmtu = dst_mtu(__sk_dst_get(sk));
++ u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
+ if (pmtu != 0)
+ session->mtu = session->mru = pmtu -
+ PPPOL2TP_HEADER_OVERHEAD;
+diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
+index e051398fdf6b..d067ed16bab1 100644
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -201,13 +201,12 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
+ out:
+ if (err) {
+ m->tcf_qstats.overlimits++;
+- /* should we be asking for packet to be dropped?
+- * may make sense for redirect case only
+- */
+- retval = TC_ACT_SHOT;
+- } else {
++ if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
++ retval = TC_ACT_SHOT;
++ else
++ retval = m->tcf_action;
++ } else
+ retval = m->tcf_action;
+- }
+ spin_unlock(&m->tcf_lock);
+
+ return retval;
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 6f6ad8686833..de35e01f03a9 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -528,8 +528,13 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
+ continue;
+ if ((laddr->state == SCTP_ADDR_SRC) &&
+ (AF_INET == laddr->a.sa.sa_family)) {
+- fl4->saddr = laddr->a.v4.sin_addr.s_addr;
+ fl4->fl4_sport = laddr->a.v4.sin_port;
++ flowi4_update_output(fl4,
++ asoc->base.sk->sk_bound_dev_if,
++ RT_CONN_FLAGS(asoc->base.sk),
++ daddr->v4.sin_addr.s_addr,
++ laddr->a.v4.sin_addr.s_addr);
++
+ rt = ip_route_output_key(&init_net, fl4);
+ if (!IS_ERR(rt)) {
+ dst = &rt->dst;
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index 619228d156c4..dc5748f3e384 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -569,12 +569,16 @@ static int ignore_undef_symbol(struct elf_info *info, const char *symname)
+ if (strncmp(symname, "_restgpr_", sizeof("_restgpr_") - 1) == 0 ||
+ strncmp(symname, "_savegpr_", sizeof("_savegpr_") - 1) == 0 ||
+ strncmp(symname, "_rest32gpr_", sizeof("_rest32gpr_") - 1) == 0 ||
+- strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0)
++ strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0 ||
++ strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
++ strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
+ return 1;
+ if (info->hdr->e_machine == EM_PPC64)
+ /* Special register function linked on all modules during final link of .ko */
+ if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 ||
+- strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0)
++ strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 ||
++ strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
++ strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
+ return 1;
+ /* Do not ignore this symbol */
+ return 0;
+diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
+index 74268b4c2ee1..bdd2c0d690d6 100644
+--- a/virt/kvm/async_pf.c
++++ b/virt/kvm/async_pf.c
+@@ -75,7 +75,6 @@ static void async_pf_execute(struct work_struct *work)
+ spin_lock(&vcpu->async_pf.lock);
+ list_add_tail(&apf->link, &vcpu->async_pf.done);
+ apf->page = page;
+- apf->done = true;
+ spin_unlock(&vcpu->async_pf.lock);
+
+ /*
+@@ -88,7 +87,7 @@ static void async_pf_execute(struct work_struct *work)
+ if (waitqueue_active(&vcpu->wq))
+ wake_up_interruptible(&vcpu->wq);
+
+- mmdrop(mm);
++ mmput(mm);
+ kvm_put_kvm(vcpu->kvm);
+ }
+
+@@ -99,10 +98,12 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
+ struct kvm_async_pf *work =
+ list_entry(vcpu->async_pf.queue.next,
+ typeof(*work), queue);
+- cancel_work_sync(&work->work);
+ list_del(&work->queue);
+- if (!work->done) /* work was canceled */
++ if (cancel_work_sync(&work->work)) {
++ mmput(work->mm);
++ kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
+ kmem_cache_free(async_pf_cache, work);
++ }
+ }
+
+ spin_lock(&vcpu->async_pf.lock);
+@@ -163,13 +164,12 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+ return 0;
+
+ work->page = NULL;
+- work->done = false;
+ work->vcpu = vcpu;
+ work->gva = gva;
+ work->addr = gfn_to_hva(vcpu->kvm, gfn);
+ work->arch = *arch;
+ work->mm = current->mm;
+- atomic_inc(&work->mm->mm_count);
++ atomic_inc(&work->mm->mm_users);
+ kvm_get_kvm(work->vcpu->kvm);
+
+ /* this can't really happen otherwise gfn_to_pfn_async
+@@ -187,7 +187,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+ return 1;
+ retry_sync:
+ kvm_put_kvm(work->vcpu->kvm);
+- mmdrop(work->mm);
++ mmput(work->mm);
+ kmem_cache_free(async_pf_cache, work);
+ return 0;
+ }
diff --git a/1500_selinux-add-SOCK_DIAG_BY_FAMILY-to-the-list-of-netli.patch b/1500_selinux-add-SOCK_DIAG_BY_FAMILY-to-the-list-of-netli.patch
new file mode 100644
index 00000000..34c25306
--- /dev/null
+++ b/1500_selinux-add-SOCK_DIAG_BY_FAMILY-to-the-list-of-netli.patch
@@ -0,0 +1,56 @@
+From 6a96e15096da6e7491107321cfa660c7c2aa119d Mon Sep 17 00:00:00 2001
+From: Paul Moore <pmoore@redhat.com>
+Date: Tue, 28 Jan 2014 14:45:41 -0500
+Subject: [PATCH 1/2] selinux: add SOCK_DIAG_BY_FAMILY to the list of netlink
+ message types
+
+The SELinux AF_NETLINK/NETLINK_SOCK_DIAG socket class was missing the
+SOCK_DIAG_BY_FAMILY definition which caused SELINUX_ERR messages when
+the ss tool was run.
+
+ # ss
+ Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port
+ u_str ESTAB 0 0 * 14189 * 14190
+ u_str ESTAB 0 0 * 14145 * 14144
+ u_str ESTAB 0 0 * 14151 * 14150
+ {...}
+ # ausearch -m SELINUX_ERR
+ ----
+ time->Thu Jan 23 11:11:16 2014
+ type=SYSCALL msg=audit(1390493476.445:374):
+ arch=c000003e syscall=44 success=yes exit=40
+ a0=3 a1=7fff03aa11f0 a2=28 a3=0 items=0 ppid=1852 pid=1895
+ auid=0 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0
+ tty=pts0 ses=1 comm="ss" exe="/usr/sbin/ss"
+ subj=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 key=(null)
+ type=SELINUX_ERR msg=audit(1390493476.445:374):
+ SELinux: unrecognized netlink message type=20 for sclass=32
+
+Signed-off-by: Paul Moore <pmoore@redhat.com>
+---
+ security/selinux/nlmsgtab.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
+index 332ac8a..2df7b90 100644
+--- a/security/selinux/nlmsgtab.c
++++ b/security/selinux/nlmsgtab.c
+@@ -17,6 +17,7 @@
+ #include <linux/inet_diag.h>
+ #include <linux/xfrm.h>
+ #include <linux/audit.h>
++#include <linux/sock_diag.h>
+
+ #include "flask.h"
+ #include "av_permissions.h"
+@@ -78,6 +79,7 @@ static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
+ {
+ { TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+ { DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
++ { SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+ };
+
+ static struct nlmsg_perm nlmsg_xfrm_perms[] =
+--
+1.9.2
+
diff --git a/1505_XATTR_USER_PREFIX.patch b/1505_XATTR_USER_PREFIX.patch
new file mode 100644
index 00000000..5d54c316
--- /dev/null
+++ b/1505_XATTR_USER_PREFIX.patch
@@ -0,0 +1,55 @@
+From: Anthony G. Basile <blueness@gentoo.org>
+
+This patch adds support for a restricted user-controlled namespace on
+tmpfs filesystem used to house PaX flags. The namespace must be of the
+form user.pax.* and its value cannot exceed a size of 8 bytes.
+
+This is needed even on all Gentoo systems so that XATTR_PAX flags
+are preserved for users who might build packages using portage on
+a tmpfs system with a non-hardened kernel and then switch to a
+hardened kernel with XATTR_PAX enabled.
+
+The namespace is added to any user with Extended Attribute support
+enabled for tmpfs. Users who do not enable xattrs will not have
+the XATTR_PAX flags preserved.
+
+diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
+index e4629b9..6958086 100644
+--- a/include/linux/xattr.h
++++ b/include/linux/xattr.h
+@@ -63,3 +63,8 @@
+ #endif /* __KERNEL__ */
++
++/* User namespace */
++#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
++#define XATTR_PAX_FLAGS_SUFFIX "flags"
++#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
+
+ #endif /* _UAPI_LINUX_XATTR_H */
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 1c44af7..f23bb1b 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2201,6 +2201,7 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+ static int shmem_xattr_validate(const char *name)
+ {
+ struct { const char *prefix; size_t len; } arr[] = {
++ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
+ { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
+ { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
+ };
+@@ -2256,7 +2257,14 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
+ if (err)
+ return err;
++
++ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
++ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
++ return -EOPNOTSUPP;
++ if (size > 8)
++ return -EINVAL;
++ }
+
+ if (size == 0)
+ value = ""; /* empty EA, do not remove */
+
+ return shmem_xattr_set(dentry, name, value, size, flags);
diff --git a/1512_af_key-initialize-satype-in-key_notify_policy_flush.patch b/1512_af_key-initialize-satype-in-key_notify_policy_flush.patch
new file mode 100644
index 00000000..a28fbc48
--- /dev/null
+++ b/1512_af_key-initialize-satype-in-key_notify_policy_flush.patch
@@ -0,0 +1,29 @@
+From 85dfb745ee40232876663ae206cba35f24ab2a40 Mon Sep 17 00:00:00 2001
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Date: Mon, 18 Feb 2013 16:24:20 +0100
+Subject: [PATCH] af_key: initialize satype in key_notify_policy_flush()
+
+This field was left uninitialized. Some user daemons perform check against this
+field.
+
+Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+---
+ net/key/af_key.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 9ef7985..d5a4a79 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -2694,6 +2694,7 @@ static int key_notify_policy_flush(const struct km_event *c)
+ hdr->sadb_msg_pid = c->portid;
+ hdr->sadb_msg_version = PF_KEY_V2;
+ hdr->sadb_msg_errno = (uint8_t) 0;
++ hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
+ hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
+ pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+ return 0;
+--
+1.8.2.1
+
diff --git a/2300_per-pci-device-msi-irq-listing.patch b/2300_per-pci-device-msi-irq-listing.patch
new file mode 100644
index 00000000..d4ffeb5e
--- /dev/null
+++ b/2300_per-pci-device-msi-irq-listing.patch
@@ -0,0 +1,211 @@
+--- a/Documentation/ABI/testing/sysfs-bus-pci 2012-01-05 01:55:44.000000000 +0200
++++ b/Documentation/ABI/testing/sysfs-bus-pci 2012-02-03 13:32:19.188582988 +0200
+@@ -66,6 +66,24 @@ Description:
+ re-discover previously removed devices.
+ Depends on CONFIG_HOTPLUG.
+
++What: /sys/bus/pci/devices/.../msi_irqs/
++Date: September, 2011
++Contact: Neil Horman <nhorman@tuxdriver.com>
++Description:
++ The /sys/devices/.../msi_irqs directory contains a variable set
++ of sub-directories, with each sub-directory being named after a
++ corresponding msi irq vector allocated to that device. Each
++ numbered sub-directory N contains attributes of that irq.
++ Note that this directory is not created for device drivers which
++ do not support msi irqs
++
++What: /sys/bus/pci/devices/.../msi_irqs/<N>/mode
++Date: September 2011
++Contact: Neil Horman <nhorman@tuxdriver.com>
++Description:
++ This attribute indicates the mode that the irq vector named by
++ the parent directory is in (msi vs. msix)
++
+ What: /sys/bus/pci/devices/.../remove
+ Date: January 2009
+ Contact: Linux PCI developers <linux-pci@vger.kernel.org>
+--- a/drivers/pci/msi.c 2012-02-02 00:33:14.568582984 +0200
++++ b/drivers/pci/msi.c 2012-02-03 13:32:19.196582978 +0200
+@@ -323,6 +323,8 @@ static void free_msi_irqs(struct pci_dev
+ if (list_is_last(&entry->list, &dev->msi_list))
+ iounmap(entry->mask_base);
+ }
++ kobject_del(&entry->kobj);
++ kobject_put(&entry->kobj);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+@@ -403,6 +405,98 @@ void pci_restore_msi_state(struct pci_de
+ }
+ EXPORT_SYMBOL_GPL(pci_restore_msi_state);
+
++
++#define to_msi_attr(obj) container_of(obj, struct msi_attribute, attr)
++#define to_msi_desc(obj) container_of(obj, struct msi_desc, kobj)
++
++struct msi_attribute {
++ struct attribute attr;
++ ssize_t (*show)(struct msi_desc *entry, struct msi_attribute *attr,
++ char *buf);
++ ssize_t (*store)(struct msi_desc *entry, struct msi_attribute *attr,
++ const char *buf, size_t count);
++};
++
++static ssize_t show_msi_mode(struct msi_desc *entry, struct msi_attribute *atr,
++ char *buf)
++{
++ return sprintf(buf, "%s\n", entry->msi_attrib.is_msix ? "msix" : "msi");
++}
++
++static ssize_t msi_irq_attr_show(struct kobject *kobj,
++ struct attribute *attr, char *buf)
++{
++ struct msi_attribute *attribute = to_msi_attr(attr);
++ struct msi_desc *entry = to_msi_desc(kobj);
++
++ if (!attribute->show)
++ return -EIO;
++
++ return attribute->show(entry, attribute, buf);
++}
++
++static const struct sysfs_ops msi_irq_sysfs_ops = {
++ .show = msi_irq_attr_show,
++};
++
++static struct msi_attribute mode_attribute =
++ __ATTR(mode, S_IRUGO, show_msi_mode, NULL);
++
++
++struct attribute *msi_irq_default_attrs[] = {
++ &mode_attribute.attr,
++ NULL
++};
++
++void msi_kobj_release(struct kobject *kobj)
++{
++ struct msi_desc *entry = to_msi_desc(kobj);
++
++ pci_dev_put(entry->dev);
++}
++
++static struct kobj_type msi_irq_ktype = {
++ .release = msi_kobj_release,
++ .sysfs_ops = &msi_irq_sysfs_ops,
++ .default_attrs = msi_irq_default_attrs,
++};
++
++static int populate_msi_sysfs(struct pci_dev *pdev)
++{
++ struct msi_desc *entry;
++ struct kobject *kobj;
++ int ret;
++ int count = 0;
++
++ pdev->msi_kset = kset_create_and_add("msi_irqs", NULL, &pdev->dev.kobj);
++ if (!pdev->msi_kset)
++ return -ENOMEM;
++
++ list_for_each_entry(entry, &pdev->msi_list, list) {
++ kobj = &entry->kobj;
++ kobj->kset = pdev->msi_kset;
++ pci_dev_get(pdev);
++ ret = kobject_init_and_add(kobj, &msi_irq_ktype, NULL,
++ "%u", entry->irq);
++ if (ret)
++ goto out_unroll;
++
++ count++;
++ }
++
++ return 0;
++
++out_unroll:
++ list_for_each_entry(entry, &pdev->msi_list, list) {
++ if (!count)
++ break;
++ kobject_del(&entry->kobj);
++ kobject_put(&entry->kobj);
++ count--;
++ }
++ return ret;
++}
++
+ /**
+ * msi_capability_init - configure device's MSI capability structure
+ * @dev: pointer to the pci_dev data structure of MSI device function
+@@ -454,6 +548,13 @@ static int msi_capability_init(struct pc
+ return ret;
+ }
+
++ ret = populate_msi_sysfs(dev);
++ if (ret) {
++ msi_mask_irq(entry, mask, ~mask);
++ free_msi_irqs(dev);
++ return ret;
++ }
++
+ /* Set MSI enabled bits */
+ pci_intx_for_msi(dev, 0);
+ msi_set_enable(dev, pos, 1);
+@@ -574,6 +675,12 @@ static int msix_capability_init(struct p
+
+ msix_program_entries(dev, entries);
+
++ ret = populate_msi_sysfs(dev);
++ if (ret) {
++ ret = 0;
++ goto error;
++ }
++
+ /* Set MSI-X enabled bits and unmask the function */
+ pci_intx_for_msi(dev, 0);
+ dev->msix_enabled = 1;
+@@ -732,6 +839,8 @@ void pci_disable_msi(struct pci_dev *dev
+
+ pci_msi_shutdown(dev);
+ free_msi_irqs(dev);
++ kset_unregister(dev->msi_kset);
++ dev->msi_kset = NULL;
+ }
+ EXPORT_SYMBOL(pci_disable_msi);
+
+@@ -830,6 +939,8 @@ void pci_disable_msix(struct pci_dev *de
+
+ pci_msix_shutdown(dev);
+ free_msi_irqs(dev);
++ kset_unregister(dev->msi_kset);
++ dev->msi_kset = NULL;
+ }
+ EXPORT_SYMBOL(pci_disable_msix);
+
+--- a/include/linux/msi.h 2012-01-05 01:55:44.000000000 +0200
++++ b/include/linux/msi.h 2012-02-03 13:32:19.196582978 +0200
+@@ -1,6 +1,7 @@
+ #ifndef LINUX_MSI_H
+ #define LINUX_MSI_H
+
++#include <linux/kobject.h>
+ #include <linux/list.h>
+
+ struct msi_msg {
+@@ -44,6 +45,8 @@ struct msi_desc {
+
+ /* Last set MSI message */
+ struct msi_msg msg;
++
++ struct kobject kobj;
+ };
+
+ /*
+--- a/include/linux/pci.h 2012-01-05 01:55:44.000000000 +0200
++++ b/include/linux/pci.h 2012-02-03 13:32:19.196582978 +0200
+@@ -336,6 +336,7 @@ struct pci_dev {
+ struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
+ #ifdef CONFIG_PCI_MSI
+ struct list_head msi_list;
++ struct kset *msi_kset;
+ #endif
+ struct pci_vpd *vpd;
+ #ifdef CONFIG_PCI_ATS
diff --git a/2400_kcopy-patch-for-infiniband-driver.patch b/2400_kcopy-patch-for-infiniband-driver.patch
new file mode 100644
index 00000000..01825d23
--- /dev/null
+++ b/2400_kcopy-patch-for-infiniband-driver.patch
@@ -0,0 +1,725 @@
+From 3b2e8091390000a15bdceccb57a6e3956a751134 Mon Sep 17 00:00:00 2001
+From: Alexey Shvetsov <alexxy@gentoo.org>
+Date: Tue, 17 Jan 2012 21:08:49 +0400
+Subject: [PATCH] [kcopy] Add kcopy driver
+
+Add kcopy driver from qlogic to implement zero copy for infiniband psm
+userspace driver
+
+Signed-off-by: Alexey Shvetsov <alexxy@gentoo.org>
+---
+ drivers/char/Kconfig | 2 +
+ drivers/char/Makefile | 2 +
+ drivers/char/kcopy/Kconfig | 17 ++
+ drivers/char/kcopy/Makefile | 4 +
+ drivers/char/kcopy/kcopy.c | 646 +++++++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 671 insertions(+), 0 deletions(-)
+ create mode 100644 drivers/char/kcopy/Kconfig
+ create mode 100644 drivers/char/kcopy/Makefile
+ create mode 100644 drivers/char/kcopy/kcopy.c
+
+diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
+index 4364303..f206545 100644
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -6,6 +6,8 @@ menu "Character devices"
+
+ source "drivers/tty/Kconfig"
+
++source "drivers/char/kcopy/Kconfig"
++
+ config DEVKMEM
+ bool "/dev/kmem virtual device support"
+ default y
+--- a/drivers/char/Makefile 2012-02-01 10:18:43.568153662 -0500
++++ b/drivers/char/Makefile 2012-02-01 10:19:40.726839554 -0500
+@@ -65,3 +65,4 @@ obj-$(CONFIG_JS_RTC) += js-rtc.o
+ js-rtc-y = rtc.o
+
+ obj-$(CONFIG_TILE_SROM) += tile-srom.o
++obj-$(CONFIG_KCOPY) += kcopy/
+diff --git a/drivers/char/kcopy/Kconfig b/drivers/char/kcopy/Kconfig
+new file mode 100644
+index 0000000..453ae52
+--- /dev/null
++++ b/drivers/char/kcopy/Kconfig
+@@ -0,0 +1,17 @@
++#
++# KCopy character device configuration
++#
++
++menu "KCopy"
++
++config KCOPY
++ tristate "Memory-to-memory copies using kernel assist"
++ default m
++ ---help---
++ High-performance inter-process memory copies. Can often save a
++ memory copy to shared memory in the application. Useful at least
++ for MPI applications where the point-to-point nature of vmsplice
++ and pipes can be a limiting factor in performance.
++
++endmenu
++
+diff --git a/drivers/char/kcopy/Makefile b/drivers/char/kcopy/Makefile
+new file mode 100644
+index 0000000..9cb269b
+--- /dev/null
++++ b/drivers/char/kcopy/Makefile
+@@ -0,0 +1,4 @@
++#
++# Makefile for the kernel character device drivers.
++#
++obj-$(CONFIG_KCOPY) += kcopy.o
+diff --git a/drivers/char/kcopy/kcopy.c b/drivers/char/kcopy/kcopy.c
+new file mode 100644
+index 0000000..a9f915c
+--- /dev/null
++++ b/drivers/char/kcopy/kcopy.c
+@@ -0,0 +1,646 @@
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/cdev.h>
++#include <linux/device.h>
++#include <linux/mutex.h>
++#include <linux/mman.h>
++#include <linux/highmem.h>
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++#include <linux/rbtree.h>
++#include <linux/rcupdate.h>
++#include <linux/uaccess.h>
++#include <linux/slab.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Arthur Jones <arthur.jones@qlogic.com>");
++MODULE_DESCRIPTION("QLogic kcopy driver");
++
++#define KCOPY_ABI 1
++#define KCOPY_MAX_MINORS 64
++
++struct kcopy_device {
++ struct cdev cdev;
++ struct class *class;
++ struct device *devp[KCOPY_MAX_MINORS];
++ dev_t dev;
++
++ struct kcopy_file *kf[KCOPY_MAX_MINORS];
++ struct mutex open_lock;
++};
++
++static struct kcopy_device kcopy_dev;
++
++/* per file data / one of these is shared per minor */
++struct kcopy_file {
++ int count;
++
++ /* pid indexed */
++ struct rb_root live_map_tree;
++
++ struct mutex map_lock;
++};
++
++struct kcopy_map_entry {
++ int count;
++ struct task_struct *task;
++ pid_t pid;
++ struct kcopy_file *file; /* file backpointer */
++
++ struct list_head list; /* free map list */
++ struct rb_node node; /* live map tree */
++};
++
++#define KCOPY_GET_SYSCALL 1
++#define KCOPY_PUT_SYSCALL 2
++#define KCOPY_ABI_SYSCALL 3
++
++struct kcopy_syscall {
++ __u32 tag;
++ pid_t pid;
++ __u64 n;
++ __u64 src;
++ __u64 dst;
++};
++
++static const void __user *kcopy_syscall_src(const struct kcopy_syscall *ks)
++{
++ return (const void __user *) (unsigned long) ks->src;
++}
++
++static void __user *kcopy_syscall_dst(const struct kcopy_syscall *ks)
++{
++ return (void __user *) (unsigned long) ks->dst;
++}
++
++static unsigned long kcopy_syscall_n(const struct kcopy_syscall *ks)
++{
++ return (unsigned long) ks->n;
++}
++
++static struct kcopy_map_entry *kcopy_create_entry(struct kcopy_file *file)
++{
++ struct kcopy_map_entry *kme =
++ kmalloc(sizeof(struct kcopy_map_entry), GFP_KERNEL);
++
++ if (!kme)
++ return NULL;
++
++ kme->count = 1;
++ kme->file = file;
++ kme->task = current;
++ kme->pid = current->tgid;
++ INIT_LIST_HEAD(&kme->list);
++
++ return kme;
++}
++
++static struct kcopy_map_entry *
++kcopy_lookup_pid(struct rb_root *root, pid_t pid)
++{
++ struct rb_node *node = root->rb_node;
++
++ while (node) {
++ struct kcopy_map_entry *kme =
++ container_of(node, struct kcopy_map_entry, node);
++
++ if (pid < kme->pid)
++ node = node->rb_left;
++ else if (pid > kme->pid)
++ node = node->rb_right;
++ else
++ return kme;
++ }
++
++ return NULL;
++}
++
++static int kcopy_insert(struct rb_root *root, struct kcopy_map_entry *kme)
++{
++ struct rb_node **new = &(root->rb_node);
++ struct rb_node *parent = NULL;
++
++ while (*new) {
++ struct kcopy_map_entry *tkme =
++ container_of(*new, struct kcopy_map_entry, node);
++
++ parent = *new;
++ if (kme->pid < tkme->pid)
++ new = &((*new)->rb_left);
++ else if (kme->pid > tkme->pid)
++ new = &((*new)->rb_right);
++ else {
++ printk(KERN_INFO "!!! debugging: bad rb tree !!!\n");
++ return -EINVAL;
++ }
++ }
++
++ rb_link_node(&kme->node, parent, new);
++ rb_insert_color(&kme->node, root);
++
++ return 0;
++}
++
++static int kcopy_open(struct inode *inode, struct file *filp)
++{
++ int ret;
++ const int minor = iminor(inode);
++ struct kcopy_file *kf = NULL;
++ struct kcopy_map_entry *kme;
++ struct kcopy_map_entry *okme;
++
++ if (minor < 0 || minor >= KCOPY_MAX_MINORS)
++ return -ENODEV;
++
++ mutex_lock(&kcopy_dev.open_lock);
++
++ if (!kcopy_dev.kf[minor]) {
++ kf = kmalloc(sizeof(struct kcopy_file), GFP_KERNEL);
++
++ if (!kf) {
++ ret = -ENOMEM;
++ goto bail;
++ }
++
++ kf->count = 1;
++ kf->live_map_tree = RB_ROOT;
++ mutex_init(&kf->map_lock);
++ kcopy_dev.kf[minor] = kf;
++ } else {
++ if (filp->f_flags & O_EXCL) {
++ ret = -EBUSY;
++ goto bail;
++ }
++ kcopy_dev.kf[minor]->count++;
++ }
++
++ kme = kcopy_create_entry(kcopy_dev.kf[minor]);
++ if (!kme) {
++ ret = -ENOMEM;
++ goto err_free_kf;
++ }
++
++ kf = kcopy_dev.kf[minor];
++
++ mutex_lock(&kf->map_lock);
++
++ okme = kcopy_lookup_pid(&kf->live_map_tree, kme->pid);
++ if (okme) {
++ /* pid already exists... */
++ okme->count++;
++ kfree(kme);
++ kme = okme;
++ } else
++ ret = kcopy_insert(&kf->live_map_tree, kme);
++
++ mutex_unlock(&kf->map_lock);
++
++ filp->private_data = kme;
++
++ ret = 0;
++ goto bail;
++
++err_free_kf:
++ if (kf) {
++ kcopy_dev.kf[minor] = NULL;
++ kfree(kf);
++ }
++bail:
++ mutex_unlock(&kcopy_dev.open_lock);
++ return ret;
++}
++
++static int kcopy_flush(struct file *filp, fl_owner_t id)
++{
++ struct kcopy_map_entry *kme = filp->private_data;
++ struct kcopy_file *kf = kme->file;
++
++ if (file_count(filp) == 1) {
++ mutex_lock(&kf->map_lock);
++ kme->count--;
++
++ if (!kme->count) {
++ rb_erase(&kme->node, &kf->live_map_tree);
++ kfree(kme);
++ }
++ mutex_unlock(&kf->map_lock);
++ }
++
++ return 0;
++}
++
++static int kcopy_release(struct inode *inode, struct file *filp)
++{
++ const int minor = iminor(inode);
++
++ mutex_lock(&kcopy_dev.open_lock);
++ kcopy_dev.kf[minor]->count--;
++ if (!kcopy_dev.kf[minor]->count) {
++ kfree(kcopy_dev.kf[minor]);
++ kcopy_dev.kf[minor] = NULL;
++ }
++ mutex_unlock(&kcopy_dev.open_lock);
++
++ return 0;
++}
++
++static void kcopy_put_pages(struct page **pages, int npages)
++{
++ int j;
++
++ for (j = 0; j < npages; j++)
++ put_page(pages[j]);
++}
++
++static int kcopy_validate_task(struct task_struct *p)
++{
++ return p && ((current_euid() == task_euid(p)) || (current_euid() == task_uid(p)));
++}
++
++static int kcopy_get_pages(struct kcopy_file *kf, pid_t pid,
++ struct page **pages, void __user *addr,
++ int write, size_t npages)
++{
++ int err;
++ struct mm_struct *mm;
++ struct kcopy_map_entry *rkme;
++
++ mutex_lock(&kf->map_lock);
++
++ rkme = kcopy_lookup_pid(&kf->live_map_tree, pid);
++ if (!rkme || !kcopy_validate_task(rkme->task)) {
++ err = -EINVAL;
++ goto bail_unlock;
++ }
++
++ mm = get_task_mm(rkme->task);
++ if (unlikely(!mm)) {
++ err = -ENOMEM;
++ goto bail_unlock;
++ }
++
++ down_read(&mm->mmap_sem);
++ err = get_user_pages(rkme->task, mm,
++ (unsigned long) addr, npages, write, 0,
++ pages, NULL);
++
++ if (err < npages && err > 0) {
++ kcopy_put_pages(pages, err);
++ err = -ENOMEM;
++ } else if (err == npages)
++ err = 0;
++
++ up_read(&mm->mmap_sem);
++
++ mmput(mm);
++
++bail_unlock:
++ mutex_unlock(&kf->map_lock);
++
++ return err;
++}
++
++static unsigned long kcopy_copy_pages_from_user(void __user *src,
++ struct page **dpages,
++ unsigned doff,
++ unsigned long n)
++{
++ struct page *dpage = *dpages;
++ char *daddr = kmap(dpage);
++ int ret = 0;
++
++ while (1) {
++ const unsigned long nleft = PAGE_SIZE - doff;
++ const unsigned long nc = (n < nleft) ? n : nleft;
++
++ /* if (copy_from_user(daddr + doff, src, nc)) { */
++ if (__copy_from_user_nocache(daddr + doff, src, nc)) {
++ ret = -EFAULT;
++ goto bail;
++ }
++
++ n -= nc;
++ if (n == 0)
++ break;
++
++ doff += nc;
++ doff &= ~PAGE_MASK;
++ if (doff == 0) {
++ kunmap(dpage);
++ dpages++;
++ dpage = *dpages;
++ daddr = kmap(dpage);
++ }
++
++ src += nc;
++ }
++
++bail:
++ kunmap(dpage);
++
++ return ret;
++}
++
++static unsigned long kcopy_copy_pages_to_user(void __user *dst,
++ struct page **spages,
++ unsigned soff,
++ unsigned long n)
++{
++ struct page *spage = *spages;
++ const char *saddr = kmap(spage);
++ int ret = 0;
++
++ while (1) {
++ const unsigned long nleft = PAGE_SIZE - soff;
++ const unsigned long nc = (n < nleft) ? n : nleft;
++
++ if (copy_to_user(dst, saddr + soff, nc)) {
++ ret = -EFAULT;
++ goto bail;
++ }
++
++ n -= nc;
++ if (n == 0)
++ break;
++
++ soff += nc;
++ soff &= ~PAGE_MASK;
++ if (soff == 0) {
++ kunmap(spage);
++ spages++;
++ spage = *spages;
++ saddr = kmap(spage);
++ }
++
++ dst += nc;
++ }
++
++bail:
++ kunmap(spage);
++
++ return ret;
++}
++
++static unsigned long kcopy_copy_to_user(void __user *dst,
++ struct kcopy_file *kf, pid_t pid,
++ void __user *src,
++ unsigned long n)
++{
++ struct page **pages;
++ const int pages_len = PAGE_SIZE / sizeof(struct page *);
++ int ret = 0;
++
++ pages = (struct page **) __get_free_page(GFP_KERNEL);
++ if (!pages) {
++ ret = -ENOMEM;
++ goto bail;
++ }
++
++ while (n) {
++ const unsigned long soff = (unsigned long) src & ~PAGE_MASK;
++ const unsigned long spages_left =
++ (soff + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ const unsigned long spages_cp =
++ min_t(unsigned long, spages_left, pages_len);
++ const unsigned long sbytes =
++ PAGE_SIZE - soff + (spages_cp - 1) * PAGE_SIZE;
++ const unsigned long nbytes = min_t(unsigned long, sbytes, n);
++
++ ret = kcopy_get_pages(kf, pid, pages, src, 0, spages_cp);
++ if (unlikely(ret))
++ goto bail_free;
++
++ ret = kcopy_copy_pages_to_user(dst, pages, soff, nbytes);
++ kcopy_put_pages(pages, spages_cp);
++ if (ret)
++ goto bail_free;
++ dst = (char *) dst + nbytes;
++ src = (char *) src + nbytes;
++
++ n -= nbytes;
++ }
++
++bail_free:
++ free_page((unsigned long) pages);
++bail:
++ return ret;
++}
++
++static unsigned long kcopy_copy_from_user(const void __user *src,
++ struct kcopy_file *kf, pid_t pid,
++ void __user *dst,
++ unsigned long n)
++{
++ struct page **pages;
++ const int pages_len = PAGE_SIZE / sizeof(struct page *);
++ int ret = 0;
++
++ pages = (struct page **) __get_free_page(GFP_KERNEL);
++ if (!pages) {
++ ret = -ENOMEM;
++ goto bail;
++ }
++
++ while (n) {
++ const unsigned long doff = (unsigned long) dst & ~PAGE_MASK;
++ const unsigned long dpages_left =
++ (doff + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ const unsigned long dpages_cp =
++ min_t(unsigned long, dpages_left, pages_len);
++ const unsigned long dbytes =
++ PAGE_SIZE - doff + (dpages_cp - 1) * PAGE_SIZE;
++ const unsigned long nbytes = min_t(unsigned long, dbytes, n);
++
++ ret = kcopy_get_pages(kf, pid, pages, dst, 1, dpages_cp);
++ if (unlikely(ret))
++ goto bail_free;
++
++ ret = kcopy_copy_pages_from_user((void __user *) src,
++ pages, doff, nbytes);
++ kcopy_put_pages(pages, dpages_cp);
++ if (ret)
++ goto bail_free;
++
++ dst = (char *) dst + nbytes;
++ src = (char *) src + nbytes;
++
++ n -= nbytes;
++ }
++
++bail_free:
++ free_page((unsigned long) pages);
++bail:
++ return ret;
++}
++
++static int kcopy_do_get(struct kcopy_map_entry *kme, pid_t pid,
++ const void __user *src, void __user *dst,
++ unsigned long n)
++{
++ struct kcopy_file *kf = kme->file;
++ int ret = 0;
++
++ if (n == 0) {
++ ret = -EINVAL;
++ goto bail;
++ }
++
++ ret = kcopy_copy_to_user(dst, kf, pid, (void __user *) src, n);
++
++bail:
++ return ret;
++}
++
++static int kcopy_do_put(struct kcopy_map_entry *kme, const void __user *src,
++ pid_t pid, void __user *dst,
++ unsigned long n)
++{
++ struct kcopy_file *kf = kme->file;
++ int ret = 0;
++
++ if (n == 0) {
++ ret = -EINVAL;
++ goto bail;
++ }
++
++ ret = kcopy_copy_from_user(src, kf, pid, (void __user *) dst, n);
++
++bail:
++ return ret;
++}
++
++static int kcopy_do_abi(u32 __user *dst)
++{
++ u32 val = KCOPY_ABI;
++ int err;
++
++ err = put_user(val, dst);
++ if (err)
++ return -EFAULT;
++
++ return 0;
++}
++
++ssize_t kcopy_write(struct file *filp, const char __user *data, size_t cnt,
++ loff_t *o)
++{
++ struct kcopy_map_entry *kme = filp->private_data;
++ struct kcopy_syscall ks;
++ int err = 0;
++ const void __user *src;
++ void __user *dst;
++ unsigned long n;
++
++ if (cnt != sizeof(struct kcopy_syscall)) {
++ err = -EINVAL;
++ goto bail;
++ }
++
++ err = copy_from_user(&ks, data, cnt);
++ if (unlikely(err))
++ goto bail;
++
++ src = kcopy_syscall_src(&ks);
++ dst = kcopy_syscall_dst(&ks);
++ n = kcopy_syscall_n(&ks);
++ if (ks.tag == KCOPY_GET_SYSCALL)
++ err = kcopy_do_get(kme, ks.pid, src, dst, n);
++ else if (ks.tag == KCOPY_PUT_SYSCALL)
++ err = kcopy_do_put(kme, src, ks.pid, dst, n);
++ else if (ks.tag == KCOPY_ABI_SYSCALL)
++ err = kcopy_do_abi(dst);
++ else
++ err = -EINVAL;
++
++bail:
++ return err ? err : cnt;
++}
++
++static const struct file_operations kcopy_fops = {
++ .owner = THIS_MODULE,
++ .open = kcopy_open,
++ .release = kcopy_release,
++ .flush = kcopy_flush,
++ .write = kcopy_write,
++};
++
++static int __init kcopy_init(void)
++{
++ int ret;
++ const char *name = "kcopy";
++ int i;
++ int ninit = 0;
++
++ mutex_init(&kcopy_dev.open_lock);
++
++ ret = alloc_chrdev_region(&kcopy_dev.dev, 0, KCOPY_MAX_MINORS, name);
++ if (ret)
++ goto bail;
++
++ kcopy_dev.class = class_create(THIS_MODULE, (char *) name);
++
++ if (IS_ERR(kcopy_dev.class)) {
++ ret = PTR_ERR(kcopy_dev.class);
++ printk(KERN_ERR "kcopy: Could not create "
++ "device class (err %d)\n", -ret);
++ goto bail_chrdev;
++ }
++
++ cdev_init(&kcopy_dev.cdev, &kcopy_fops);
++ ret = cdev_add(&kcopy_dev.cdev, kcopy_dev.dev, KCOPY_MAX_MINORS);
++ if (ret < 0) {
++ printk(KERN_ERR "kcopy: Could not add cdev (err %d)\n",
++ -ret);
++ goto bail_class;
++ }
++
++ for (i = 0; i < KCOPY_MAX_MINORS; i++) {
++ char devname[8];
++ const int minor = MINOR(kcopy_dev.dev) + i;
++ const dev_t dev = MKDEV(MAJOR(kcopy_dev.dev), minor);
++
++ snprintf(devname, sizeof(devname), "kcopy%02d", i);
++ kcopy_dev.devp[i] =
++ device_create(kcopy_dev.class, NULL,
++ dev, NULL, devname);
++
++ if (IS_ERR(kcopy_dev.devp[i])) {
++ ret = PTR_ERR(kcopy_dev.devp[i]);
++ printk(KERN_ERR "kcopy: Could not create "
++ "devp %d (err %d)\n", i, -ret);
++ goto bail_cdev_add;
++ }
++
++ ninit++;
++ }
++
++ ret = 0;
++ goto bail;
++
++bail_cdev_add:
++ for (i = 0; i < ninit; i++)
++ device_unregister(kcopy_dev.devp[i]);
++
++ cdev_del(&kcopy_dev.cdev);
++bail_class:
++ class_destroy(kcopy_dev.class);
++bail_chrdev:
++ unregister_chrdev_region(kcopy_dev.dev, KCOPY_MAX_MINORS);
++bail:
++ return ret;
++}
++
++static void __exit kcopy_fini(void)
++{
++ int i;
++
++ for (i = 0; i < KCOPY_MAX_MINORS; i++)
++ device_unregister(kcopy_dev.devp[i]);
++
++ cdev_del(&kcopy_dev.cdev);
++ class_destroy(kcopy_dev.class);
++ unregister_chrdev_region(kcopy_dev.dev, KCOPY_MAX_MINORS);
++}
++
++module_init(kcopy_init);
++module_exit(kcopy_fini);
diff --git a/2600_Input-ALPS-synaptics-touchpad.patch b/2600_Input-ALPS-synaptics-touchpad.patch
new file mode 100644
index 00000000..385e8780
--- /dev/null
+++ b/2600_Input-ALPS-synaptics-touchpad.patch
@@ -0,0 +1,1267 @@
+Only in linux-3.2/Documentation/input: alps.txt
+diff -ur linux-3.2.old/drivers/input/mouse/alps.c linux-3.2/drivers/input/mouse/alps.c
+--- linux-3.2.old/drivers/input/mouse/alps.c 2012-01-04 17:55:44.000000000 -0600
++++ linux-3.2/drivers/input/mouse/alps.c 2012-01-05 18:53:42.231275880 -0600
+@@ -17,13 +17,63 @@
+
+ #include <linux/slab.h>
+ #include <linux/input.h>
++#include <linux/input/mt.h>
+ #include <linux/serio.h>
+ #include <linux/libps2.h>
+
+ #include "psmouse.h"
+ #include "alps.h"
+
+-#define ALPS_OLDPROTO 0x01 /* old style input */
++/*
++ * Definitions for ALPS version 3 and 4 command mode protocol
++ */
++#define ALPS_V3_X_MAX 2000
++#define ALPS_V3_Y_MAX 1400
++
++#define ALPS_BITMAP_X_BITS 15
++#define ALPS_BITMAP_Y_BITS 11
++
++#define ALPS_CMD_NIBBLE_10 0x01f2
++
++static const struct alps_nibble_commands alps_v3_nibble_commands[] = {
++ { PSMOUSE_CMD_SETPOLL, 0x00 }, /* 0 */
++ { PSMOUSE_CMD_RESET_DIS, 0x00 }, /* 1 */
++ { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* 2 */
++ { PSMOUSE_CMD_SETRATE, 0x0a }, /* 3 */
++ { PSMOUSE_CMD_SETRATE, 0x14 }, /* 4 */
++ { PSMOUSE_CMD_SETRATE, 0x28 }, /* 5 */
++ { PSMOUSE_CMD_SETRATE, 0x3c }, /* 6 */
++ { PSMOUSE_CMD_SETRATE, 0x50 }, /* 7 */
++ { PSMOUSE_CMD_SETRATE, 0x64 }, /* 8 */
++ { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 9 */
++ { ALPS_CMD_NIBBLE_10, 0x00 }, /* a */
++ { PSMOUSE_CMD_SETRES, 0x00 }, /* b */
++ { PSMOUSE_CMD_SETRES, 0x01 }, /* c */
++ { PSMOUSE_CMD_SETRES, 0x02 }, /* d */
++ { PSMOUSE_CMD_SETRES, 0x03 }, /* e */
++ { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */
++};
++
++static const struct alps_nibble_commands alps_v4_nibble_commands[] = {
++ { PSMOUSE_CMD_ENABLE, 0x00 }, /* 0 */
++ { PSMOUSE_CMD_RESET_DIS, 0x00 }, /* 1 */
++ { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* 2 */
++ { PSMOUSE_CMD_SETRATE, 0x0a }, /* 3 */
++ { PSMOUSE_CMD_SETRATE, 0x14 }, /* 4 */
++ { PSMOUSE_CMD_SETRATE, 0x28 }, /* 5 */
++ { PSMOUSE_CMD_SETRATE, 0x3c }, /* 6 */
++ { PSMOUSE_CMD_SETRATE, 0x50 }, /* 7 */
++ { PSMOUSE_CMD_SETRATE, 0x64 }, /* 8 */
++ { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 9 */
++ { ALPS_CMD_NIBBLE_10, 0x00 }, /* a */
++ { PSMOUSE_CMD_SETRES, 0x00 }, /* b */
++ { PSMOUSE_CMD_SETRES, 0x01 }, /* c */
++ { PSMOUSE_CMD_SETRES, 0x02 }, /* d */
++ { PSMOUSE_CMD_SETRES, 0x03 }, /* e */
++ { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */
++};
++
++
+ #define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */
+ #define ALPS_PASS 0x04 /* device has a pass-through port */
+
+@@ -35,30 +85,33 @@
+ 6-byte ALPS packet */
+
+ static const struct alps_model_info alps_model_data[] = {
+- { { 0x32, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */
+- { { 0x33, 0x02, 0x0a }, 0x88, 0xf8, ALPS_OLDPROTO }, /* UMAX-530T */
+- { { 0x53, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
+- { { 0x53, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
+- { { 0x60, 0x03, 0xc8 }, 0xf8, 0xf8, 0 }, /* HP ze1115 */
+- { { 0x63, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
+- { { 0x63, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
+- { { 0x63, 0x02, 0x28 }, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Fujitsu Siemens S6010 */
+- { { 0x63, 0x02, 0x3c }, 0x8f, 0x8f, ALPS_WHEEL }, /* Toshiba Satellite S2400-103 */
+- { { 0x63, 0x02, 0x50 }, 0xef, 0xef, ALPS_FW_BK_1 }, /* NEC Versa L320 */
+- { { 0x63, 0x02, 0x64 }, 0xf8, 0xf8, 0 },
+- { { 0x63, 0x03, 0xc8 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D800 */
+- { { 0x73, 0x00, 0x0a }, 0xf8, 0xf8, ALPS_DUALPOINT }, /* ThinkPad R61 8918-5QG */
+- { { 0x73, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
+- { { 0x73, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Ahtec Laptop */
+- { { 0x20, 0x02, 0x0e }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */
+- { { 0x22, 0x02, 0x0a }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
+- { { 0x22, 0x02, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */
++ { { 0x32, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */
++ { { 0x33, 0x02, 0x0a }, 0x00, ALPS_PROTO_V1, 0x88, 0xf8, 0 }, /* UMAX-530T */
++ { { 0x53, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
++ { { 0x53, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
++ { { 0x60, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, /* HP ze1115 */
++ { { 0x63, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
++ { { 0x63, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
++ { { 0x63, 0x02, 0x28 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Fujitsu Siemens S6010 */
++ { { 0x63, 0x02, 0x3c }, 0x00, ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL }, /* Toshiba Satellite S2400-103 */
++ { { 0x63, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 }, /* NEC Versa L320 */
++ { { 0x63, 0x02, 0x64 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
++ { { 0x63, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D800 */
++ { { 0x73, 0x00, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT }, /* ThinkPad R61 8918-5QG */
++ { { 0x73, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
++ { { 0x73, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Ahtec Laptop */
++ { { 0x20, 0x02, 0x0e }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */
++ { { 0x22, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
++ { { 0x22, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */
+ /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
+- { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
++ { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
+ ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
+- { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
+- { { 0x52, 0x01, 0x14 }, 0xff, 0xff,
+- ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
++ { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
++ { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
++ ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
++ { { 0x73, 0x02, 0x64 }, 0x9b, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
++ { { 0x73, 0x02, 0x64 }, 0x9d, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
++ { { 0x73, 0x02, 0x64 }, 0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 },
+ };
+
+ /*
+@@ -67,42 +120,7 @@
+ * isn't valid per PS/2 spec.
+ */
+
+-/*
+- * PS/2 packet format
+- *
+- * byte 0: 0 0 YSGN XSGN 1 M R L
+- * byte 1: X7 X6 X5 X4 X3 X2 X1 X0
+- * byte 2: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
+- *
+- * Note that the device never signals overflow condition.
+- *
+- * ALPS absolute Mode - new format
+- *
+- * byte 0: 1 ? ? ? 1 ? ? ?
+- * byte 1: 0 x6 x5 x4 x3 x2 x1 x0
+- * byte 2: 0 x10 x9 x8 x7 ? fin ges
+- * byte 3: 0 y9 y8 y7 1 M R L
+- * byte 4: 0 y6 y5 y4 y3 y2 y1 y0
+- * byte 5: 0 z6 z5 z4 z3 z2 z1 z0
+- *
+- * Dualpoint device -- interleaved packet format
+- *
+- * byte 0: 1 1 0 0 1 1 1 1
+- * byte 1: 0 x6 x5 x4 x3 x2 x1 x0
+- * byte 2: 0 x10 x9 x8 x7 0 fin ges
+- * byte 3: 0 0 YSGN XSGN 1 1 1 1
+- * byte 4: X7 X6 X5 X4 X3 X2 X1 X0
+- * byte 5: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
+- * byte 6: 0 y9 y8 y7 1 m r l
+- * byte 7: 0 y6 y5 y4 y3 y2 y1 y0
+- * byte 8: 0 z6 z5 z4 z3 z2 z1 z0
+- *
+- * CAPITALS = stick, miniscules = touchpad
+- *
+- * ?'s can have different meanings on different models,
+- * such as wheel rotation, extra buttons, stick buttons
+- * on a dualpoint, etc.
+- */
++/* Packet formats are described in Documentation/input/alps.txt */
+
+ static bool alps_is_valid_first_byte(const struct alps_model_info *model,
+ unsigned char data)
+@@ -137,7 +155,7 @@
+ input_sync(dev2);
+ }
+
+-static void alps_process_packet(struct psmouse *psmouse)
++static void alps_process_packet_v1_v2(struct psmouse *psmouse)
+ {
+ struct alps_data *priv = psmouse->private;
+ const struct alps_model_info *model = priv->i;
+@@ -147,7 +165,7 @@
+ int x, y, z, ges, fin, left, right, middle;
+ int back = 0, forward = 0;
+
+- if (model->flags & ALPS_OLDPROTO) {
++ if (model->proto_version == ALPS_PROTO_V1) {
+ left = packet[2] & 0x10;
+ right = packet[2] & 0x08;
+ middle = 0;
+@@ -239,6 +257,403 @@
+ input_sync(dev);
+ }
+
++/*
++ * Process bitmap data from v3 and v4 protocols. Returns the number of
++ * fingers detected. A return value of 0 means at least one of the
++ * bitmaps was empty.
++ *
++ * The bitmaps don't have enough data to track fingers, so this function
++ * only generates points representing a bounding box of all contacts.
++ * These points are returned in x1, y1, x2, and y2 when the return value
++ * is greater than 0.
++ */
++static int alps_process_bitmap(unsigned int x_map, unsigned int y_map,
++ int *x1, int *y1, int *x2, int *y2)
++{
++ struct alps_bitmap_point {
++ int start_bit;
++ int num_bits;
++ };
++
++ int fingers_x = 0, fingers_y = 0, fingers;
++ int i, bit, prev_bit;
++ struct alps_bitmap_point x_low = {0,}, x_high = {0,};
++ struct alps_bitmap_point y_low = {0,}, y_high = {0,};
++ struct alps_bitmap_point *point;
++
++ if (!x_map || !y_map)
++ return 0;
++
++ *x1 = *y1 = *x2 = *y2 = 0;
++
++ prev_bit = 0;
++ point = &x_low;
++ for (i = 0; x_map != 0; i++, x_map >>= 1) {
++ bit = x_map & 1;
++ if (bit) {
++ if (!prev_bit) {
++ point->start_bit = i;
++ fingers_x++;
++ }
++ point->num_bits++;
++ } else {
++ if (prev_bit)
++ point = &x_high;
++ else
++ point->num_bits = 0;
++ }
++ prev_bit = bit;
++ }
++
++ /*
++ * y bitmap is reversed for what we need (lower positions are in
++ * higher bits), so we process from the top end.
++ */
++ y_map = y_map << (sizeof(y_map) * BITS_PER_BYTE - ALPS_BITMAP_Y_BITS);
++ prev_bit = 0;
++ point = &y_low;
++ for (i = 0; y_map != 0; i++, y_map <<= 1) {
++ bit = y_map & (1 << (sizeof(y_map) * BITS_PER_BYTE - 1));
++ if (bit) {
++ if (!prev_bit) {
++ point->start_bit = i;
++ fingers_y++;
++ }
++ point->num_bits++;
++ } else {
++ if (prev_bit)
++ point = &y_high;
++ else
++ point->num_bits = 0;
++ }
++ prev_bit = bit;
++ }
++
++ /*
++ * Fingers can overlap, so we use the maximum count of fingers
++ * on either axis as the finger count.
++ */
++ fingers = max(fingers_x, fingers_y);
++
++ /*
++ * If total fingers is > 1 but either axis reports only a single
++ * contact, we have overlapping or adjacent fingers. For the
++ * purposes of creating a bounding box, divide the single contact
++ * (roughly) equally between the two points.
++ */
++ if (fingers > 1) {
++ if (fingers_x == 1) {
++ i = x_low.num_bits / 2;
++ x_low.num_bits = x_low.num_bits - i;
++ x_high.start_bit = x_low.start_bit + i;
++ x_high.num_bits = max(i, 1);
++ } else if (fingers_y == 1) {
++ i = y_low.num_bits / 2;
++ y_low.num_bits = y_low.num_bits - i;
++ y_high.start_bit = y_low.start_bit + i;
++ y_high.num_bits = max(i, 1);
++ }
++ }
++
++ *x1 = (ALPS_V3_X_MAX * (2 * x_low.start_bit + x_low.num_bits - 1)) /
++ (2 * (ALPS_BITMAP_X_BITS - 1));
++ *y1 = (ALPS_V3_Y_MAX * (2 * y_low.start_bit + y_low.num_bits - 1)) /
++ (2 * (ALPS_BITMAP_Y_BITS - 1));
++
++ if (fingers > 1) {
++ *x2 = (ALPS_V3_X_MAX * (2 * x_high.start_bit + x_high.num_bits - 1)) /
++ (2 * (ALPS_BITMAP_X_BITS - 1));
++ *y2 = (ALPS_V3_Y_MAX * (2 * y_high.start_bit + y_high.num_bits - 1)) /
++ (2 * (ALPS_BITMAP_Y_BITS - 1));
++ }
++
++ return fingers;
++}
++
++static void alps_set_slot(struct input_dev *dev, int slot, bool active,
++ int x, int y)
++{
++ input_mt_slot(dev, slot);
++ input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
++ if (active) {
++ input_report_abs(dev, ABS_MT_POSITION_X, x);
++ input_report_abs(dev, ABS_MT_POSITION_Y, y);
++ }
++}
++
++static void alps_report_semi_mt_data(struct input_dev *dev, int num_fingers,
++ int x1, int y1, int x2, int y2)
++{
++ alps_set_slot(dev, 0, num_fingers != 0, x1, y1);
++ alps_set_slot(dev, 1, num_fingers == 2, x2, y2);
++}
++
++static void alps_process_trackstick_packet_v3(struct psmouse *psmouse)
++{
++ struct alps_data *priv = psmouse->private;
++ unsigned char *packet = psmouse->packet;
++ struct input_dev *dev = priv->dev2;
++ int x, y, z, left, right, middle;
++
++ /* Sanity check packet */
++ if (!(packet[0] & 0x40)) {
++ psmouse_dbg(psmouse, "Bad trackstick packet, discarding\n");
++ return;
++ }
++
++ /*
++ * There's a special packet that seems to indicate the end
++ * of a stream of trackstick data. Filter these out.
++ */
++ if (packet[1] == 0x7f && packet[2] == 0x7f && packet[4] == 0x7f)
++ return;
++
++ x = (s8)(((packet[0] & 0x20) << 2) | (packet[1] & 0x7f));
++ y = (s8)(((packet[0] & 0x10) << 3) | (packet[2] & 0x7f));
++ z = (packet[4] & 0x7c) >> 2;
++
++ /*
++ * The x and y values tend to be quite large, and when used
++ * alone the trackstick is difficult to use. Scale them down
++ * to compensate.
++ */
++ x /= 8;
++ y /= 8;
++
++ input_report_rel(dev, REL_X, x);
++ input_report_rel(dev, REL_Y, -y);
++
++ /*
++ * Most ALPS models report the trackstick buttons in the touchpad
++ * packets, but a few report them here. No reliable way has been
++ * found to differentiate between the models upfront, so we enable
++ * the quirk in response to seeing a button press in the trackstick
++ * packet.
++ */
++ left = packet[3] & 0x01;
++ right = packet[3] & 0x02;
++ middle = packet[3] & 0x04;
++
++ if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS) &&
++ (left || right || middle))
++ priv->quirks |= ALPS_QUIRK_TRACKSTICK_BUTTONS;
++
++ if (priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS) {
++ input_report_key(dev, BTN_LEFT, left);
++ input_report_key(dev, BTN_RIGHT, right);
++ input_report_key(dev, BTN_MIDDLE, middle);
++ }
++
++ input_sync(dev);
++ return;
++}
++
++static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
++{
++ struct alps_data *priv = psmouse->private;
++ unsigned char *packet = psmouse->packet;
++ struct input_dev *dev = psmouse->dev;
++ struct input_dev *dev2 = priv->dev2;
++ int x, y, z;
++ int left, right, middle;
++ int x1 = 0, y1 = 0, x2 = 0, y2 = 0;
++ int fingers = 0, bmap_fingers;
++ unsigned int x_bitmap, y_bitmap;
++
++ /*
++ * There's no single feature of touchpad position and bitmap packets
++ * that can be used to distinguish between them. We rely on the fact
++ * that a bitmap packet should always follow a position packet with
++ * bit 6 of packet[4] set.
++ */
++ if (priv->multi_packet) {
++ /*
++ * Sometimes a position packet will indicate a multi-packet
++ * sequence, but then what follows is another position
++ * packet. Check for this, and when it happens process the
++ * position packet as usual.
++ */
++ if (packet[0] & 0x40) {
++ fingers = (packet[5] & 0x3) + 1;
++ x_bitmap = ((packet[4] & 0x7e) << 8) |
++ ((packet[1] & 0x7f) << 2) |
++ ((packet[0] & 0x30) >> 4);
++ y_bitmap = ((packet[3] & 0x70) << 4) |
++ ((packet[2] & 0x7f) << 1) |
++ (packet[4] & 0x01);
++
++ bmap_fingers = alps_process_bitmap(x_bitmap, y_bitmap,
++ &x1, &y1, &x2, &y2);
++
++ /*
++ * We shouldn't report more than one finger if
++ * we don't have two coordinates.
++ */
++ if (fingers > 1 && bmap_fingers < 2)
++ fingers = bmap_fingers;
++
++ /* Now process position packet */
++ packet = priv->multi_data;
++ } else {
++ priv->multi_packet = 0;
++ }
++ }
++
++ /*
++ * Bit 6 of byte 0 is not usually set in position packets. The only
++ * times it seems to be set is in situations where the data is
++ * suspect anyway, e.g. a palm resting flat on the touchpad. Given
++ * this combined with the fact that this bit is useful for filtering
++ * out misidentified bitmap packets, we reject anything with this
++ * bit set.
++ */
++ if (packet[0] & 0x40)
++ return;
++
++ if (!priv->multi_packet && (packet[4] & 0x40)) {
++ priv->multi_packet = 1;
++ memcpy(priv->multi_data, packet, sizeof(priv->multi_data));
++ return;
++ }
++
++ priv->multi_packet = 0;
++
++ left = packet[3] & 0x01;
++ right = packet[3] & 0x02;
++ middle = packet[3] & 0x04;
++
++ x = ((packet[1] & 0x7f) << 4) | ((packet[4] & 0x30) >> 2) |
++ ((packet[0] & 0x30) >> 4);
++ y = ((packet[2] & 0x7f) << 4) | (packet[4] & 0x0f);
++ z = packet[5] & 0x7f;
++
++ /*
++ * Sometimes the hardware sends a single packet with z = 0
++ * in the middle of a stream. Real releases generate packets
++ * with x, y, and z all zero, so these seem to be flukes.
++ * Ignore them.
++ */
++ if (x && y && !z)
++ return;
++
++ /*
++ * If we don't have MT data or the bitmaps were empty, we have
++ * to rely on ST data.
++ */
++ if (!fingers) {
++ x1 = x;
++ y1 = y;
++ fingers = z > 0 ? 1 : 0;
++ }
++
++ if (z >= 64)
++ input_report_key(dev, BTN_TOUCH, 1);
++ else
++ input_report_key(dev, BTN_TOUCH, 0);
++
++ alps_report_semi_mt_data(dev, fingers, x1, y1, x2, y2);
++
++ input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
++ input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
++ input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
++ input_report_key(dev, BTN_TOOL_QUADTAP, fingers == 4);
++
++ input_report_key(dev, BTN_LEFT, left);
++ input_report_key(dev, BTN_RIGHT, right);
++ input_report_key(dev, BTN_MIDDLE, middle);
++
++ if (z > 0) {
++ input_report_abs(dev, ABS_X, x);
++ input_report_abs(dev, ABS_Y, y);
++ }
++ input_report_abs(dev, ABS_PRESSURE, z);
++
++ input_sync(dev);
++
++ if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS)) {
++ left = packet[3] & 0x10;
++ right = packet[3] & 0x20;
++ middle = packet[3] & 0x40;
++
++ input_report_key(dev2, BTN_LEFT, left);
++ input_report_key(dev2, BTN_RIGHT, right);
++ input_report_key(dev2, BTN_MIDDLE, middle);
++ input_sync(dev2);
++ }
++}
++
++static void alps_process_packet_v3(struct psmouse *psmouse)
++{
++ unsigned char *packet = psmouse->packet;
++
++ /*
++ * v3 protocol packets come in three types, two representing
++ * touchpad data and one representing trackstick data.
++ * Trackstick packets seem to be distinguished by always
++ * having 0x3f in the last byte. This value has never been
++ * observed in the last byte of either of the other types
++ * of packets.
++ */
++ if (packet[5] == 0x3f) {
++ alps_process_trackstick_packet_v3(psmouse);
++ return;
++ }
++
++ alps_process_touchpad_packet_v3(psmouse);
++}
++
++static void alps_process_packet_v4(struct psmouse *psmouse)
++{
++ unsigned char *packet = psmouse->packet;
++ struct input_dev *dev = psmouse->dev;
++ int x, y, z;
++ int left, right;
++
++ left = packet[4] & 0x01;
++ right = packet[4] & 0x02;
++
++ x = ((packet[1] & 0x7f) << 4) | ((packet[3] & 0x30) >> 2) |
++ ((packet[0] & 0x30) >> 4);
++ y = ((packet[2] & 0x7f) << 4) | (packet[3] & 0x0f);
++ z = packet[5] & 0x7f;
++
++ if (z >= 64)
++ input_report_key(dev, BTN_TOUCH, 1);
++ else
++ input_report_key(dev, BTN_TOUCH, 0);
++
++ if (z > 0) {
++ input_report_abs(dev, ABS_X, x);
++ input_report_abs(dev, ABS_Y, y);
++ }
++ input_report_abs(dev, ABS_PRESSURE, z);
++
++ input_report_key(dev, BTN_TOOL_FINGER, z > 0);
++ input_report_key(dev, BTN_LEFT, left);
++ input_report_key(dev, BTN_RIGHT, right);
++
++ input_sync(dev);
++}
++
++static void alps_process_packet(struct psmouse *psmouse)
++{
++ struct alps_data *priv = psmouse->private;
++ const struct alps_model_info *model = priv->i;
++
++ switch (model->proto_version) {
++ case ALPS_PROTO_V1:
++ case ALPS_PROTO_V2:
++ alps_process_packet_v1_v2(psmouse);
++ break;
++ case ALPS_PROTO_V3:
++ alps_process_packet_v3(psmouse);
++ break;
++ case ALPS_PROTO_V4:
++ alps_process_packet_v4(psmouse);
++ break;
++ }
++}
++
+ static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
+ unsigned char packet[],
+ bool report_buttons)
+@@ -344,7 +759,7 @@
+
+ serio_pause_rx(psmouse->ps2dev.serio);
+
+- if (psmouse->pktcnt == 6) {
++ if (psmouse->pktcnt == psmouse->pktsize) {
+
+ /*
+ * We did not any more data in reasonable amount of time.
+@@ -395,8 +810,8 @@
+ return PSMOUSE_BAD_DATA;
+ }
+
+- /* Bytes 2 - 6 should have 0 in the highest bit */
+- if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= 6 &&
++ /* Bytes 2 - pktsize should have 0 in the highest bit */
++ if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
+ (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
+ psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
+ psmouse->pktcnt - 1,
+@@ -404,7 +819,7 @@
+ return PSMOUSE_BAD_DATA;
+ }
+
+- if (psmouse->pktcnt == 6) {
++ if (psmouse->pktcnt == psmouse->pktsize) {
+ alps_process_packet(psmouse);
+ return PSMOUSE_FULL_PACKET;
+ }
+@@ -412,11 +827,127 @@
+ return PSMOUSE_GOOD_DATA;
+ }
+
++static int alps_command_mode_send_nibble(struct psmouse *psmouse, int nibble)
++{
++ struct ps2dev *ps2dev = &psmouse->ps2dev;
++ struct alps_data *priv = psmouse->private;
++ int command;
++ unsigned char *param;
++ unsigned char dummy[4];
++
++ BUG_ON(nibble > 0xf);
++
++ command = priv->nibble_commands[nibble].command;
++ param = (command & 0x0f00) ?
++ dummy : (unsigned char *)&priv->nibble_commands[nibble].data;
++
++ if (ps2_command(ps2dev, param, command))
++ return -1;
++
++ return 0;
++}
++
++static int alps_command_mode_set_addr(struct psmouse *psmouse, int addr)
++{
++ struct ps2dev *ps2dev = &psmouse->ps2dev;
++ struct alps_data *priv = psmouse->private;
++ int i, nibble;
++
++ if (ps2_command(ps2dev, NULL, priv->addr_command))
++ return -1;
++
++ for (i = 12; i >= 0; i -= 4) {
++ nibble = (addr >> i) & 0xf;
++ if (alps_command_mode_send_nibble(psmouse, nibble))
++ return -1;
++ }
++
++ return 0;
++}
++
++static int __alps_command_mode_read_reg(struct psmouse *psmouse, int addr)
++{
++ struct ps2dev *ps2dev = &psmouse->ps2dev;
++ unsigned char param[4];
++
++ if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
++ return -1;
++
++ /*
++ * The address being read is returned in the first two bytes
++ * of the result. Check that this address matches the expected
++ * address.
++ */
++ if (addr != ((param[0] << 8) | param[1]))
++ return -1;
++
++ return param[2];
++}
++
++static int alps_command_mode_read_reg(struct psmouse *psmouse, int addr)
++{
++ if (alps_command_mode_set_addr(psmouse, addr))
++ return -1;
++ return __alps_command_mode_read_reg(psmouse, addr);
++}
++
++static int __alps_command_mode_write_reg(struct psmouse *psmouse, u8 value)
++{
++ if (alps_command_mode_send_nibble(psmouse, (value >> 4) & 0xf))
++ return -1;
++ if (alps_command_mode_send_nibble(psmouse, value & 0xf))
++ return -1;
++ return 0;
++}
++
++static int alps_command_mode_write_reg(struct psmouse *psmouse, int addr,
++ u8 value)
++{
++ if (alps_command_mode_set_addr(psmouse, addr))
++ return -1;
++ return __alps_command_mode_write_reg(psmouse, value);
++}
++
++static int alps_enter_command_mode(struct psmouse *psmouse,
++ unsigned char *resp)
++{
++ unsigned char param[4];
++ struct ps2dev *ps2dev = &psmouse->ps2dev;
++
++ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
++ ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
++ ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
++ ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
++ psmouse_err(psmouse, "failed to enter command mode\n");
++ return -1;
++ }
++
++ if (param[0] != 0x88 && param[1] != 0x07) {
++ psmouse_dbg(psmouse,
++ "unknown response while entering command mode: %2.2x %2.2x %2.2x\n",
++ param[0], param[1], param[2]);
++ return -1;
++ }
++
++ if (resp)
++ *resp = param[2];
++ return 0;
++}
++
++static inline int alps_exit_command_mode(struct psmouse *psmouse)
++{
++ struct ps2dev *ps2dev = &psmouse->ps2dev;
++ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM))
++ return -1;
++ return 0;
++}
++
+ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int *version)
+ {
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ static const unsigned char rates[] = { 0, 10, 20, 40, 60, 80, 100, 200 };
+ unsigned char param[4];
++ const struct alps_model_info *model = NULL;
+ int i;
+
+ /*
+@@ -464,12 +995,41 @@
+ *version = (param[0] << 8) | (param[1] << 4) | i;
+ }
+
+- for (i = 0; i < ARRAY_SIZE(alps_model_data); i++)
++ for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
+ if (!memcmp(param, alps_model_data[i].signature,
+- sizeof(alps_model_data[i].signature)))
+- return alps_model_data + i;
++ sizeof(alps_model_data[i].signature))) {
++ model = alps_model_data + i;
++ break;
++ }
++ }
++
++ if (model && model->proto_version > ALPS_PROTO_V2) {
++ /*
++ * Need to check command mode response to identify
++ * model
++ */
++ model = NULL;
++ if (alps_enter_command_mode(psmouse, param)) {
++ psmouse_warn(psmouse,
++ "touchpad failed to enter command mode\n");
++ } else {
++ for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
++ if (alps_model_data[i].proto_version > ALPS_PROTO_V2 &&
++ alps_model_data[i].command_mode_resp == param[0]) {
++ model = alps_model_data + i;
++ break;
++ }
++ }
++ alps_exit_command_mode(psmouse);
++
++ if (!model)
++ psmouse_dbg(psmouse,
++ "Unknown command mode response %2.2x\n",
++ param[0]);
++ }
++ }
+
+- return NULL;
++ return model;
+ }
+
+ /*
+@@ -477,7 +1037,7 @@
+ * subsequent commands. It looks like glidepad is behind stickpointer,
+ * I'd thought it would be other way around...
+ */
+-static int alps_passthrough_mode(struct psmouse *psmouse, bool enable)
++static int alps_passthrough_mode_v2(struct psmouse *psmouse, bool enable)
+ {
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ int cmd = enable ? PSMOUSE_CMD_SETSCALE21 : PSMOUSE_CMD_SETSCALE11;
+@@ -494,7 +1054,7 @@
+ return 0;
+ }
+
+-static int alps_absolute_mode(struct psmouse *psmouse)
++static int alps_absolute_mode_v1_v2(struct psmouse *psmouse)
+ {
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+@@ -565,17 +1125,17 @@
+ static int alps_poll(struct psmouse *psmouse)
+ {
+ struct alps_data *priv = psmouse->private;
+- unsigned char buf[6];
++ unsigned char buf[sizeof(psmouse->packet)];
+ bool poll_failed;
+
+ if (priv->i->flags & ALPS_PASS)
+- alps_passthrough_mode(psmouse, true);
++ alps_passthrough_mode_v2(psmouse, true);
+
+ poll_failed = ps2_command(&psmouse->ps2dev, buf,
+ PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)) < 0;
+
+ if (priv->i->flags & ALPS_PASS)
+- alps_passthrough_mode(psmouse, false);
++ alps_passthrough_mode_v2(psmouse, false);
+
+ if (poll_failed || (buf[0] & priv->i->mask0) != priv->i->byte0)
+ return -1;
+@@ -592,13 +1152,13 @@
+ return 0;
+ }
+
+-static int alps_hw_init(struct psmouse *psmouse)
++static int alps_hw_init_v1_v2(struct psmouse *psmouse)
+ {
+ struct alps_data *priv = psmouse->private;
+ const struct alps_model_info *model = priv->i;
+
+ if ((model->flags & ALPS_PASS) &&
+- alps_passthrough_mode(psmouse, true)) {
++ alps_passthrough_mode_v2(psmouse, true)) {
+ return -1;
+ }
+
+@@ -607,13 +1167,13 @@
+ return -1;
+ }
+
+- if (alps_absolute_mode(psmouse)) {
++ if (alps_absolute_mode_v1_v2(psmouse)) {
+ psmouse_err(psmouse, "Failed to enable absolute mode\n");
+ return -1;
+ }
+
+ if ((model->flags & ALPS_PASS) &&
+- alps_passthrough_mode(psmouse, false)) {
++ alps_passthrough_mode_v2(psmouse, false)) {
+ return -1;
+ }
+
+@@ -626,6 +1186,297 @@
+ return 0;
+ }
+
++/*
++ * Enable or disable passthrough mode to the trackstick. Must be in
++ * command mode when calling this function.
++ */
++static int alps_passthrough_mode_v3(struct psmouse *psmouse, bool enable)
++{
++ int reg_val;
++
++ reg_val = alps_command_mode_read_reg(psmouse, 0x0008);
++ if (reg_val == -1)
++ return -1;
++
++ if (enable)
++ reg_val |= 0x01;
++ else
++ reg_val &= ~0x01;
++
++ if (__alps_command_mode_write_reg(psmouse, reg_val))
++ return -1;
++
++ return 0;
++}
++
++/* Must be in command mode when calling this function */
++static int alps_absolute_mode_v3(struct psmouse *psmouse)
++{
++ int reg_val;
++
++ reg_val = alps_command_mode_read_reg(psmouse, 0x0004);
++ if (reg_val == -1)
++ return -1;
++
++ reg_val |= 0x06;
++ if (__alps_command_mode_write_reg(psmouse, reg_val))
++ return -1;
++
++ return 0;
++}
++
++static int alps_hw_init_v3(struct psmouse *psmouse)
++{
++ struct alps_data *priv = psmouse->private;
++ struct ps2dev *ps2dev = &psmouse->ps2dev;
++ int reg_val;
++ unsigned char param[4];
++
++ priv->nibble_commands = alps_v3_nibble_commands;
++ priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
++
++ if (alps_enter_command_mode(psmouse, NULL))
++ goto error;
++
++ /* Check for trackstick */
++ reg_val = alps_command_mode_read_reg(psmouse, 0x0008);
++ if (reg_val == -1)
++ goto error;
++ if (reg_val & 0x80) {
++ if (alps_passthrough_mode_v3(psmouse, true))
++ goto error;
++ if (alps_exit_command_mode(psmouse))
++ goto error;
++
++ /*
++ * E7 report for the trackstick
++ *
++ * There have been reports of failures to seem to trace back
++ * to the above trackstick check failing. When these occur
++ * this E7 report fails, so when that happens we continue
++ * with the assumption that there isn't a trackstick after
++ * all.
++ */
++ param[0] = 0x64;
++ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
++ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
++ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
++ ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
++ psmouse_warn(psmouse, "trackstick E7 report failed\n");
++ } else {
++ psmouse_dbg(psmouse,
++ "trackstick E7 report: %2.2x %2.2x %2.2x\n",
++ param[0], param[1], param[2]);
++
++ /*
++ * Not sure what this does, but it is absolutely
++ * essential. Without it, the touchpad does not
++ * work at all and the trackstick just emits normal
++ * PS/2 packets.
++ */
++ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
++ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
++ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
++ alps_command_mode_send_nibble(psmouse, 0x9) ||
++ alps_command_mode_send_nibble(psmouse, 0x4)) {
++ psmouse_err(psmouse,
++ "Error sending magic E6 sequence\n");
++ goto error_passthrough;
++ }
++ }
++
++ if (alps_enter_command_mode(psmouse, NULL))
++ goto error_passthrough;
++ if (alps_passthrough_mode_v3(psmouse, false))
++ goto error;
++ }
++
++ if (alps_absolute_mode_v3(psmouse)) {
++ psmouse_err(psmouse, "Failed to enter absolute mode\n");
++ goto error;
++ }
++
++ reg_val = alps_command_mode_read_reg(psmouse, 0x0006);
++ if (reg_val == -1)
++ goto error;
++ if (__alps_command_mode_write_reg(psmouse, reg_val | 0x01))
++ goto error;
++
++ reg_val = alps_command_mode_read_reg(psmouse, 0x0007);
++ if (reg_val == -1)
++ goto error;
++ if (__alps_command_mode_write_reg(psmouse, reg_val | 0x01))
++ goto error;
++
++ if (alps_command_mode_read_reg(psmouse, 0x0144) == -1)
++ goto error;
++ if (__alps_command_mode_write_reg(psmouse, 0x04))
++ goto error;
++
++ if (alps_command_mode_read_reg(psmouse, 0x0159) == -1)
++ goto error;
++ if (__alps_command_mode_write_reg(psmouse, 0x03))
++ goto error;
++
++ if (alps_command_mode_read_reg(psmouse, 0x0163) == -1)
++ goto error;
++ if (alps_command_mode_write_reg(psmouse, 0x0163, 0x03))
++ goto error;
++
++ if (alps_command_mode_read_reg(psmouse, 0x0162) == -1)
++ goto error;
++ if (alps_command_mode_write_reg(psmouse, 0x0162, 0x04))
++ goto error;
++
++ /*
++ * This ensures the trackstick packets are in the format
++ * supported by this driver. If bit 1 isn't set the packet
++ * format is different.
++ */
++ if (alps_command_mode_write_reg(psmouse, 0x0008, 0x82))
++ goto error;
++
++ alps_exit_command_mode(psmouse);
++
++ /* Set rate and enable data reporting */
++ param[0] = 0x64;
++ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE) ||
++ ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
++ psmouse_err(psmouse, "Failed to enable data reporting\n");
++ return -1;
++ }
++
++ return 0;
++
++error_passthrough:
++ /* Something failed while in passthrough mode, so try to get out */
++ if (!alps_enter_command_mode(psmouse, NULL))
++ alps_passthrough_mode_v3(psmouse, false);
++error:
++ /*
++ * Leaving the touchpad in command mode will essentially render
++ * it unusable until the machine reboots, so exit it here just
++ * to be safe
++ */
++ alps_exit_command_mode(psmouse);
++ return -1;
++}
++
++/* Must be in command mode when calling this function */
++static int alps_absolute_mode_v4(struct psmouse *psmouse)
++{
++ int reg_val;
++
++ reg_val = alps_command_mode_read_reg(psmouse, 0x0004);
++ if (reg_val == -1)
++ return -1;
++
++ reg_val |= 0x02;
++ if (__alps_command_mode_write_reg(psmouse, reg_val))
++ return -1;
++
++ return 0;
++}
++
++static int alps_hw_init_v4(struct psmouse *psmouse)
++{
++ struct alps_data *priv = psmouse->private;
++ struct ps2dev *ps2dev = &psmouse->ps2dev;
++ unsigned char param[4];
++
++ priv->nibble_commands = alps_v4_nibble_commands;
++ priv->addr_command = PSMOUSE_CMD_DISABLE;
++
++ if (alps_enter_command_mode(psmouse, NULL))
++ goto error;
++
++ if (alps_absolute_mode_v4(psmouse)) {
++ psmouse_err(psmouse, "Failed to enter absolute mode\n");
++ goto error;
++ }
++
++ if (alps_command_mode_write_reg(psmouse, 0x0007, 0x8c))
++ goto error;
++
++ if (alps_command_mode_write_reg(psmouse, 0x0149, 0x03))
++ goto error;
++
++ if (alps_command_mode_write_reg(psmouse, 0x0160, 0x03))
++ goto error;
++
++ if (alps_command_mode_write_reg(psmouse, 0x017f, 0x15))
++ goto error;
++
++ if (alps_command_mode_write_reg(psmouse, 0x0151, 0x01))
++ goto error;
++
++ if (alps_command_mode_write_reg(psmouse, 0x0168, 0x03))
++ goto error;
++
++ if (alps_command_mode_write_reg(psmouse, 0x014a, 0x03))
++ goto error;
++
++ if (alps_command_mode_write_reg(psmouse, 0x0161, 0x03))
++ goto error;
++
++ alps_exit_command_mode(psmouse);
++
++ /*
++ * This sequence changes the output from a 9-byte to an
++ * 8-byte format. All the same data seems to be present,
++ * just in a more compact format.
++ */
++ param[0] = 0xc8;
++ param[1] = 0x64;
++ param[2] = 0x50;
++ if (ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) ||
++ ps2_command(ps2dev, &param[1], PSMOUSE_CMD_SETRATE) ||
++ ps2_command(ps2dev, &param[2], PSMOUSE_CMD_SETRATE) ||
++ ps2_command(ps2dev, param, PSMOUSE_CMD_GETID))
++ return -1;
++
++ /* Set rate and enable data reporting */
++ param[0] = 0x64;
++ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE) ||
++ ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
++ psmouse_err(psmouse, "Failed to enable data reporting\n");
++ return -1;
++ }
++
++ return 0;
++
++error:
++ /*
++ * Leaving the touchpad in command mode will essentially render
++ * it unusable until the machine reboots, so exit it here just
++ * to be safe
++ */
++ alps_exit_command_mode(psmouse);
++ return -1;
++}
++
++static int alps_hw_init(struct psmouse *psmouse)
++{
++ struct alps_data *priv = psmouse->private;
++ const struct alps_model_info *model = priv->i;
++ int ret = -1;
++
++ switch (model->proto_version) {
++ case ALPS_PROTO_V1:
++ case ALPS_PROTO_V2:
++ ret = alps_hw_init_v1_v2(psmouse);
++ break;
++ case ALPS_PROTO_V3:
++ ret = alps_hw_init_v3(psmouse);
++ break;
++ case ALPS_PROTO_V4:
++ ret = alps_hw_init_v4(psmouse);
++ break;
++ }
++
++ return ret;
++}
++
+ static int alps_reconnect(struct psmouse *psmouse)
+ {
+ const struct alps_model_info *model;
+@@ -666,6 +1517,8 @@
+
+ psmouse->private = priv;
+
++ psmouse_reset(psmouse);
++
+ model = alps_get_model(psmouse, &version);
+ if (!model)
+ goto init_fail;
+@@ -693,8 +1546,29 @@
+ BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
+
+ dev1->evbit[BIT_WORD(EV_ABS)] |= BIT_MASK(EV_ABS);
+- input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
+- input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
++
++ switch (model->proto_version) {
++ case ALPS_PROTO_V1:
++ case ALPS_PROTO_V2:
++ input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
++ input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
++ break;
++ case ALPS_PROTO_V3:
++ set_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
++ input_mt_init_slots(dev1, 2);
++ input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, ALPS_V3_X_MAX, 0, 0);
++ input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, ALPS_V3_Y_MAX, 0, 0);
++
++ set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit);
++ set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit);
++ set_bit(BTN_TOOL_QUADTAP, dev1->keybit);
++ /* fall through */
++ case ALPS_PROTO_V4:
++ input_set_abs_params(dev1, ABS_X, 0, ALPS_V3_X_MAX, 0, 0);
++ input_set_abs_params(dev1, ABS_Y, 0, ALPS_V3_Y_MAX, 0, 0);
++ break;
++ }
++
+ input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0);
+
+ if (model->flags & ALPS_WHEEL) {
+@@ -737,7 +1611,7 @@
+ psmouse->poll = alps_poll;
+ psmouse->disconnect = alps_disconnect;
+ psmouse->reconnect = alps_reconnect;
+- psmouse->pktsize = 6;
++ psmouse->pktsize = model->proto_version == ALPS_PROTO_V4 ? 8 : 6;
+
+ /* We are having trouble resyncing ALPS touchpads so disable it for now */
+ psmouse->resync_time = 0;
+diff -ur linux-3.2.old/drivers/input/mouse/alps.h linux-3.2/drivers/input/mouse/alps.h
+--- linux-3.2.old/drivers/input/mouse/alps.h 2012-01-04 17:55:44.000000000 -0600
++++ linux-3.2/drivers/input/mouse/alps.h 2012-01-05 18:53:42.235275879 -0600
+@@ -12,20 +12,39 @@
+ #ifndef _ALPS_H
+ #define _ALPS_H
+
++#define ALPS_PROTO_V1 0
++#define ALPS_PROTO_V2 1
++#define ALPS_PROTO_V3 2
++#define ALPS_PROTO_V4 3
++
+ struct alps_model_info {
+ unsigned char signature[3];
++ unsigned char command_mode_resp; /* v3/v4 only */
++ unsigned char proto_version;
+ unsigned char byte0, mask0;
+ unsigned char flags;
+ };
+
++struct alps_nibble_commands {
++ int command;
++ unsigned char data;
++};
++
+ struct alps_data {
+ struct input_dev *dev2; /* Relative device */
+ char phys[32]; /* Phys */
+ const struct alps_model_info *i;/* Info */
++ const struct alps_nibble_commands *nibble_commands;
++ int addr_command; /* Command to set register address */
+ int prev_fin; /* Finger bit from previous packet */
++ int multi_packet; /* Multi-packet data in progress */
++ unsigned char multi_data[6]; /* Saved multi-packet data */
++ u8 quirks;
+ struct timer_list timer;
+ };
+
++#define ALPS_QUIRK_TRACKSTICK_BUTTONS 1 /* trakcstick buttons in trackstick packet */
++
+ #ifdef CONFIG_MOUSE_PS2_ALPS
+ int alps_detect(struct psmouse *psmouse, bool set_properties);
+ int alps_init(struct psmouse *psmouse);
+diff -ur linux-3.2.old/drivers/input/mouse/psmouse.h linux-3.2/drivers/input/mouse/psmouse.h
+--- linux-3.2.old/drivers/input/mouse/psmouse.h 2012-01-04 17:55:44.000000000 -0600
++++ linux-3.2/drivers/input/mouse/psmouse.h 2012-01-05 18:53:42.231275880 -0600
+@@ -8,6 +8,7 @@
+ #define PSMOUSE_CMD_SETSTREAM 0x00ea
+ #define PSMOUSE_CMD_SETPOLL 0x00f0
+ #define PSMOUSE_CMD_POLL 0x00eb /* caller sets number of bytes to receive */
++#define PSMOUSE_CMD_RESET_WRAP 0x00ec
+ #define PSMOUSE_CMD_GETID 0x02f2
+ #define PSMOUSE_CMD_SETRATE 0x10f3
+ #define PSMOUSE_CMD_ENABLE 0x00f4
diff --git a/2700_drm-i915-resurrect-panel-lid-handling.patch b/2700_drm-i915-resurrect-panel-lid-handling.patch
new file mode 100644
index 00000000..49f3e8d8
--- /dev/null
+++ b/2700_drm-i915-resurrect-panel-lid-handling.patch
@@ -0,0 +1,117 @@
+From a726915cef1daab57aad4c5b5e4773822f0a4bf8 Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Tue, 20 Nov 2012 14:50:08 +0100
+Subject: [PATCH] drm/i915: resurrect panel lid handling
+
+But disabled by default. This essentially reverts
+
+commit bcd5023c961a44c7149936553b6929b2b233dd27
+Author: Dave Airlie <airlied@redhat.com>
+Date: Mon Mar 14 14:17:55 2011 +1000
+
+ drm/i915: disable opregion lid detection for now
+
+but leaves the autodetect mode disabled. There's also the explicit lid
+status option added in
+
+commit fca874092597ef946b8f07031d8c31c58b212144
+Author: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Thu Feb 17 13:44:48 2011 +0000
+
+ drm/i915: Add a module parameter to ignore lid status
+
+Which overloaded the meaning for the panel_ignore_lid parameter even
+more. To fix up this mess, give the non-negative numbers 0,1 the
+original meaning back and use negative numbers to force a given state.
+So now we have
+
+1 - disable autodetect, return unknown
+0 - enable autodetect
+-1 - force to disconnected/lid closed
+-2 - force to connected/lid open
+
+v2: My C programmer license has been revoked ...
+
+v3: Beautify the code a bit, as suggested by Chris Wilson.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=27622
+Tested-by: Andreas Sturmlechner <andreas.sturmlechner@gmail.com>
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+---
+ drivers/gpu/drm/i915/i915_drv.c | 6 +++---
+ drivers/gpu/drm/i915/intel_panel.c | 25 +++++++++++--------------
+ 2 files changed, 14 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index a3d754d..f5b505a 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -47,11 +47,11 @@ MODULE_PARM_DESC(modeset,
+ unsigned int i915_fbpercrtc __always_unused = 0;
+ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
+
+-int i915_panel_ignore_lid __read_mostly = 0;
++int i915_panel_ignore_lid __read_mostly = 1;
+ module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
+ MODULE_PARM_DESC(panel_ignore_lid,
+- "Override lid status (0=autodetect [default], 1=lid open, "
+- "-1=lid closed)");
++ "Override lid status (0=autodetect, 1=autodetect disabled [default], "
++ "-1=force lid closed, -2=force lid open)");
+
+ unsigned int i915_powersave __read_mostly = 1;
+ module_param_named(powersave, i915_powersave, int, 0600);
+diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
+index 41d4635..c758ad2 100644
+--- a/drivers/gpu/drm/i915/intel_panel.c
++++ b/drivers/gpu/drm/i915/intel_panel.c
+@@ -275,7 +275,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
+ }
+
+ tmp = I915_READ(BLC_PWM_CTL);
+- if (INTEL_INFO(dev)->gen < 4)
++ if (INTEL_INFO(dev)->gen < 4)
+ level <<= 1;
+ tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_CTL, tmp | level);
+@@ -374,26 +374,23 @@ static void intel_panel_init_backlight(struct drm_device *dev)
+ enum drm_connector_status
+ intel_panel_detect(struct drm_device *dev)
+ {
+-#if 0
+ struct drm_i915_private *dev_priv = dev->dev_private;
+-#endif
+-
+- if (i915_panel_ignore_lid)
+- return i915_panel_ignore_lid > 0 ?
+- connector_status_connected :
+- connector_status_disconnected;
+
+- /* opregion lid state on HP 2540p is wrong at boot up,
+- * appears to be either the BIOS or Linux ACPI fault */
+-#if 0
+ /* Assume that the BIOS does not lie through the OpRegion... */
+- if (dev_priv->opregion.lid_state)
++ if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
+ return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
+ connector_status_connected :
+ connector_status_disconnected;
+-#endif
++ }
+
+- return connector_status_unknown;
++ switch (i915_panel_ignore_lid) {
++ case -2:
++ return connector_status_connected;
++ case -1:
++ return connector_status_disconnected;
++ default:
++ return connector_status_unknown;
++ }
+ }
+
+ #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+--
+1.8.3.2
+
diff --git a/2705_ThinkPad-30-brightness-control-fix.patch b/2705_ThinkPad-30-brightness-control-fix.patch
new file mode 100644
index 00000000..cde968d0
--- /dev/null
+++ b/2705_ThinkPad-30-brightness-control-fix.patch
@@ -0,0 +1,81 @@
+diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
+index cb96296..6c242ed 100644
+--- a/drivers/acpi/blacklist.c
++++ b/drivers/acpi/blacklist.c
+@@ -193,6 +193,13 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
+ return 0;
+ }
+
++static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
++{
++ printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
++ acpi_osi_setup("!Windows 2012");
++ return 0;
++}
++
+ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ {
+ .callback = dmi_disable_osi_vista,
+@@ -269,6 +276,61 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ },
+
+ /*
++ * The following Lenovo models have a broken workaround in the
++ * acpi_video backlight implementation to meet the Windows 8
++ * requirement of 101 backlight levels. Reverting to pre-Win8
++ * behavior fixes the problem.
++ */
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "Lenovo ThinkPad L430",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L430"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "Lenovo ThinkPad T430s",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "Lenovo ThinkPad T530",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T530"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "Lenovo ThinkPad W530",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "Lenovo ThinkPad X1 Carbon",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
++ },
++ },
++ {
++ .callback = dmi_disable_osi_win8,
++ .ident = "Lenovo ThinkPad X230",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
++ },
++ },
++
++ /*
+ * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
+ * Linux ignores it, except for the machines enumerated below.
+ */
+
diff --git a/4200_fbcondecor-0.9.6.patch b/4200_fbcondecor-0.9.6.patch
new file mode 100644
index 00000000..f1a60290
--- /dev/null
+++ b/4200_fbcondecor-0.9.6.patch
@@ -0,0 +1,2358 @@
+diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
+index 30a7054..9b6a733 100644
+--- a/Documentation/fb/00-INDEX
++++ b/Documentation/fb/00-INDEX
+@@ -21,6 +21,8 @@ ep93xx-fb.txt
+ - info on the driver for EP93xx LCD controller.
+ fbcon.txt
+ - intro to and usage guide for the framebuffer console (fbcon).
++fbcondecor.txt
++ - info on the Framebuffer Console Decoration
+ framebuffer.txt
+ - introduction to frame buffer devices.
+ gxfb.txt
+diff --git a/Documentation/fb/fbcondecor.txt b/Documentation/fb/fbcondecor.txt
+new file mode 100644
+index 0000000..15889f3
+--- /dev/null
++++ b/Documentation/fb/fbcondecor.txt
+@@ -0,0 +1,207 @@
++What is it?
++-----------
++
++The framebuffer decorations are a kernel feature which allows displaying a
++background picture on selected consoles.
++
++What do I need to get it to work?
++---------------------------------
++
++To get fbcondecor up-and-running you will have to:
++ 1) get a copy of splashutils [1] or a similar program
++ 2) get some fbcondecor themes
++ 3) build the kernel helper program
++ 4) build your kernel with the FB_CON_DECOR option enabled.
++
++To get fbcondecor operational right after fbcon initialization is finished, you
++will have to include a theme and the kernel helper into your initramfs image.
++Please refer to splashutils documentation for instructions on how to do that.
++
++[1] The splashutils package can be downloaded from:
++ http://dev.gentoo.org/~spock/projects/splashutils/
++
++The userspace helper
++--------------------
++
++The userspace fbcondecor helper (by default: /sbin/fbcondecor_helper) is called by the
++kernel whenever an important event occurs and the kernel needs some kind of
++job to be carried out. Important events include console switches and video
++mode switches (the kernel requests background images and configuration
++parameters for the current console). The fbcondecor helper must be accessible at
++all times. If it's not, fbcondecor will be switched off automatically.
++
++It's possible to set path to the fbcondecor helper by writing it to
++/proc/sys/kernel/fbcondecor.
++
++*****************************************************************************
++
++The information below is mostly technical stuff. There's probably no need to
++read it unless you plan to develop a userspace helper.
++
++The fbcondecor protocol
++-----------------------
++
++The fbcondecor protocol defines a communication interface between the kernel and
++the userspace fbcondecor helper.
++
++The kernel side is responsible for:
++
++ * rendering console text, using an image as a background (instead of a
++ standard solid color fbcon uses),
++ * accepting commands from the user via ioctls on the fbcondecor device,
++ * calling the userspace helper to set things up as soon as the fb subsystem
++ is initialized.
++
++The userspace helper is responsible for everything else, including parsing
++configuration files, decompressing the image files whenever the kernel needs
++it, and communicating with the kernel if necessary.
++
++The fbcondecor protocol specifies how communication is done in both ways:
++kernel->userspace and userspace->helper.
++
++Kernel -> Userspace
++-------------------
++
++The kernel communicates with the userspace helper by calling it and specifying
++the task to be done in a series of arguments.
++
++The arguments follow the pattern:
++<fbcondecor protocol version> <command> <parameters>
++
++All commands defined in fbcondecor protocol v2 have the following parameters:
++ virtual console
++ framebuffer number
++ theme
++
++Fbcondecor protocol v1 specified an additional 'fbcondecor mode' after the
++framebuffer number. Fbcondecor protocol v1 is deprecated and should not be used.
++
++Fbcondecor protocol v2 specifies the following commands:
++
++getpic
++------
++ The kernel issues this command to request image data. It's up to the
++ userspace helper to find a background image appropriate for the specified
++ theme and the current resolution. The userspace helper should respond by
++ issuing the FBIOCONDECOR_SETPIC ioctl.
++
++init
++----
++ The kernel issues this command after the fbcondecor device is created and
++ the fbcondecor interface is initialized. Upon receiving 'init', the userspace
++ helper should parse the kernel command line (/proc/cmdline) or otherwise
++ decide whether fbcondecor is to be activated.
++
++ To activate fbcondecor on the first console the helper should issue the
++ FBIOCONDECOR_SETCFG, FBIOCONDECOR_SETPIC and FBIOCONDECOR_SETSTATE commands,
++ in the above-mentioned order.
++
++ When the userspace helper is called in an early phase of the boot process
++ (right after the initialization of fbcon), no filesystems will be mounted.
++ The helper program should mount sysfs and then create the appropriate
++ framebuffer, fbcondecor and tty0 devices (if they don't already exist) to get
++ current display settings and to be able to communicate with the kernel side.
++ It should probably also mount the procfs to be able to parse the kernel
++ command line parameters.
++
++ Note that the console sem is not held when the kernel calls fbcondecor_helper
++ with the 'init' command. The fbcondecor helper should perform all ioctls with
++ origin set to FBCON_DECOR_IO_ORIG_USER.
++
++modechange
++----------
++ The kernel issues this command on a mode change. The helper's response should
++ be similar to the response to the 'init' command. Note that this time the
++ console sem is held and all ioctls must be performed with origin set to
++ FBCON_DECOR_IO_ORIG_KERNEL.
++
++
++Userspace -> Kernel
++-------------------
++
++Userspace programs can communicate with fbcondecor via ioctls on the
++fbcondecor device. These ioctls are to be used by both the userspace helper
++(called only by the kernel) and userspace configuration tools (run by the users).
++
++The fbcondecor helper should set the origin field to FBCON_DECOR_IO_ORIG_KERNEL
++when doing the appropriate ioctls. All userspace configuration tools should
++use FBCON_DECOR_IO_ORIG_USER. Failure to set the appropriate value in the origin
++field when performing ioctls from the kernel helper will most likely result
++in a console deadlock.
++
++FBCON_DECOR_IO_ORIG_KERNEL instructs fbcondecor not to try to acquire the console
++semaphore. Not surprisingly, FBCON_DECOR_IO_ORIG_USER instructs it to acquire
++the console sem.
++
++The framebuffer console decoration provides the following ioctls (all defined in
++linux/fb.h):
++
++FBIOCONDECOR_SETPIC
++description: loads a background picture for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct fb_image*
++notes:
++If called for consoles other than the current foreground one, the picture data
++will be ignored.
++
++If the current virtual console is running in a 8-bpp mode, the cmap substruct
++of fb_image has to be filled appropriately: start should be set to 16 (first
++16 colors are reserved for fbcon), len to a value <= 240 and red, green and
++blue should point to valid cmap data. The transp field is ingored. The fields
++dx, dy, bg_color, fg_color in fb_image are ignored as well.
++
++FBIOCONDECOR_SETCFG
++description: sets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++notes: The structure has to be filled with valid data.
++
++FBIOCONDECOR_GETCFG
++description: gets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++
++FBIOCONDECOR_SETSTATE
++description: sets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++ values: 0 = disabled, 1 = enabled.
++
++FBIOCONDECOR_GETSTATE
++description: gets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++ values: as in FBIOCONDECOR_SETSTATE
++
++Info on used structures:
++
++Definition of struct vc_decor can be found in linux/console_decor.h. It's
++heavily commented. Note that the 'theme' field should point to a string
++no longer than FBCON_DECOR_THEME_LEN. When FBIOCONDECOR_GETCFG call is
++performed, the theme field should point to a char buffer of length
++FBCON_DECOR_THEME_LEN.
++
++Definition of struct fbcon_decor_iowrapper can be found in linux/fb.h.
++The fields in this struct have the following meaning:
++
++vc:
++Virtual console number.
++
++origin:
++Specifies if the ioctl is performed as a response to a kernel request. The
++fbcondecor helper should set this field to FBCON_DECOR_IO_ORIG_KERNEL, userspace
++programs should set it to FBCON_DECOR_IO_ORIG_USER. This field is necessary to
++avoid console semaphore deadlocks.
++
++data:
++Pointer to a data structure appropriate for the performed ioctl. Type of
++the data struct is specified in the ioctls description.
++
++*****************************************************************************
++
++Credit
++------
++
++Original 'bootsplash' project & implementation by:
++ Volker Poplawski <volker@poplawski.de>, Stefan Reinauer <stepan@suse.de>,
++ Steffen Winterfeldt <snwint@suse.de>, Michael Schroeder <mls@suse.de>,
++ Ken Wimer <wimer@suse.de>.
++
++Fbcondecor, fbcondecor protocol design, current implementation & docs by:
++ Michal Januszewski <spock@gentoo.org>
++
+diff --git a/drivers/Makefile b/drivers/Makefile
+index 09f3232..1929829 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -9,6 +9,10 @@ obj-y += gpio/
+ obj-$(CONFIG_PCI) += pci/
+ obj-$(CONFIG_PARISC) += parisc/
+ obj-$(CONFIG_RAPIDIO) += rapidio/
++# tty/ comes before char/ so that the VT console is the boot-time
++# default.
++obj-y += tty/
++obj-y += char/
+ obj-y += video/
+ obj-y += idle/
+ obj-$(CONFIG_ACPI) += acpi/
+@@ -27,11 +31,6 @@ obj-$(CONFIG_XEN) += xen/
+ # regulators early, since some subsystems rely on them to initialize
+ obj-$(CONFIG_REGULATOR) += regulator/
+
+-# tty/ comes before char/ so that the VT console is the boot-time
+-# default.
+-obj-y += tty/
+-obj-y += char/
+-
+ # gpu/ comes after char for AGP vs DRM startup
+ obj-y += gpu/
+
+diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
+index 549b960..23f98b0 100644
+--- a/drivers/video/Kconfig
++++ b/drivers/video/Kconfig
+@@ -1220,7 +1220,6 @@ config FB_MATROX
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+- select FB_TILEBLITTING
+ select FB_MACMODES if PPC_PMAC
+ ---help---
+ Say Y here if you have a Matrox Millennium, Matrox Millennium II,
+diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
+index 2209e35..abd35ee 100644
+--- a/drivers/video/console/Kconfig
++++ b/drivers/video/console/Kconfig
+@@ -122,6 +122,19 @@ config FRAMEBUFFER_CONSOLE_ROTATION
+ such that other users of the framebuffer will remain normally
+ oriented.
+
++config FB_CON_DECOR
++ bool "Support for the Framebuffer Console Decorations"
++ depends on FRAMEBUFFER_CONSOLE=y && !FB_TILEBLITTING
++ default n
++ ---help---
++ This option enables support for framebuffer console decorations which
++ makes it possible to display images in the background of the system
++ consoles. Note that userspace utilities are necessary in order to take
++ advantage of these features. Refer to Documentation/fb/fbcondecor.txt
++ for more information.
++
++ If unsure, say N.
++
+ config STI_CONSOLE
+ bool "STI text console"
+ depends on PARISC
+diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile
+index a862e91..86bfcff 100644
+--- a/drivers/video/console/Makefile
++++ b/drivers/video/console/Makefile
+@@ -34,6 +34,7 @@ obj-$(CONFIG_FRAMEBUFFER_CONSOLE) += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \
+ fbcon_ccw.o
+ endif
+
++obj-$(CONFIG_FB_CON_DECOR) += fbcondecor.o cfbcondecor.o
+ obj-$(CONFIG_FB_STI) += sticore.o font.o
+
+ ifeq ($(CONFIG_USB_SISUSBVGA_CON),y)
+diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
+index 28b1a83..33712c0 100644
+--- a/drivers/video/console/bitblit.c
++++ b/drivers/video/console/bitblit.c
+@@ -18,6 +18,7 @@
+ #include <linux/console.h>
+ #include <asm/types.h>
+ #include "fbcon.h"
++#include "fbcondecor.h"
+
+ /*
+ * Accelerated handlers.
+@@ -55,6 +56,13 @@ static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ area.height = height * vc->vc_font.height;
+ area.width = width * vc->vc_font.width;
+
++ if (fbcon_decor_active(info, vc)) {
++ area.sx += vc->vc_decor.tx;
++ area.sy += vc->vc_decor.ty;
++ area.dx += vc->vc_decor.tx;
++ area.dy += vc->vc_decor.ty;
++ }
++
+ info->fbops->fb_copyarea(info, &area);
+ }
+
+@@ -380,11 +388,15 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+ cursor.image.depth = 1;
+ cursor.rop = ROP_XOR;
+
+- if (info->fbops->fb_cursor)
+- err = info->fbops->fb_cursor(info, &cursor);
++ if (fbcon_decor_active(info, vc)) {
++ fbcon_decor_cursor(info, &cursor);
++ } else {
++ if (info->fbops->fb_cursor)
++ err = info->fbops->fb_cursor(info, &cursor);
+
+- if (err)
+- soft_cursor(info, &cursor);
++ if (err)
++ soft_cursor(info, &cursor);
++ }
+
+ ops->cursor_reset = 0;
+ }
+diff --git a/drivers/video/console/cfbcondecor.c b/drivers/video/console/cfbcondecor.c
+new file mode 100644
+index 0000000..18a5452
+--- /dev/null
++++ b/drivers/video/console/cfbcondecor.c
+@@ -0,0 +1,472 @@
++/*
++ * linux/drivers/video/cfbcon_decor.c -- Framebuffer decor render functions
++ *
++ * Copyright (C) 2004 Michal Januszewski <spock@gentoo.org>
++ *
++ * Code based upon "Bootdecor" (C) 2001-2003
++ * Volker Poplawski <volker@poplawski.de>,
++ * Stefan Reinauer <stepan@suse.de>,
++ * Steffen Winterfeldt <snwint@suse.de>,
++ * Michael Schroeder <mls@suse.de>,
++ * Ken Wimer <wimer@suse.de>.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/selection.h>
++#include <linux/slab.h>
++#include <linux/vt_kern.h>
++#include <asm/irq.h>
++#include <asm/system.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
++#define parse_pixel(shift,bpp,type) \
++ do { \
++ if (d & (0x80 >> (shift))) \
++ dd2[(shift)] = fgx; \
++ else \
++ dd2[(shift)] = transparent ? *(type *)decor_src : bgx; \
++ decor_src += (bpp); \
++ } while (0) \
++
++extern int get_color(struct vc_data *vc, struct fb_info *info,
++ u16 c, int is_fg);
++
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc)
++{
++ int i, j, k;
++ int minlen = min(min(info->var.red.length, info->var.green.length),
++ info->var.blue.length);
++ u32 col;
++
++ for (j = i = 0; i < 16; i++) {
++ k = color_table[i];
++
++ col = ((vc->vc_palette[j++] >> (8-minlen))
++ << info->var.red.offset);
++ col |= ((vc->vc_palette[j++] >> (8-minlen))
++ << info->var.green.offset);
++ col |= ((vc->vc_palette[j++] >> (8-minlen))
++ << info->var.blue.offset);
++ ((u32 *)info->pseudo_palette)[k] = col;
++ }
++}
++
++void fbcon_decor_renderc(struct fb_info *info, int ypos, int xpos, int height,
++ int width, u8* src, u32 fgx, u32 bgx, u8 transparent)
++{
++ unsigned int x, y;
++ u32 dd;
++ int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++ unsigned int d = ypos * info->fix.line_length + xpos * bytespp;
++ unsigned int ds = (ypos * info->var.xres + xpos) * bytespp;
++ u16 dd2[4];
++
++ u8* decor_src = (u8 *)(info->bgdecor.data + ds);
++ u8* dst = (u8 *)(info->screen_base + d);
++
++ if ((ypos + height) > info->var.yres || (xpos + width) > info->var.xres)
++ return;
++
++ for (y = 0; y < height; y++) {
++ switch (info->var.bits_per_pixel) {
++
++ case 32:
++ for (x = 0; x < width; x++) {
++
++ if ((x & 7) == 0)
++ d = *src++;
++ if (d & 0x80)
++ dd = fgx;
++ else
++ dd = transparent ?
++ *(u32 *)decor_src : bgx;
++
++ d <<= 1;
++ decor_src += 4;
++ fb_writel(dd, dst);
++ dst += 4;
++ }
++ break;
++ case 24:
++ for (x = 0; x < width; x++) {
++
++ if ((x & 7) == 0)
++ d = *src++;
++ if (d & 0x80)
++ dd = fgx;
++ else
++ dd = transparent ?
++ (*(u32 *)decor_src & 0xffffff) : bgx;
++
++ d <<= 1;
++ decor_src += 3;
++#ifdef __LITTLE_ENDIAN
++ fb_writew(dd & 0xffff, dst);
++ dst += 2;
++ fb_writeb((dd >> 16), dst);
++#else
++ fb_writew(dd >> 8, dst);
++ dst += 2;
++ fb_writeb(dd & 0xff, dst);
++#endif
++ dst++;
++ }
++ break;
++ case 16:
++ for (x = 0; x < width; x += 2) {
++ if ((x & 7) == 0)
++ d = *src++;
++
++ parse_pixel(0, 2, u16);
++ parse_pixel(1, 2, u16);
++#ifdef __LITTLE_ENDIAN
++ dd = dd2[0] | (dd2[1] << 16);
++#else
++ dd = dd2[1] | (dd2[0] << 16);
++#endif
++ d <<= 2;
++ fb_writel(dd, dst);
++ dst += 4;
++ }
++ break;
++
++ case 8:
++ for (x = 0; x < width; x += 4) {
++ if ((x & 7) == 0)
++ d = *src++;
++
++ parse_pixel(0, 1, u8);
++ parse_pixel(1, 1, u8);
++ parse_pixel(2, 1, u8);
++ parse_pixel(3, 1, u8);
++
++#ifdef __LITTLE_ENDIAN
++ dd = dd2[0] | (dd2[1] << 8) | (dd2[2] << 16) | (dd2[3] << 24);
++#else
++ dd = dd2[3] | (dd2[2] << 8) | (dd2[1] << 16) | (dd2[0] << 24);
++#endif
++ d <<= 4;
++ fb_writel(dd, dst);
++ dst += 4;
++ }
++ }
++
++ dst += info->fix.line_length - width * bytespp;
++ decor_src += (info->var.xres - width) * bytespp;
++ }
++}
++
++#define cc2cx(a) \
++ ((info->fix.visual == FB_VISUAL_TRUECOLOR || \
++ info->fix.visual == FB_VISUAL_DIRECTCOLOR) ? \
++ ((u32*)info->pseudo_palette)[a] : a)
++
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info,
++ const unsigned short *s, int count, int yy, int xx)
++{
++ unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
++ struct fbcon_ops *ops = info->fbcon_par;
++ int fg_color, bg_color, transparent;
++ u8 *src;
++ u32 bgx, fgx;
++ u16 c = scr_readw(s);
++
++ fg_color = get_color(vc, info, c, 1);
++ bg_color = get_color(vc, info, c, 0);
++
++ /* Don't paint the background image if console is blanked */
++ transparent = ops->blank_state ? 0 :
++ (vc->vc_decor.bg_color == bg_color);
++
++ xx = xx * vc->vc_font.width + vc->vc_decor.tx;
++ yy = yy * vc->vc_font.height + vc->vc_decor.ty;
++
++ fgx = cc2cx(fg_color);
++ bgx = cc2cx(bg_color);
++
++ while (count--) {
++ c = scr_readw(s++);
++ src = vc->vc_font.data + (c & charmask) * vc->vc_font.height *
++ ((vc->vc_font.width + 7) >> 3);
++
++ fbcon_decor_renderc(info, yy, xx, vc->vc_font.height,
++ vc->vc_font.width, src, fgx, bgx, transparent);
++ xx += vc->vc_font.width;
++ }
++}
++
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor)
++{
++ int i;
++ unsigned int dsize, s_pitch;
++ struct fbcon_ops *ops = info->fbcon_par;
++ struct vc_data* vc;
++ u8 *src;
++
++ /* we really don't need any cursors while the console is blanked */
++ if (info->state != FBINFO_STATE_RUNNING || ops->blank_state)
++ return;
++
++ vc = vc_cons[ops->currcon].d;
++
++ src = kmalloc(64 + sizeof(struct fb_image), GFP_ATOMIC);
++ if (!src)
++ return;
++
++ s_pitch = (cursor->image.width + 7) >> 3;
++ dsize = s_pitch * cursor->image.height;
++ if (cursor->enable) {
++ switch (cursor->rop) {
++ case ROP_XOR:
++ for (i = 0; i < dsize; i++)
++ src[i] = cursor->image.data[i] ^ cursor->mask[i];
++ break;
++ case ROP_COPY:
++ default:
++ for (i = 0; i < dsize; i++)
++ src[i] = cursor->image.data[i] & cursor->mask[i];
++ break;
++ }
++ } else
++ memcpy(src, cursor->image.data, dsize);
++
++ fbcon_decor_renderc(info,
++ cursor->image.dy + vc->vc_decor.ty,
++ cursor->image.dx + vc->vc_decor.tx,
++ cursor->image.height,
++ cursor->image.width,
++ (u8*)src,
++ cc2cx(cursor->image.fg_color),
++ cc2cx(cursor->image.bg_color),
++ cursor->image.bg_color == vc->vc_decor.bg_color);
++
++ kfree(src);
++}
++
++static void decorset(u8 *dst, int height, int width, int dstbytes,
++ u32 bgx, int bpp)
++{
++ int i;
++
++ if (bpp == 8)
++ bgx |= bgx << 8;
++ if (bpp == 16 || bpp == 8)
++ bgx |= bgx << 16;
++
++ while (height-- > 0) {
++ u8 *p = dst;
++
++ switch (bpp) {
++
++ case 32:
++ for (i=0; i < width; i++) {
++ fb_writel(bgx, p); p += 4;
++ }
++ break;
++ case 24:
++ for (i=0; i < width; i++) {
++#ifdef __LITTLE_ENDIAN
++ fb_writew((bgx & 0xffff),(u16*)p); p += 2;
++ fb_writeb((bgx >> 16),p++);
++#else
++ fb_writew((bgx >> 8),(u16*)p); p += 2;
++ fb_writeb((bgx & 0xff),p++);
++#endif
++ }
++ case 16:
++ for (i=0; i < width/4; i++) {
++ fb_writel(bgx,p); p += 4;
++ fb_writel(bgx,p); p += 4;
++ }
++ if (width & 2) {
++ fb_writel(bgx,p); p += 4;
++ }
++ if (width & 1)
++ fb_writew(bgx,(u16*)p);
++ break;
++ case 8:
++ for (i=0; i < width/4; i++) {
++ fb_writel(bgx,p); p += 4;
++ }
++
++ if (width & 2) {
++ fb_writew(bgx,p); p += 2;
++ }
++ if (width & 1)
++ fb_writeb(bgx,(u8*)p);
++ break;
++
++ }
++ dst += dstbytes;
++ }
++}
++
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes,
++ int srclinebytes, int bpp)
++{
++ int i;
++
++ while (height-- > 0) {
++ u32 *p = (u32 *)dst;
++ u32 *q = (u32 *)src;
++
++ switch (bpp) {
++
++ case 32:
++ for (i=0; i < width; i++)
++ fb_writel(*q++, p++);
++ break;
++ case 24:
++ for (i=0; i < (width*3/4); i++)
++ fb_writel(*q++, p++);
++ if ((width*3) % 4) {
++ if (width & 2) {
++ fb_writeb(*(u8*)q, (u8*)p);
++ } else if (width & 1) {
++ fb_writew(*(u16*)q, (u16*)p);
++ fb_writeb(*(u8*)((u16*)q+1),(u8*)((u16*)p+2));
++ }
++ }
++ break;
++ case 16:
++ for (i=0; i < width/4; i++) {
++ fb_writel(*q++, p++);
++ fb_writel(*q++, p++);
++ }
++ if (width & 2)
++ fb_writel(*q++, p++);
++ if (width & 1)
++ fb_writew(*(u16*)q, (u16*)p);
++ break;
++ case 8:
++ for (i=0; i < width/4; i++)
++ fb_writel(*q++, p++);
++
++ if (width & 2) {
++ fb_writew(*(u16*)q, (u16*)p);
++ q = (u32*) ((u16*)q + 1);
++ p = (u32*) ((u16*)p + 1);
++ }
++ if (width & 1)
++ fb_writeb(*(u8*)q, (u8*)p);
++ break;
++ }
++
++ dst += linebytes;
++ src += srclinebytes;
++ }
++}
++
++static void decorfill(struct fb_info *info, int sy, int sx, int height,
++ int width)
++{
++ int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++ int d = sy * info->fix.line_length + sx * bytespp;
++ int ds = (sy * info->var.xres + sx) * bytespp;
++
++ fbcon_decor_copy((u8 *)(info->screen_base + d), (u8 *)(info->bgdecor.data + ds),
++ height, width, info->fix.line_length, info->var.xres * bytespp,
++ info->var.bits_per_pixel);
++}
++
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx,
++ int height, int width)
++{
++ int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
++ struct fbcon_ops *ops = info->fbcon_par;
++ u8 *dst;
++ int transparent, bg_color = attr_bgcol_ec(bgshift, vc, info);
++
++ transparent = (vc->vc_decor.bg_color == bg_color);
++ sy = sy * vc->vc_font.height + vc->vc_decor.ty;
++ sx = sx * vc->vc_font.width + vc->vc_decor.tx;
++ height *= vc->vc_font.height;
++ width *= vc->vc_font.width;
++
++ /* Don't paint the background image if console is blanked */
++ if (transparent && !ops->blank_state) {
++ decorfill(info, sy, sx, height, width);
++ } else {
++ dst = (u8 *)(info->screen_base + sy * info->fix.line_length +
++ sx * ((info->var.bits_per_pixel + 7) >> 3));
++ decorset(dst, height, width, info->fix.line_length, cc2cx(bg_color),
++ info->var.bits_per_pixel);
++ }
++}
++
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info,
++ int bottom_only)
++{
++ unsigned int tw = vc->vc_cols*vc->vc_font.width;
++ unsigned int th = vc->vc_rows*vc->vc_font.height;
++
++ if (!bottom_only) {
++ /* top margin */
++ decorfill(info, 0, 0, vc->vc_decor.ty, info->var.xres);
++ /* left margin */
++ decorfill(info, vc->vc_decor.ty, 0, th, vc->vc_decor.tx);
++ /* right margin */
++ decorfill(info, vc->vc_decor.ty, vc->vc_decor.tx + tw, th,
++ info->var.xres - vc->vc_decor.tx - tw);
++ }
++ decorfill(info, vc->vc_decor.ty + th, 0,
++ info->var.yres - vc->vc_decor.ty - th, info->var.xres);
++}
++
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y,
++ int sx, int dx, int width)
++{
++ u16 *d = (u16 *) (vc->vc_origin + vc->vc_size_row * y + dx * 2);
++ u16 *s = d + (dx - sx);
++ u16 *start = d;
++ u16 *ls = d;
++ u16 *le = d + width;
++ u16 c;
++ int x = dx;
++ u16 attr = 1;
++
++ do {
++ c = scr_readw(d);
++ if (attr != (c & 0xff00)) {
++ attr = c & 0xff00;
++ if (d > start) {
++ fbcon_decor_putcs(vc, info, start, d - start, y, x);
++ x += d - start;
++ start = d;
++ }
++ }
++ if (s >= ls && s < le && c == scr_readw(s)) {
++ if (d > start) {
++ fbcon_decor_putcs(vc, info, start, d - start, y, x);
++ x += d - start + 1;
++ start = d + 1;
++ } else {
++ x++;
++ start++;
++ }
++ }
++ s++;
++ d++;
++ } while (d < le);
++ if (d > start)
++ fbcon_decor_putcs(vc, info, start, d - start, y, x);
++}
++
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank)
++{
++ if (blank) {
++ decorset((u8 *)info->screen_base, info->var.yres, info->var.xres,
++ info->fix.line_length, 0, info->var.bits_per_pixel);
++ } else {
++ update_screen(vc);
++ fbcon_decor_clear_margins(vc, info, 0);
++ }
++}
++
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index 8745637..0c36600 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -26,7 +26,7 @@
+ *
+ * Hardware cursor support added by Emmanuel Marty (core@ggi-project.org)
+ * Smart redraw scrolling, arbitrary font width support, 512char font support
+- * and software scrollback added by
++ * and software scrollback added by
+ * Jakub Jelinek (jj@ultra.linux.cz)
+ *
+ * Random hacking by Martin Mares <mj@ucw.cz>
+@@ -80,6 +80,7 @@
+ #include <asm/system.h>
+
+ #include "fbcon.h"
++#include "fbcondecor.h"
+
+ #ifdef FBCONDEBUG
+ # define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
+@@ -95,7 +96,7 @@ enum {
+
+ static struct display fb_display[MAX_NR_CONSOLES];
+
+-static signed char con2fb_map[MAX_NR_CONSOLES];
++signed char con2fb_map[MAX_NR_CONSOLES];
+ static signed char con2fb_map_boot[MAX_NR_CONSOLES];
+
+ static int logo_lines;
+@@ -111,7 +112,7 @@ static int softback_lines;
+ /* console mappings */
+ static int first_fb_vc;
+ static int last_fb_vc = MAX_NR_CONSOLES - 1;
+-static int fbcon_is_default = 1;
++static int fbcon_is_default = 1;
+ static int fbcon_has_exited;
+ static int primary_device = -1;
+ static int fbcon_has_console_bind;
+@@ -287,7 +288,7 @@ static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
+ !vt_force_oops_output(vc);
+ }
+
+-static int get_color(struct vc_data *vc, struct fb_info *info,
++int get_color(struct vc_data *vc, struct fb_info *info,
+ u16 c, int is_fg)
+ {
+ int depth = fb_get_color_depth(&info->var, &info->fix);
+@@ -444,7 +445,7 @@ static int __init fb_console_setup(char *this_opt)
+ while ((options = strsep(&this_opt, ",")) != NULL) {
+ if (!strncmp(options, "font:", 5))
+ strcpy(fontname, options + 5);
+-
++
+ if (!strncmp(options, "scrollback:", 11)) {
+ options += 11;
+ if (*options) {
+@@ -459,7 +460,7 @@ static int __init fb_console_setup(char *this_opt)
+ } else
+ return 1;
+ }
+-
++
+ if (!strncmp(options, "map:", 4)) {
+ options += 4;
+ if (*options) {
+@@ -484,8 +485,8 @@ static int __init fb_console_setup(char *this_opt)
+ first_fb_vc = 0;
+ if (*options++ == '-')
+ last_fb_vc = simple_strtoul(options, &options, 10) - 1;
+- fbcon_is_default = 0;
+- }
++ fbcon_is_default = 0;
++ }
+
+ if (!strncmp(options, "rotate:", 7)) {
+ options += 7;
+@@ -546,6 +547,9 @@ static int fbcon_takeover(int show_logo)
+ info_idx = -1;
+ } else {
+ fbcon_has_console_bind = 1;
++#ifdef CONFIG_FB_CON_DECOR
++ fbcon_decor_init();
++#endif
+ }
+
+ return err;
+@@ -936,7 +940,7 @@ static const char *fbcon_startup(void)
+ info = registered_fb[info_idx];
+ if (!info)
+ return NULL;
+-
++
+ owner = info->fbops->owner;
+ if (!try_module_get(owner))
+ return NULL;
+@@ -1000,6 +1004,12 @@ static const char *fbcon_startup(void)
+ rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols /= vc->vc_font.width;
+ rows /= vc->vc_font.height;
++
++ if (fbcon_decor_active(info, vc)) {
++ cols = vc->vc_decor.twidth / vc->vc_font.width;
++ rows = vc->vc_decor.theight / vc->vc_font.height;
++ }
++
+ vc_resize(vc, cols, rows);
+
+ DPRINTK("mode: %s\n", info->fix.id);
+@@ -1029,7 +1039,7 @@ static void fbcon_init(struct vc_data *vc, int init)
+ cap = info->flags;
+
+ if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW ||
+- (info->fix.type == FB_TYPE_TEXT))
++ (info->fix.type == FB_TYPE_TEXT) || fbcon_decor_active(info, vc))
+ logo = 0;
+
+ if (var_to_display(p, &info->var, info))
+@@ -1239,6 +1249,11 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
+ if (sy < vc->vc_top && vc->vc_top == logo_lines)
+ vc->vc_top = 0;
+
++ if (fbcon_decor_active(info, vc)) {
++ fbcon_decor_clear(vc, info, sy, sx, height, width);
++ return;
++ }
++
+ /* Split blits that cross physical y_wrap boundary */
+
+ y_break = p->vrows - p->yscroll;
+@@ -1258,10 +1273,15 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
+ struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_ops *ops = info->fbcon_par;
+
+- if (!fbcon_is_inactive(vc, info))
+- ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
+- get_color(vc, info, scr_readw(s), 1),
+- get_color(vc, info, scr_readw(s), 0));
++ if (!fbcon_is_inactive(vc, info)) {
++
++ if (fbcon_decor_active(info, vc))
++ fbcon_decor_putcs(vc, info, s, count, ypos, xpos);
++ else
++ ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
++ get_color(vc, info, scr_readw(s), 1),
++ get_color(vc, info, scr_readw(s), 0));
++ }
+ }
+
+ static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
+@@ -1277,8 +1297,13 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+
+- if (!fbcon_is_inactive(vc, info))
+- ops->clear_margins(vc, info, bottom_only);
++ if (!fbcon_is_inactive(vc, info)) {
++ if (fbcon_decor_active(info, vc)) {
++ fbcon_decor_clear_margins(vc, info, bottom_only);
++ } else {
++ ops->clear_margins(vc, info, bottom_only);
++ }
++ }
+ }
+
+ static void fbcon_cursor(struct vc_data *vc, int mode)
+@@ -1388,7 +1413,7 @@ static __inline__ void ywrap_up(struct vc_data *vc, int count)
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct display *p = &fb_display[vc->vc_num];
+-
++
+ p->yscroll += count;
+ if (p->yscroll >= p->vrows) /* Deal with wrap */
+ p->yscroll -= p->vrows;
+@@ -1407,7 +1432,7 @@ static __inline__ void ywrap_down(struct vc_data *vc, int count)
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct display *p = &fb_display[vc->vc_num];
+-
++
+ p->yscroll -= count;
+ if (p->yscroll < 0) /* Deal with wrap */
+ p->yscroll += p->vrows;
+@@ -1474,7 +1499,7 @@ static __inline__ void ypan_down(struct vc_data *vc, int count)
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_ops *ops = info->fbcon_par;
+-
++
+ p->yscroll -= count;
+ if (p->yscroll < 0) {
+ ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows,
+@@ -1798,7 +1823,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
+ count = vc->vc_rows;
+ if (softback_top)
+ fbcon_softback_note(vc, t, count);
+- if (logo_shown >= 0)
++ if (logo_shown >= 0 || fbcon_decor_active(info, vc))
+ goto redraw_up;
+ switch (p->scrollmode) {
+ case SCROLL_MOVE:
+@@ -1891,6 +1916,8 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
+ count = vc->vc_rows;
+ if (logo_shown >= 0)
+ goto redraw_down;
++ if (fbcon_decor_active(info, vc))
++ goto redraw_down;
+ switch (p->scrollmode) {
+ case SCROLL_MOVE:
+ fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
+@@ -1983,7 +2010,7 @@ static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
+ {
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct display *p = &fb_display[vc->vc_num];
+-
++
+ if (fbcon_is_inactive(vc, info))
+ return;
+
+@@ -2001,7 +2028,7 @@ static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
+ p->vrows - p->yscroll);
+ }
+
+-static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int sx,
++static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int sx,
+ int dy, int dx, int height, int width, u_int y_break)
+ {
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+@@ -2039,6 +2066,13 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int s
+ }
+ return;
+ }
++
++ if (fbcon_decor_active(info, vc) && sy == dy && height == 1) {
++ /* must use slower redraw bmove to keep background pic intact */
++ fbcon_decor_bmove_redraw(vc, info, sy, sx, dx, width);
++ return;
++ }
++
+ ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ height, width);
+ }
+@@ -2090,7 +2124,7 @@ static void updatescrollmode(struct display *p,
+ }
+ }
+
+-static int fbcon_resize(struct vc_data *vc, unsigned int width,
++static int fbcon_resize(struct vc_data *vc, unsigned int width,
+ unsigned int height, unsigned int user)
+ {
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+@@ -2109,8 +2143,8 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
+ var.yres = virt_h * virt_fh;
+ x_diff = info->var.xres - var.xres;
+ y_diff = info->var.yres - var.yres;
+- if (x_diff < 0 || x_diff > virt_fw ||
+- y_diff < 0 || y_diff > virt_fh) {
++ if ((x_diff < 0 || x_diff > virt_fw ||
++ y_diff < 0 || y_diff > virt_fh) && !vc->vc_decor.state) {
+ const struct fb_videomode *mode;
+
+ DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
+@@ -2146,6 +2180,21 @@ static int fbcon_switch(struct vc_data *vc)
+
+ info = registered_fb[con2fb_map[vc->vc_num]];
+ ops = info->fbcon_par;
++ prev_console = ops->currcon;
++ if (prev_console != -1)
++ old_info = registered_fb[con2fb_map[prev_console]];
++
++#ifdef CONFIG_FB_CON_DECOR
++ if (!fbcon_decor_active_vc(vc) && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++ struct vc_data *vc_curr = vc_cons[prev_console].d;
++ if (vc_curr && fbcon_decor_active_vc(vc_curr)) {
++ /* Clear the screen to avoid displaying funky colors during
++ * palette updates. */
++ memset((u8*)info->screen_base + info->fix.line_length * info->var.yoffset,
++ 0, info->var.yres * info->fix.line_length);
++ }
++ }
++#endif
+
+ if (softback_top) {
+ if (softback_lines)
+@@ -2164,9 +2213,6 @@ static int fbcon_switch(struct vc_data *vc)
+ logo_shown = FBCON_LOGO_CANSHOW;
+ }
+
+- prev_console = ops->currcon;
+- if (prev_console != -1)
+- old_info = registered_fb[con2fb_map[prev_console]];
+ /*
+ * FIXME: If we have multiple fbdev's loaded, we need to
+ * update all info->currcon. Perhaps, we can place this
+@@ -2210,6 +2256,18 @@ static int fbcon_switch(struct vc_data *vc)
+ fbcon_del_cursor_timer(old_info);
+ }
+
++ if (fbcon_decor_active_vc(vc)) {
++ struct vc_data *vc_curr = vc_cons[prev_console].d;
++
++ if (!vc_curr->vc_decor.theme ||
++ strcmp(vc->vc_decor.theme, vc_curr->vc_decor.theme) ||
++ (fbcon_decor_active_nores(info, vc_curr) &&
++ !fbcon_decor_active(info, vc_curr))) {
++ fbcon_decor_disable(vc, 0);
++ fbcon_decor_call_helper("modechange", vc->vc_num);
++ }
++ }
++
+ if (fbcon_is_inactive(vc, info) ||
+ ops->blank_state != FB_BLANK_UNBLANK)
+ fbcon_del_cursor_timer(info);
+@@ -2258,11 +2316,10 @@ static int fbcon_switch(struct vc_data *vc)
+ ops->update_start(info);
+ }
+
+- fbcon_set_palette(vc, color_table);
++ fbcon_set_palette(vc, color_table);
+ fbcon_clear_margins(vc, 0);
+
+ if (logo_shown == FBCON_LOGO_DRAW) {
+-
+ logo_shown = fg_console;
+ /* This is protected above by initmem_freed */
+ fb_show_logo(info, ops->rotate);
+@@ -2318,15 +2375,20 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
+ }
+ }
+
+- if (!fbcon_is_inactive(vc, info)) {
++ if (!fbcon_is_inactive(vc, info)) {
+ if (ops->blank_state != blank) {
+ ops->blank_state = blank;
+ fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
+ ops->cursor_flash = (!blank);
+
+- if (!(info->flags & FBINFO_MISC_USEREVENT))
+- if (fb_blank(info, blank))
+- fbcon_generic_blank(vc, info, blank);
++ if (!(info->flags & FBINFO_MISC_USEREVENT)) {
++ if (fb_blank(info, blank)) {
++ if (fbcon_decor_active(info, vc))
++ fbcon_decor_blank(vc, info, blank);
++ else
++ fbcon_generic_blank(vc, info, blank);
++ }
++ }
+ }
+
+ if (!blank)
+@@ -2448,7 +2510,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+ vc->vc_complement_mask >>= 1;
+ vc->vc_s_complement_mask >>= 1;
+ }
+-
++
+ /* ++Edmund: reorder the attribute bits */
+ if (vc->vc_can_do_color) {
+ unsigned short *cp =
+@@ -2471,7 +2533,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+ vc->vc_complement_mask <<= 1;
+ vc->vc_s_complement_mask <<= 1;
+ }
+-
++
+ /* ++Edmund: reorder the attribute bits */
+ {
+ unsigned short *cp =
+@@ -2501,13 +2563,22 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+ }
+
+ if (resize) {
++ /* reset wrap/pan */
+ int cols, rows;
+
+ cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
+ rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
++
++ if (fbcon_decor_active(info, vc)) {
++ info->var.xoffset = info->var.yoffset = p->yscroll = 0;
++ cols = vc->vc_decor.twidth;
++ rows = vc->vc_decor.theight;
++ }
+ cols /= w;
+ rows /= h;
++
+ vc_resize(vc, cols, rows);
++
+ if (CON_IS_VISIBLE(vc) && softback_buf)
+ fbcon_update_softback(vc);
+ } else if (CON_IS_VISIBLE(vc)
+@@ -2591,7 +2662,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigne
+ /* Check if the same font is on some other console already */
+ for (i = first_fb_vc; i <= last_fb_vc; i++) {
+ struct vc_data *tmp = vc_cons[i].d;
+-
++
+ if (fb_display[i].userfont &&
+ fb_display[i].fontdata &&
+ FNTSUM(fb_display[i].fontdata) == csum &&
+@@ -2636,7 +2707,11 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
+ int i, j, k, depth;
+ u8 val;
+
+- if (fbcon_is_inactive(vc, info))
++ if (fbcon_is_inactive(vc, info)
++#ifdef CONFIG_FB_CON_DECOR
++ || vc->vc_num != fg_console
++#endif
++ )
+ return -EINVAL;
+
+ if (!CON_IS_VISIBLE(vc))
+@@ -2662,14 +2737,56 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
+ } else
+ fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap);
+
+- return fb_set_cmap(&palette_cmap, info);
++ if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++
++ u16 *red, *green, *blue;
++ int minlen = min(min(info->var.red.length, info->var.green.length),
++ info->var.blue.length);
++ int h;
++
++ struct fb_cmap cmap = {
++ .start = 0,
++ .len = (1 << minlen),
++ .red = NULL,
++ .green = NULL,
++ .blue = NULL,
++ .transp = NULL
++ };
++
++ red = kmalloc(256 * sizeof(u16) * 3, GFP_KERNEL);
++
++ if (!red)
++ goto out;
++
++ green = red + 256;
++ blue = green + 256;
++ cmap.red = red;
++ cmap.green = green;
++ cmap.blue = blue;
++
++ for (i = 0; i < cmap.len; i++) {
++ red[i] = green[i] = blue[i] = (0xffff * i)/(cmap.len-1);
++ }
++
++ h = fb_set_cmap(&cmap, info);
++ fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++ kfree(red);
++
++ return h;
++
++ } else if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++ info->var.bits_per_pixel == 8 && info->bgdecor.cmap.red != NULL)
++ fb_set_cmap(&info->bgdecor.cmap, info);
++
++out: return fb_set_cmap(&palette_cmap, info);
+ }
+
+ static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
+ {
+ unsigned long p;
+ int line;
+-
++
+ if (vc->vc_num != fg_console || !softback_lines)
+ return (u16 *) (vc->vc_origin + offset);
+ line = offset / vc->vc_size_row;
+@@ -2888,7 +3005,14 @@ static void fbcon_modechanged(struct fb_info *info)
+ rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols /= vc->vc_font.width;
+ rows /= vc->vc_font.height;
+- vc_resize(vc, cols, rows);
++
++ if (!fbcon_decor_active_nores(info, vc)) {
++ vc_resize(vc, cols, rows);
++ } else {
++ fbcon_decor_disable(vc, 0);
++ fbcon_decor_call_helper("modechange", vc->vc_num);
++ }
++
+ updatescrollmode(p, info, vc);
+ scrollback_max = 0;
+ scrollback_current = 0;
+@@ -2933,7 +3057,9 @@ static void fbcon_set_all_vcs(struct fb_info *info)
+ rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols /= vc->vc_font.width;
+ rows /= vc->vc_font.height;
+- vc_resize(vc, cols, rows);
++ if (!fbcon_decor_active_nores(info, vc)) {
++ vc_resize(vc, cols, rows);
++ }
+ }
+
+ if (fg != -1)
+@@ -3543,6 +3669,7 @@ static void fbcon_exit(void)
+ }
+ }
+
++ fbcon_decor_exit();
+ fbcon_has_exited = 1;
+ }
+
+@@ -3596,7 +3723,7 @@ static void __exit fb_console_exit(void)
+ fbcon_exit();
+ console_unlock();
+ unregister_con_driver(&fb_con);
+-}
++}
+
+ module_exit(fb_console_exit);
+
+diff --git a/drivers/video/console/fbcondecor.c b/drivers/video/console/fbcondecor.c
+new file mode 100644
+index 0000000..b80639c
+--- /dev/null
++++ b/drivers/video/console/fbcondecor.c
+@@ -0,0 +1,556 @@
++/*
++ * linux/drivers/video/console/fbcondecor.c -- Framebuffer console decorations
++ *
++ * Copyright (C) 2004-2009 Michal Januszewski <spock@gentoo.org>
++ *
++ * Code based upon "Bootsplash" (C) 2001-2003
++ * Volker Poplawski <volker@poplawski.de>,
++ * Stefan Reinauer <stepan@suse.de>,
++ * Steffen Winterfeldt <snwint@suse.de>,
++ * Michael Schroeder <mls@suse.de>,
++ * Ken Wimer <wimer@suse.de>.
++ *
++ * Compat ioctl support by Thorsten Klein <TK@Thorsten-Klein.de>.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive for
++ * more details.
++ *
++ */
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/vt_kern.h>
++#include <linux/vmalloc.h>
++#include <linux/unistd.h>
++#include <linux/syscalls.h>
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/workqueue.h>
++#include <linux/kmod.h>
++#include <linux/miscdevice.h>
++#include <linux/device.h>
++#include <linux/fs.h>
++#include <linux/compat.h>
++#include <linux/console.h>
++
++#include <asm/uaccess.h>
++#include <asm/irq.h>
++#include <asm/system.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
++extern signed char con2fb_map[];
++static int fbcon_decor_enable(struct vc_data *vc);
++char fbcon_decor_path[KMOD_PATH_LEN] = "/sbin/fbcondecor_helper";
++static int initialized = 0;
++
++int fbcon_decor_call_helper(char* cmd, unsigned short vc)
++{
++ char *envp[] = {
++ "HOME=/",
++ "PATH=/sbin:/bin",
++ NULL
++ };
++
++ char tfb[5];
++ char tcons[5];
++ unsigned char fb = (int) con2fb_map[vc];
++
++ char *argv[] = {
++ fbcon_decor_path,
++ "2",
++ cmd,
++ tcons,
++ tfb,
++ vc_cons[vc].d->vc_decor.theme,
++ NULL
++ };
++
++ snprintf(tfb,5,"%d",fb);
++ snprintf(tcons,5,"%d",vc);
++
++ return call_usermodehelper(fbcon_decor_path, argv, envp, UMH_WAIT_EXEC);
++}
++
++/* Disables fbcondecor on a virtual console; called with console sem held. */
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw)
++{
++ struct fb_info* info;
++
++ if (!vc->vc_decor.state)
++ return -EINVAL;
++
++ info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++ if (info == NULL)
++ return -EINVAL;
++
++ vc->vc_decor.state = 0;
++ vc_resize(vc, info->var.xres / vc->vc_font.width,
++ info->var.yres / vc->vc_font.height);
++
++ if (fg_console == vc->vc_num && redraw) {
++ redraw_screen(vc, 0);
++ update_region(vc, vc->vc_origin +
++ vc->vc_size_row * vc->vc_top,
++ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++ }
++
++ printk(KERN_INFO "fbcondecor: switched decor state to 'off' on console %d\n",
++ vc->vc_num);
++
++ return 0;
++}
++
++/* Enables fbcondecor on a virtual console; called with console sem held. */
++static int fbcon_decor_enable(struct vc_data *vc)
++{
++ struct fb_info* info;
++
++ info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++ if (vc->vc_decor.twidth == 0 || vc->vc_decor.theight == 0 ||
++ info == NULL || vc->vc_decor.state || (!info->bgdecor.data &&
++ vc->vc_num == fg_console))
++ return -EINVAL;
++
++ vc->vc_decor.state = 1;
++ vc_resize(vc, vc->vc_decor.twidth / vc->vc_font.width,
++ vc->vc_decor.theight / vc->vc_font.height);
++
++ if (fg_console == vc->vc_num) {
++ redraw_screen(vc, 0);
++ update_region(vc, vc->vc_origin +
++ vc->vc_size_row * vc->vc_top,
++ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++ fbcon_decor_clear_margins(vc, info, 0);
++ }
++
++ printk(KERN_INFO "fbcondecor: switched decor state to 'on' on console %d\n",
++ vc->vc_num);
++
++ return 0;
++}
++
++static inline int fbcon_decor_ioctl_dosetstate(struct vc_data *vc, unsigned int state, unsigned char origin)
++{
++ int ret;
++
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_lock();
++ if (!state)
++ ret = fbcon_decor_disable(vc, 1);
++ else
++ ret = fbcon_decor_enable(vc);
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_unlock();
++
++ return ret;
++}
++
++static inline void fbcon_decor_ioctl_dogetstate(struct vc_data *vc, unsigned int *state)
++{
++ *state = vc->vc_decor.state;
++}
++
++static int fbcon_decor_ioctl_dosetcfg(struct vc_data *vc, struct vc_decor *cfg, unsigned char origin)
++{
++ struct fb_info *info;
++ int len;
++ char *tmp;
++
++ info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++ if (info == NULL || !cfg->twidth || !cfg->theight ||
++ cfg->tx + cfg->twidth > info->var.xres ||
++ cfg->ty + cfg->theight > info->var.yres)
++ return -EINVAL;
++
++ len = strlen_user(cfg->theme);
++ if (!len || len > FBCON_DECOR_THEME_LEN)
++ return -EINVAL;
++ tmp = kmalloc(len, GFP_KERNEL);
++ if (!tmp)
++ return -ENOMEM;
++ if (copy_from_user(tmp, (void __user *)cfg->theme, len))
++ return -EFAULT;
++ cfg->theme = tmp;
++ cfg->state = 0;
++
++ /* If this ioctl is a response to a request from kernel, the console sem
++ * is already held; we also don't need to disable decor because either the
++ * new config and background picture will be successfully loaded, and the
++ * decor will stay on, or in case of a failure it'll be turned off in fbcon. */
++// if (origin == FBCON_DECOR_IO_ORIG_USER) {
++ console_lock();
++ if (vc->vc_decor.state)
++ fbcon_decor_disable(vc, 1);
++// }
++
++ if (vc->vc_decor.theme)
++ kfree(vc->vc_decor.theme);
++
++ vc->vc_decor = *cfg;
++
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_unlock();
++
++ printk(KERN_INFO "fbcondecor: console %d using theme '%s'\n",
++ vc->vc_num, vc->vc_decor.theme);
++ return 0;
++}
++
++static int fbcon_decor_ioctl_dogetcfg(struct vc_data *vc, struct vc_decor *decor)
++{
++ char __user *tmp;
++
++ tmp = decor->theme;
++ *decor = vc->vc_decor;
++ decor->theme = tmp;
++
++ if (vc->vc_decor.theme) {
++ if (copy_to_user(tmp, vc->vc_decor.theme, strlen(vc->vc_decor.theme) + 1))
++ return -EFAULT;
++ } else
++ if (put_user(0, tmp))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int fbcon_decor_ioctl_dosetpic(struct vc_data *vc, struct fb_image *img, unsigned char origin)
++{
++ struct fb_info *info;
++ int len;
++ u8 *tmp;
++
++ if (vc->vc_num != fg_console)
++ return -EINVAL;
++
++ info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++ if (info == NULL)
++ return -EINVAL;
++
++ if (img->width != info->var.xres || img->height != info->var.yres) {
++ printk(KERN_ERR "fbcondecor: picture dimensions mismatch\n");
++ printk(KERN_ERR "%dx%d vs %dx%d\n", img->width, img->height, info->var.xres, info->var.yres);
++ return -EINVAL;
++ }
++
++ if (img->depth != info->var.bits_per_pixel) {
++ printk(KERN_ERR "fbcondecor: picture depth mismatch\n");
++ return -EINVAL;
++ }
++
++ if (img->depth == 8) {
++ if (!img->cmap.len || !img->cmap.red || !img->cmap.green ||
++ !img->cmap.blue)
++ return -EINVAL;
++
++ tmp = vmalloc(img->cmap.len * 3 * 2);
++ if (!tmp)
++ return -ENOMEM;
++
++ if (copy_from_user(tmp,
++ (void __user*)img->cmap.red, (img->cmap.len << 1)) ||
++ copy_from_user(tmp + (img->cmap.len << 1),
++ (void __user*)img->cmap.green, (img->cmap.len << 1)) ||
++ copy_from_user(tmp + (img->cmap.len << 2),
++ (void __user*)img->cmap.blue, (img->cmap.len << 1))) {
++ vfree(tmp);
++ return -EFAULT;
++ }
++
++ img->cmap.transp = NULL;
++ img->cmap.red = (u16*)tmp;
++ img->cmap.green = img->cmap.red + img->cmap.len;
++ img->cmap.blue = img->cmap.green + img->cmap.len;
++ } else {
++ img->cmap.red = NULL;
++ }
++
++ len = ((img->depth + 7) >> 3) * img->width * img->height;
++
++ /*
++ * Allocate an additional byte so that we never go outside of the
++ * buffer boundaries in the rendering functions in a 24 bpp mode.
++ */
++ tmp = vmalloc(len + 1);
++
++ if (!tmp)
++ goto out;
++
++ if (copy_from_user(tmp, (void __user*)img->data, len))
++ goto out;
++
++ img->data = tmp;
++
++ /* If this ioctl is a response to a request from kernel, the console sem
++ * is already held. */
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_lock();
++
++ if (info->bgdecor.data)
++ vfree((u8*)info->bgdecor.data);
++ if (info->bgdecor.cmap.red)
++ vfree(info->bgdecor.cmap.red);
++
++ info->bgdecor = *img;
++
++ if (fbcon_decor_active_vc(vc) && fg_console == vc->vc_num) {
++ redraw_screen(vc, 0);
++ update_region(vc, vc->vc_origin +
++ vc->vc_size_row * vc->vc_top,
++ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++ fbcon_decor_clear_margins(vc, info, 0);
++ }
++
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_unlock();
++
++ return 0;
++
++out: if (img->cmap.red)
++ vfree(img->cmap.red);
++
++ if (tmp)
++ vfree(tmp);
++ return -ENOMEM;
++}
++
++static long fbcon_decor_ioctl(struct file *filp, u_int cmd, u_long arg)
++{
++ struct fbcon_decor_iowrapper __user *wrapper = (void __user*) arg;
++ struct vc_data *vc = NULL;
++ unsigned short vc_num = 0;
++ unsigned char origin = 0;
++ void __user *data = NULL;
++
++ if (!access_ok(VERIFY_READ, wrapper,
++ sizeof(struct fbcon_decor_iowrapper)))
++ return -EFAULT;
++
++ __get_user(vc_num, &wrapper->vc);
++ __get_user(origin, &wrapper->origin);
++ __get_user(data, &wrapper->data);
++
++ if (!vc_cons_allocated(vc_num))
++ return -EINVAL;
++
++ vc = vc_cons[vc_num].d;
++
++ switch (cmd) {
++ case FBIOCONDECOR_SETPIC:
++ {
++ struct fb_image img;
++ if (copy_from_user(&img, (struct fb_image __user *)data, sizeof(struct fb_image)))
++ return -EFAULT;
++
++ return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++ }
++ case FBIOCONDECOR_SETCFG:
++ {
++ struct vc_decor cfg;
++ if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++ return -EFAULT;
++
++ return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++ }
++ case FBIOCONDECOR_GETCFG:
++ {
++ int rval;
++ struct vc_decor cfg;
++
++ if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++ return -EFAULT;
++
++ rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++ if (copy_to_user(data, &cfg, sizeof(struct vc_decor)))
++ return -EFAULT;
++ return rval;
++ }
++ case FBIOCONDECOR_SETSTATE:
++ {
++ unsigned int state = 0;
++ if (get_user(state, (unsigned int __user *)data))
++ return -EFAULT;
++ return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++ }
++ case FBIOCONDECOR_GETSTATE:
++ {
++ unsigned int state = 0;
++ fbcon_decor_ioctl_dogetstate(vc, &state);
++ return put_user(state, (unsigned int __user *)data);
++ }
++
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++
++#ifdef CONFIG_COMPAT
++
++static long fbcon_decor_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) {
++
++ struct fbcon_decor_iowrapper32 __user *wrapper = (void __user *)arg;
++ struct vc_data *vc = NULL;
++ unsigned short vc_num = 0;
++ unsigned char origin = 0;
++ compat_uptr_t data_compat = 0;
++ void __user *data = NULL;
++
++ if (!access_ok(VERIFY_READ, wrapper,
++ sizeof(struct fbcon_decor_iowrapper32)))
++ return -EFAULT;
++
++ __get_user(vc_num, &wrapper->vc);
++ __get_user(origin, &wrapper->origin);
++ __get_user(data_compat, &wrapper->data);
++ data = compat_ptr(data_compat);
++
++ if (!vc_cons_allocated(vc_num))
++ return -EINVAL;
++
++ vc = vc_cons[vc_num].d;
++
++ switch (cmd) {
++ case FBIOCONDECOR_SETPIC32:
++ {
++ struct fb_image32 img_compat;
++ struct fb_image img;
++
++ if (copy_from_user(&img_compat, (struct fb_image32 __user *)data, sizeof(struct fb_image32)))
++ return -EFAULT;
++
++ fb_image_from_compat(img, img_compat);
++
++ return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++ }
++
++ case FBIOCONDECOR_SETCFG32:
++ {
++ struct vc_decor32 cfg_compat;
++ struct vc_decor cfg;
++
++ if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++ return -EFAULT;
++
++ vc_decor_from_compat(cfg, cfg_compat);
++
++ return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++ }
++
++ case FBIOCONDECOR_GETCFG32:
++ {
++ int rval;
++ struct vc_decor32 cfg_compat;
++ struct vc_decor cfg;
++
++ if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++ return -EFAULT;
++ cfg.theme = compat_ptr(cfg_compat.theme);
++
++ rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++ vc_decor_to_compat(cfg_compat, cfg);
++
++ if (copy_to_user((struct vc_decor32 __user *)data, &cfg_compat, sizeof(struct vc_decor32)))
++ return -EFAULT;
++ return rval;
++ }
++
++ case FBIOCONDECOR_SETSTATE32:
++ {
++ compat_uint_t state_compat = 0;
++ unsigned int state = 0;
++
++ if (get_user(state_compat, (compat_uint_t __user *)data))
++ return -EFAULT;
++
++ state = (unsigned int)state_compat;
++
++ return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++ }
++
++ case FBIOCONDECOR_GETSTATE32:
++ {
++ compat_uint_t state_compat = 0;
++ unsigned int state = 0;
++
++ fbcon_decor_ioctl_dogetstate(vc, &state);
++ state_compat = (compat_uint_t)state;
++
++ return put_user(state_compat, (compat_uint_t __user *)data);
++ }
++
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++#else
++ #define fbcon_decor_compat_ioctl NULL
++#endif
++
++static struct file_operations fbcon_decor_ops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = fbcon_decor_ioctl,
++ .compat_ioctl = fbcon_decor_compat_ioctl
++};
++
++static struct miscdevice fbcon_decor_dev = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = "fbcondecor",
++ .fops = &fbcon_decor_ops
++};
++
++void fbcon_decor_reset()
++{
++ int i;
++
++ for (i = 0; i < num_registered_fb; i++) {
++ registered_fb[i]->bgdecor.data = NULL;
++ registered_fb[i]->bgdecor.cmap.red = NULL;
++ }
++
++ for (i = 0; i < MAX_NR_CONSOLES && vc_cons[i].d; i++) {
++ vc_cons[i].d->vc_decor.state = vc_cons[i].d->vc_decor.twidth =
++ vc_cons[i].d->vc_decor.theight = 0;
++ vc_cons[i].d->vc_decor.theme = NULL;
++ }
++
++ return;
++}
++
++int fbcon_decor_init()
++{
++ int i;
++
++ fbcon_decor_reset();
++
++ if (initialized)
++ return 0;
++
++ i = misc_register(&fbcon_decor_dev);
++ if (i) {
++ printk(KERN_ERR "fbcondecor: failed to register device\n");
++ return i;
++ }
++
++ fbcon_decor_call_helper("init", 0);
++ initialized = 1;
++ return 0;
++}
++
++int fbcon_decor_exit(void)
++{
++ fbcon_decor_reset();
++ return 0;
++}
++
++EXPORT_SYMBOL(fbcon_decor_path);
+diff --git a/drivers/video/console/fbcondecor.h b/drivers/video/console/fbcondecor.h
+new file mode 100644
+index 0000000..1d852dd
+--- /dev/null
++++ b/drivers/video/console/fbcondecor.h
+@@ -0,0 +1,78 @@
++/*
++ * linux/drivers/video/console/fbcondecor.h -- Framebuffer Console Decoration headers
++ *
++ * Copyright (C) 2004 Michal Januszewski <spock@gentoo.org>
++ *
++ */
++
++#ifndef __FBCON_DECOR_H
++#define __FBCON_DECOR_H
++
++#ifndef _LINUX_FB_H
++#include <linux/fb.h>
++#endif
++
++/* This is needed for vc_cons in fbcmap.c */
++#include <linux/vt_kern.h>
++
++struct fb_cursor;
++struct fb_info;
++struct vc_data;
++
++#ifdef CONFIG_FB_CON_DECOR
++/* fbcondecor.c */
++int fbcon_decor_init(void);
++int fbcon_decor_exit(void);
++int fbcon_decor_call_helper(char* cmd, unsigned short cons);
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw);
++
++/* cfbcondecor.c */
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx);
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor);
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width);
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only);
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank);
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width);
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes, int srclinesbytes, int bpp);
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc);
++
++/* vt.c */
++void acquire_console_sem(void);
++void release_console_sem(void);
++void do_unblank_screen(int entering_gfx);
++
++/* struct vc_data *y */
++#define fbcon_decor_active_vc(y) (y->vc_decor.state && y->vc_decor.theme)
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active_nores(x,y) (x->bgdecor.data && fbcon_decor_active_vc(y))
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active(x,y) (fbcon_decor_active_nores(x,y) && \
++ x->bgdecor.width == x->var.xres && \
++ x->bgdecor.height == x->var.yres && \
++ x->bgdecor.depth == x->var.bits_per_pixel)
++
++
++#else /* CONFIG_FB_CON_DECOR */
++
++static inline void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx) {}
++static inline void fbcon_decor_putc(struct vc_data *vc, struct fb_info *info, int c, int ypos, int xpos) {}
++static inline void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor) {}
++static inline void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) {}
++static inline void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only) {}
++static inline void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank) {}
++static inline void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) {}
++static inline void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc) {}
++static inline int fbcon_decor_call_helper(char* cmd, unsigned short cons) { return 0; }
++static inline int fbcon_decor_init(void) { return 0; }
++static inline int fbcon_decor_exit(void) { return 0; }
++static inline int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw) { return 0; }
++
++#define fbcon_decor_active_vc(y) (0)
++#define fbcon_decor_active_nores(x,y) (0)
++#define fbcon_decor_active(x,y) (0)
++
++#endif /* CONFIG_FB_CON_DECOR */
++
++#endif /* __FBCON_DECOR_H */
+diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
+index 5c3960d..162b5f4 100644
+--- a/drivers/video/fbcmap.c
++++ b/drivers/video/fbcmap.c
+@@ -17,6 +17,8 @@
+ #include <linux/slab.h>
+ #include <linux/uaccess.h>
+
++#include "console/fbcondecor.h"
++
+ static u16 red2[] __read_mostly = {
+ 0x0000, 0xaaaa
+ };
+@@ -249,14 +251,17 @@ int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info)
+ if (transp)
+ htransp = *transp++;
+ if (info->fbops->fb_setcolreg(start++,
+- hred, hgreen, hblue,
++ hred, hgreen, hblue,
+ htransp, info))
+ break;
+ }
+ }
+- if (rc == 0)
++ if (rc == 0) {
+ fb_copy_cmap(cmap, &info->cmap);
+-
++ if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
++ fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++ }
+ return rc;
+ }
+
+diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
+index 5aac00e..c7b285d 100644
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1208,15 +1208,6 @@ struct fb_fix_screeninfo32 {
+ u16 reserved[3];
+ };
+
+-struct fb_cmap32 {
+- u32 start;
+- u32 len;
+- compat_caddr_t red;
+- compat_caddr_t green;
+- compat_caddr_t blue;
+- compat_caddr_t transp;
+-};
+-
+ static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
+ unsigned long arg)
+ {
+diff --git a/include/linux/console_decor.h b/include/linux/console_decor.h
+new file mode 100644
+index 0000000..04b8d80
+--- /dev/null
++++ b/include/linux/console_decor.h
+@@ -0,0 +1,46 @@
++#ifndef _LINUX_CONSOLE_DECOR_H_
++#define _LINUX_CONSOLE_DECOR_H_ 1
++
++/* A structure used by the framebuffer console decorations (drivers/video/console/fbcondecor.c) */
++struct vc_decor {
++ __u8 bg_color; /* The color that is to be treated as transparent */
++ __u8 state; /* Current decor state: 0 = off, 1 = on */
++ __u16 tx, ty; /* Top left corner coordinates of the text field */
++ __u16 twidth, theight; /* Width and height of the text field */
++ char* theme;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++
++struct vc_decor32 {
++ __u8 bg_color; /* The color that is to be treated as transparent */
++ __u8 state; /* Current decor state: 0 = off, 1 = on */
++ __u16 tx, ty; /* Top left corner coordinates of the text field */
++ __u16 twidth, theight; /* Width and height of the text field */
++ compat_uptr_t theme;
++};
++
++#define vc_decor_from_compat(to, from) \
++ (to).bg_color = (from).bg_color; \
++ (to).state = (from).state; \
++ (to).tx = (from).tx; \
++ (to).ty = (from).ty; \
++ (to).twidth = (from).twidth; \
++ (to).theight = (from).theight; \
++ (to).theme = compat_ptr((from).theme)
++
++#define vc_decor_to_compat(to, from) \
++ (to).bg_color = (from).bg_color; \
++ (to).state = (from).state; \
++ (to).tx = (from).tx; \
++ (to).ty = (from).ty; \
++ (to).twidth = (from).twidth; \
++ (to).theight = (from).theight; \
++ (to).theme = ptr_to_compat((from).theme)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#endif
+diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
+index 7f0c329..98f5d60 100644
+--- a/include/linux/console_struct.h
++++ b/include/linux/console_struct.h
+@@ -19,6 +19,7 @@
+ struct vt_struct;
+
+ #define NPAR 16
++#include <linux/console_decor.h>
+
+ struct vc_data {
+ struct tty_port port; /* Upper level data */
+@@ -107,6 +108,8 @@ struct vc_data {
+ unsigned long vc_uni_pagedir;
+ unsigned long *vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
+ bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
++
++ struct vc_decor vc_decor;
+ /* additional information is in vt_kern.h */
+ };
+
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index 6a82748..ec1b8c5 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -11,6 +11,25 @@
+
+ #define FB_MAX 32 /* sufficient for now */
+
++struct fbcon_decor_iowrapper
++{
++ unsigned short vc; /* Virtual console */
++ unsigned char origin; /* Point of origin of the request */
++ void *data;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++struct fbcon_decor_iowrapper32
++{
++ unsigned short vc; /* Virtual console */
++ unsigned char origin; /* Point of origin of the request */
++ compat_uptr_t data;
++};
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /* ioctls
+ 0x46 is 'F' */
+ #define FBIOGET_VSCREENINFO 0x4600
+@@ -39,6 +58,24 @@
+ #define FBIOPUT_MODEINFO 0x4617
+ #define FBIOGET_DISPINFO 0x4618
+ #define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
++#define FBIOCONDECOR_SETCFG _IOWR('F', 0x19, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETCFG _IOR('F', 0x1A, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETSTATE _IOWR('F', 0x1B, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETSTATE _IOR('F', 0x1C, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETPIC _IOWR('F', 0x1D, struct fbcon_decor_iowrapper)
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#define FBIOCONDECOR_SETCFG32 _IOWR('F', 0x19, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETCFG32 _IOR('F', 0x1A, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETSTATE32 _IOWR('F', 0x1B, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETSTATE32 _IOR('F', 0x1C, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETPIC32 _IOWR('F', 0x1D, struct fbcon_decor_iowrapper32)
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#define FBCON_DECOR_THEME_LEN 128 /* Maximum length of a theme name */
++#define FBCON_DECOR_IO_ORIG_KERNEL 0 /* Kernel ioctl origin */
++#define FBCON_DECOR_IO_ORIG_USER 1 /* User ioctl origin */
+
+ #define FB_TYPE_PACKED_PIXELS 0 /* Packed Pixels */
+ #define FB_TYPE_PLANES 1 /* Non interleaved planes */
+@@ -285,6 +322,28 @@ struct fb_cmap {
+ __u16 *transp; /* transparency, can be NULL */
+ };
+
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_cmap32 {
++ __u32 start;
++ __u32 len; /* Number of entries */
++ compat_uptr_t red; /* Red values */
++ compat_uptr_t green;
++ compat_uptr_t blue;
++ compat_uptr_t transp; /* transparency, can be NULL */
++};
++
++#define fb_cmap_from_compat(to, from) \
++ (to).start = (from).start; \
++ (to).len = (from).len; \
++ (to).red = compat_ptr((from).red); \
++ (to).green = compat_ptr((from).green); \
++ (to).blue = compat_ptr((from).blue); \
++ (to).transp = compat_ptr((from).transp)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ struct fb_con2fbmap {
+ __u32 console;
+ __u32 framebuffer;
+@@ -366,6 +425,34 @@ struct fb_image {
+ struct fb_cmap cmap; /* color map info */
+ };
+
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_image32 {
++ __u32 dx; /* Where to place image */
++ __u32 dy;
++ __u32 width; /* Size of image */
++ __u32 height;
++ __u32 fg_color; /* Only used when a mono bitmap */
++ __u32 bg_color;
++ __u8 depth; /* Depth of the image */
++ const compat_uptr_t data; /* Pointer to image data */
++ struct fb_cmap32 cmap; /* color map info */
++};
++
++#define fb_image_from_compat(to, from) \
++ (to).dx = (from).dx; \
++ (to).dy = (from).dy; \
++ (to).width = (from).width; \
++ (to).height = (from).height; \
++ (to).fg_color = (from).fg_color; \
++ (to).bg_color = (from).bg_color; \
++ (to).depth = (from).depth; \
++ (to).data = compat_ptr((from).data); \
++ fb_cmap_from_compat((to).cmap, (from).cmap)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /*
+ * hardware cursor control
+ */
+@@ -876,6 +963,9 @@ struct fb_info {
+ #define FBINFO_STATE_SUSPENDED 1
+ u32 state; /* Hardware state i.e suspend */
+ void *fbcon_par; /* fbcon use-only private area */
++
++ struct fb_image bgdecor;
++
+ /* From here on everything is device dependent */
+ void *par;
+ /* we need the PCI or similar aperture base/size not
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index f175d98..3127af2 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -135,6 +135,10 @@ static int min_percpu_pagelist_fract = 8;
+
+ static int ngroups_max = NGROUPS_MAX;
+
++#ifdef CONFIG_FB_CON_DECOR
++extern char fbcon_decor_path[];
++#endif
++
+ #ifdef CONFIG_INOTIFY_USER
+ #include <linux/inotify.h>
+ #endif
+@@ -254,6 +258,15 @@ static struct ctl_table root_table[] = {
+ .mode = 0555,
+ .child = dev_table,
+ },
++#ifdef CONFIG_FB_CON_DECOR
++ {
++ .procname = "fbcondecor",
++ .data = &fbcon_decor_path,
++ .maxlen = KMOD_PATH_LEN,
++ .mode = 0644,
++ .proc_handler = &proc_dostring,
++ },
++#endif
+ { }
+ };
+
+@@ -1028,7 +1041,7 @@ static struct ctl_table vm_table[] = {
+ .proc_handler = proc_dointvec,
+ },
+ {
+- .procname = "page-cluster",
++ .procname = "page-cluster",
+ .data = &page_cluster,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+@@ -1474,7 +1487,7 @@ static struct ctl_table fs_table[] = {
+ .mode = 0555,
+ .child = inotify_table,
+ },
+-#endif
++#endif
+ #ifdef CONFIG_EPOLL
+ {
+ .procname = "epoll",
+@@ -1835,7 +1848,7 @@ static void try_attach(struct ctl_table_header *p, struct ctl_table_header *q)
+ * cover common cases -
+ *
+ * proc_dostring(), proc_dointvec(), proc_dointvec_jiffies(),
+- * proc_dointvec_userhz_jiffies(), proc_dointvec_minmax(),
++ * proc_dointvec_userhz_jiffies(), proc_dointvec_minmax(),
+ * proc_doulongvec_ms_jiffies_minmax(), proc_doulongvec_minmax()
+ *
+ * It is the handler's job to read the input buffer from user memory
+@@ -2275,12 +2288,12 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
+ unsigned long page = 0;
+ size_t left;
+ char *kbuf;
+-
++
+ if (!tbl_data || !table->maxlen || !*lenp || (*ppos && !write)) {
+ *lenp = 0;
+ return 0;
+ }
+-
++
+ i = (int *) tbl_data;
+ vleft = table->maxlen / sizeof(*i);
+ left = *lenp;
+@@ -2369,7 +2382,7 @@ static int do_proc_dointvec(struct ctl_table *table, int write,
+ * @ppos: file position
+ *
+ * Reads/writes up to table->maxlen/sizeof(unsigned int) integer
+- * values from/to the user buffer, treated as an ASCII string.
++ * values from/to the user buffer, treated as an ASCII string.
+ *
+ * Returns 0 on success.
+ */
+@@ -2696,7 +2709,7 @@ static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp,
+ * @ppos: file position
+ *
+ * Reads/writes up to table->maxlen/sizeof(unsigned int) integer
+- * values from/to the user buffer, treated as an ASCII string.
++ * values from/to the user buffer, treated as an ASCII string.
+ * The values read are assumed to be in seconds, and are converted into
+ * jiffies.
+ *
+@@ -2718,8 +2731,8 @@ int proc_dointvec_jiffies(struct ctl_table *table, int write,
+ * @ppos: pointer to the file position
+ *
+ * Reads/writes up to table->maxlen/sizeof(unsigned int) integer
+- * values from/to the user buffer, treated as an ASCII string.
+- * The values read are assumed to be in 1/USER_HZ seconds, and
++ * values from/to the user buffer, treated as an ASCII string.
++ * The values read are assumed to be in 1/USER_HZ seconds, and
+ * are converted into jiffies.
+ *
+ * Returns 0 on success.
+@@ -2741,8 +2754,8 @@ int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write,
+ * @ppos: the current position in the file
+ *
+ * Reads/writes up to table->maxlen/sizeof(unsigned int) integer
+- * values from/to the user buffer, treated as an ASCII string.
+- * The values read are assumed to be in 1/1000 seconds, and
++ * values from/to the user buffer, treated as an ASCII string.
++ * The values read are assumed to be in 1/1000 seconds, and
+ * are converted into jiffies.
+ *
+ * Returns 0 on success.
diff --git a/4500_nouveau-video-output-control-Kconfig.patch b/4500_nouveau-video-output-control-Kconfig.patch
new file mode 100644
index 00000000..fa6b3a32
--- /dev/null
+++ b/4500_nouveau-video-output-control-Kconfig.patch
@@ -0,0 +1,9 @@
+--- a/drivers/gpu/drm/nouveau/Kconfig
++++ b/drivers/gpu/drm/nouveau/Kconfig
+@@ -15,5 +15,6 @@ config DRM_NOUVEAU
+ select ACPI_WMI if ACPI
+ select MXM_WMI if ACPI
++ select VIDEO_OUTPUT_CONTROL
+ help
+ Choose this option for open-source nVidia support.
+
diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 652e2a7a..850cabce 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -1,5 +1,5 @@
---- a/Kconfig 2014-04-02 09:45:05.389224541 -0400
-+++ b/Kconfig 2014-04-02 09:45:39.269224273 -0400
+--- a/Kconfig 2014-04-02 10:11:12.959212144 -0400
++++ b/Kconfig 2014-04-02 10:11:41.139211921 -0400
@@ -8,4 +8,6 @@ config SRCARCH
string
option env="SRCARCH"
@@ -8,8 +8,8 @@
+
source "arch/$SRCARCH/Kconfig"
--- 1969-12-31 19:00:00.000000000 -0500
-+++ b/distro/Kconfig 2014-04-02 09:57:03.539218861 -0400
-@@ -0,0 +1,108 @@
++++ b/distro/Kconfig 2014-04-02 10:14:34.049210553 -0400
+@@ -0,0 +1,110 @@
+menu "Gentoo Linux"
+
+config GENTOO_LINUX
@@ -35,6 +35,7 @@
+ select TMPFS
+
+ select MMU
++ select HOTPLUG
+ select SHMEM
+
+ help
@@ -90,9 +91,10 @@
+ select EPOLL
+ select FANOTIFY
+ select FHANDLE
++ select HOTPLUG
+ select INOTIFY_USER
+ select NET
-+ select NET_NS
++ select NET_NS
+ select PROC_FS
+ select SIGNALFD
+ select SYSFS